move DPDK version up to 18.11

commit 86a3e18c36 (parent 496684e007)
Author: Amy Ousterhout
Date: 2018-12-14 14:05:25 -05:00
7 changed files with 401 additions and 14 deletions


@@ -71,7 +71,6 @@ ifneq ($(MLX),)
DPDK_LIBS += -Wl,-whole-archive -lrte_pmd_mlx4 -Wl,-no-whole-archive
DPDK_LIBS += -Wl,-whole-archive -libverbs -Wl,-no-whole-archive
DPDK_LIBS += -Wl,-whole-archive -lmlx4 -Wl,-no-whole-archive
DPDK_LIBS += -Wl,-whole-archive -lrte_kvargs -Wl,-no-whole-archive
endif
# must be first

dpdk (submodule, 2 lines changed)

@@ -1 +1 @@
-Subproject commit f38e484e5272b162706f00529aa795a9d97b0244
+Subproject commit 0da7f445df445630c794897347ee360d6fe6348b


@@ -7,10 +7,10 @@ git submodule init
git submodule update --recursive
# Apply driver patches
-patch -p 1 -d dpdk/ < ixgbe.patch
+patch -p 1 -d dpdk/ < ixgbe_18_11.patch
if lspci | grep -q Mellanox; then
-patch -p 1 -d dpdk/ < mlx.patch
+patch -p 1 -d dpdk/ < mlx4_18_11.patch
sed -i 's/CONFIG_RTE_LIBRTE_MLX4_PMD=n/CONFIG_RTE_LIBRTE_MLX4_PMD=y/g' dpdk/config/common_base
fi


@@ -51,7 +51,7 @@
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.max_rx_pkt_len = ETHER_MAX_LEN,
-.hw_ip_checksum = 1,
+.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM,
.mq_mode = ETH_MQ_RX_RSS,
},
.rx_adv_conf = {
@@ -60,6 +60,9 @@ static const struct rte_eth_conf port_conf_default = {
.rss_hf = ETH_RSS_UDP,
},
},
+.txmode = {
+.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM,
+},
};
/*
@@ -78,7 +81,7 @@ static inline int dpdk_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
struct rte_eth_txconf *txconf;
struct rte_eth_rxconf *rxconf;
-if (port >= rte_eth_dev_count())
+if (!rte_eth_dev_is_valid_port(port))
return -1;
/* Configure the Ethernet device. */
@@ -106,8 +109,6 @@ static inline int dpdk_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
txconf = &dev_info.default_txconf;
txconf->tx_rs_thresh = 64;
txconf->tx_free_thresh = 64;
-txconf->txq_flags &= ~(ETH_TXQ_FLAGS_NOXSUMUDP |
-ETH_TXQ_FLAGS_NOXSUMTCP);
/* Allocate and set up 1 TX queue per Ethernet port. */
for (q = 0; q < tx_rings; q++) {
@@ -165,7 +166,6 @@ void dpdk_print_eth_stats()
*/
int dpdk_init()
{
-unsigned nb_ports;
char *argv[4];
char buf[10];
@@ -185,8 +185,7 @@ int dpdk_init()
}
/* check that there is a port to send/receive on */
-nb_ports = rte_eth_dev_count();
-if (nb_ports < 1) {
+if (!rte_eth_dev_is_valid_port(0)) {
log_err("dpdk: no available ports");
return -1;
}
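
Note (not part of the diff): in DPDK 18.11 the old per-port flags (hw_ip_checksum, txq_flags) give way to rxmode/txmode offload bitmasks plus per-queue offloads, and port checks go through rte_eth_dev_is_valid_port(). A minimal sketch of the resulting init flow, assuming a single queue pair, the port_conf_default defined above, and a mempool named mbuf_pool; descriptor counts are illustrative:

#include <rte_ethdev.h>

static int port_init_sketch(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf conf = port_conf_default;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	rte_eth_dev_info_get(port, &dev_info);
	/* Request only the offloads the device actually supports. */
	conf.rxmode.offloads &= dev_info.rx_offload_capa;
	conf.txmode.offloads &= dev_info.tx_offload_capa;

	if (rte_eth_dev_configure(port, 1, 1, &conf) != 0)
		return -1;

	/* Per-queue offloads replace the old txq_flags knobs. */
	txconf = dev_info.default_txconf;
	txconf.offloads = conf.txmode.offloads;

	if (rte_eth_rx_queue_setup(port, 0, 128, rte_eth_dev_socket_id(port),
				   NULL, mbuf_pool) < 0)
		return -1;
	if (rte_eth_tx_queue_setup(port, 0, 512, rte_eth_dev_socket_id(port),
				   &txconf) < 0)
		return -1;
	return rte_eth_dev_start(port);
}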


@@ -204,8 +204,8 @@ static struct rte_mempool *rx_pktmbuf_pool_create_in_shm(const char *name,
struct rte_pktmbuf_pool_private mbp_priv;
struct rte_mempool *mp;
int ret;
+size_t pg_size, pg_shift, min_chunk_size, align, len;
void *shbuf;
-size_t total_elt_sz, pg_size, pg_shift, len;
/* create rte_mempool */
if (RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) != priv_size) {
@@ -230,10 +230,9 @@
rte_pktmbuf_pool_init(mp, &mbp_priv);
/* check necessary size and map shared memory */
-total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
pg_size = PGSIZE_2MB;
pg_shift = rte_bsf32(pg_size);
-len = rte_mempool_xmem_size(n, total_elt_sz, pg_shift, mp->flags);
+len = rte_mempool_ops_calc_mem_size(mp, n, pg_shift, &min_chunk_size, &align);
if (len > INGRESS_MBUF_SHM_SIZE) {
log_err("rx: shared memory is too small for number of mbufs");
goto fail_free_mempool;

ixgbe_18_11.patch (new file, 155 lines)

@@ -0,0 +1,155 @@
From cb179332767d5f421e3bd446e7fc72133d44167e Mon Sep 17 00:00:00 2001
From: Amy Ousterhout <aousterh@mit.edu>
Date: Mon, 10 Dec 2018 16:02:53 -0500
Subject: [PATCH] new ixgbe patch
---
drivers/net/ixgbe/ixgbe_rxtx.c | 48 +++++++++++++++++++++++++++++--
drivers/net/ixgbe/ixgbe_rxtx.h | 6 ++--
drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 6 ++--
drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c | 15 +++++++++-
4 files changed, 66 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index ddc7efa87..279d35086 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -558,10 +558,10 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
/* Default RS bit threshold values */
#ifndef DEFAULT_TX_RS_THRESH
-#define DEFAULT_TX_RS_THRESH 32
+#define DEFAULT_TX_RS_THRESH 64
#endif
#ifndef DEFAULT_TX_FREE_THRESH
-#define DEFAULT_TX_FREE_THRESH 32
+#define DEFAULT_TX_FREE_THRESH 64
#endif
/* Reset transmit descriptors after they have been used */
@@ -2374,6 +2374,41 @@ static const struct ixgbe_txq_ops def_txq_ops = {
.reset = ixgbe_reset_tx_queue,
};
+static uint16_t
+install_txq_ctx(struct ixgbe_tx_queue *txq, uint16_t slot,
+ const uint64_t flags, const union ixgbe_tx_offload tx_offload)
+{
+
+ /* This must be called before any packets are placed in the ring */
+ ASSERT(txq->tx_tail == 0);
+ ASSERT(((struct ixgbe_tx_entry_v *)txq->sw_ring_v)->mbuf == NULL);
+ ASSERT(txq->nb_tx_free >= txq->tx_free_thresh);
+ ASSERT(txq->nb_tx_desc > 0);
+ ASSERT(txq->nb_tx_free >= 1);
+ ASSERT(slot < IXGBE_CTX_NUM);
+
+
+ uint64_t tx_ol_req = flags & IXGBE_TX_OFFLOAD_MASK;
+
+ volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
+ volatile struct ixgbe_adv_tx_context_desc *ctx_txd;
+ ctx_txd = (volatile struct ixgbe_adv_tx_context_desc *)txr;
+
+ txq->ctx_curr = slot;
+ ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
+ tx_offload, NULL);
+
+ txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + 1);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - 1);
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ (unsigned) txq->port_id, (unsigned) txq->queue_id,
+ (unsigned) 1, (unsigned) 1);
+ IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, 1);
+ txq->tx_tail = 1;
+ return 1;
+}
+
/* Takes an ethdev and a queue and sets up the tx function to be used based on
* the queue parameters. Used in tx_queue_setup by primary process and then
* in dev_init by secondary process when attaching to an existing ethdev.
@@ -2635,6 +2670,15 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
dev->data->tx_queues[queue_idx] = txq;
+ uint64_t olflags = PKT_TX_IP_CKSUM | PKT_TX_IPV4 | PKT_TX_TCP_CKSUM;
+ union ixgbe_tx_offload tx_offloads;
+ memset(&tx_offloads, 0, sizeof(tx_offloads));
+ tx_offloads.l2_len = ETHER_HDR_LEN;
+ tx_offloads.l3_len = sizeof(struct ipv4_hdr);
+ tx_offloads.l4_len = sizeof(struct tcp_hdr);
+
+ if (install_txq_ctx(txq, 0, olflags, tx_offloads) != 1)
+ return -EINVAL;
return 0;
}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 39378f754..f869fcec6 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -27,14 +27,14 @@
#define IXGBE_MIN_RING_DESC 32
#define IXGBE_MAX_RING_DESC 4096
-#define RTE_PMD_IXGBE_TX_MAX_BURST 32
-#define RTE_PMD_IXGBE_RX_MAX_BURST 32
+#define RTE_PMD_IXGBE_TX_MAX_BURST 64
+#define RTE_PMD_IXGBE_RX_MAX_BURST 64
#define RTE_IXGBE_TX_MAX_FREE_BUF_SZ 64
#define RTE_IXGBE_DESCS_PER_LOOP 4
#ifdef RTE_IXGBE_INC_VECTOR
-#define RTE_IXGBE_RXQ_REARM_THRESH 32
+#define RTE_IXGBE_RXQ_REARM_THRESH 64
#define RTE_IXGBE_MAX_RX_BURST RTE_IXGBE_RXQ_REARM_THRESH
#endif
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index a97c27189..d2e02411e 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -94,9 +94,9 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
* tx_next_dd - (tx_rs_thresh-1)
*/
txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
- m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
- if (likely(m != NULL)) {
- free[0] = m;
+ if (likely(txep[0].mbuf &&
+ (m = rte_pktmbuf_prefree_seg(txep[0].mbuf)) != NULL)) {
+ free[0] = m;
nb_free = 1;
for (i = 1; i < n; i++) {
m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index c9ba48246..05c3130eb 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -618,7 +618,20 @@ static inline void
vtx1(volatile union ixgbe_adv_tx_desc *txdp,
struct rte_mbuf *pkt, uint64_t flags)
{
- __m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 46 |
+ /* Set Packet Length */
+ uint64_t top_flags = (uint64_t)pkt->pkt_len << 46;
+
+ /* Set IXGBE_TXD_POPTS_IXSM */
+ top_flags |= (pkt->ol_flags & PKT_TX_IP_CKSUM) >> 14;
+ RTE_BUILD_BUG_ON(
+ PKT_TX_IP_CKSUM >> 14 != (uint64_t)IXGBE_TXD_POPTS_IXSM << 40);
+
+ /* Set IXGBE_TXD_POPTS_TXSM */
+ top_flags |= (pkt->ol_flags & PKT_TX_TCP_CKSUM) >> 11;
+ RTE_BUILD_BUG_ON(
+ PKT_TX_TCP_CKSUM >> 11 != (uint64_t)IXGBE_TXD_POPTS_TXSM << 40);
+
+ __m128i descriptor = _mm_set_epi64x(top_flags |
flags | pkt->data_len,
pkt->buf_iova + pkt->data_off);
_mm_store_si128((__m128i *)&txdp->read, descriptor);
--
2.13.0
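
Note (not part of the patch): the vtx1() hunk packs the checksum requests straight into the advanced TX descriptor. The POPTS bits sit at bits 40-41 of the descriptor's upper 64-bit word, and with the usual flag layout (PKT_TX_IP_CKSUM at bit 54, PKT_TX_TCP_CKSUM at bit 52) a plain right shift lands each mbuf flag on its POPTS bit, which the two RTE_BUILD_BUG_ONs pin down at compile time. A stand-alone illustration of the same arithmetic, with the relevant values hard-coded under sketch-local names:

#include <assert.h>
#include <stdint.h>

#define SKETCH_PKT_TX_IP_CKSUM  (1ULL << 54)  /* request IP checksum offload */
#define SKETCH_PKT_TX_TCP_CKSUM (1ULL << 52)  /* request TCP checksum offload */
#define SKETCH_POPTS_IXSM       0x1ULL        /* "insert IP checksum" descriptor bit */
#define SKETCH_POPTS_TXSM       0x2ULL        /* "insert L4 checksum" descriptor bit */

/* Mirror of the shifts used in vtx1(): map ol_flags bits onto POPTS bits 40-41. */
static uint64_t popts_from_ol_flags(uint64_t ol_flags)
{
	uint64_t top = 0;

	top |= (ol_flags & SKETCH_PKT_TX_IP_CKSUM) >> 14;  /* bit 54 -> bit 40 */
	top |= (ol_flags & SKETCH_PKT_TX_TCP_CKSUM) >> 11; /* bit 52 -> bit 41 */
	return top;
}

int main(void)
{
	assert(popts_from_ol_flags(SKETCH_PKT_TX_IP_CKSUM) == SKETCH_POPTS_IXSM << 40);
	assert(popts_from_ol_flags(SKETCH_PKT_TX_TCP_CKSUM) == SKETCH_POPTS_TXSM << 40);
	return 0;
}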

mlx4_18_11.patch (new file, 235 lines)

@@ -0,0 +1,235 @@
From 458bb8f0deab3c7fec6ed16706a7f2ad6a75e87c Mon Sep 17 00:00:00 2001
From: Amy Ousterhout <aousterh@mit.edu>
Date: Wed, 12 Dec 2018 17:44:10 -0500
Subject: [PATCH] mlx4 patch for DPDK 18.11
---
drivers/net/mlx4/mlx4_mr.c | 110 +++++++++++++++++++++++++++++++++++
drivers/net/mlx4/mlx4_mr.h | 9 +++
drivers/net/mlx4/mlx4_rxtx.c | 2 +-
drivers/net/mlx4/mlx4_rxtx.h | 46 +++++++++++++++
4 files changed, 166 insertions(+), 1 deletion(-)
diff --git a/drivers/net/mlx4/mlx4_mr.c b/drivers/net/mlx4/mlx4_mr.c
index a0094483a..d75d659f8 100644
--- a/drivers/net/mlx4/mlx4_mr.c
+++ b/drivers/net/mlx4/mlx4_mr.c
@@ -1358,3 +1358,113 @@ mlx4_mr_release(struct rte_eth_dev *dev)
/* Free all remaining MRs. */
mlx4_mr_garbage_collect(dev);
}
+
+/**
+ * Create a new memory region for a custom memory pool.
+ *
+ * @param dev
+ * Pointer to Ethernet device
+ * @param addr
+ * Start address of memory region
+ * @param len
+ * Length of memory region
+ * @param socket_id
+ * Socket to create the memory region description on
+ */
+struct mlx4_mr *
+mlx4_mr_create_custom(struct rte_eth_dev *dev, uintptr_t addr,
+ uint32_t len, int socket_id)
+{
+ struct mlx4_mr *mr = NULL;
+ struct priv *priv = dev->data->dev_private;
+
+ mr = rte_zmalloc_socket(NULL,
+ RTE_ALIGN_CEIL(sizeof(*mr),
+ RTE_CACHE_LINE_SIZE),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (mr == NULL) {
+ WARN("port %u unable to allocate memory for a new MR",
+ dev->data->port_id);
+ return NULL;
+ }
+ DEBUG("port %u register MR for custom mempool", dev->data->port_id);
+ mr->ibv_mr = mlx4_glue->reg_mr(priv->pd, (void *)addr, len,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (mr->ibv_mr == NULL) {
+ WARN("port %u fail to create a verbs MR for address (%p)",
+ dev->data->port_id, (void *)addr);
+ rte_free(mr);
+ return NULL;
+ }
+ mr->msl = NULL; /* Mark it is external memory. */
+ mr->ms_bmp = NULL;
+ mr->ms_n = 1;
+ mr->ms_bmp_n = 1;
+ DEBUG("port %u MR CREATED (%p) for external memory %p:\n"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
+ dev->data->port_id, (void *)mr, (void *)addr,
+ addr, addr + len, rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
+
+ return mr;
+}
+
+/**
+ * Add memory region (MR) <-> memory id association to mr_ctrl->id2mr[].
+ * If id2mr[] is full, remove an entry first.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param[in] m
+ * Info about region for which a memory region lkey must be added.
+ * @param[in] i
+ * Index into mr_ctrl->id2mr[] at which to add the memory region (MR).
+ * @param mp
+ * Mempool for which we're adding the mem region
+ *
+ * @return
+ * Added mr->lkey on success, (uint32_t)-1 on failure.
+ */
+uint32_t
+mlx4_txq_add_mr_from_mem_info(struct txq *txq, struct mem_info *m, uint32_t i,
+ struct rte_mempool *mp)
+{
+ struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ struct mlx4_mr *mr;
+
+ if (i != RTE_DIM(mr_ctrl->id2mr) && mr_ctrl->id2mr[i].id != 0) {
+ /* Need to replace an existing entry. */
+ DEBUG("%p: replacing possibly stale MR", (void *)mr_ctrl);
+ mr_free(mr_ctrl->id2mr[i].mr);
+ }
+
+ /* Add a new entry, register MR first. */
+ DEBUG("%p: discovered new mem info", (void *)mr_ctrl);
+ mr = mlx4_mr_create_custom(txq->priv->dev, m->start_addr,
+ m->end_addr - m->start_addr, mp->socket_id);
+ if (unlikely(mr == NULL)) {
+ DEBUG("%p: unable to configure MR, mlx4_mr_get() failed",
+ (void *)txq);
+ return (uint32_t)-1;
+ }
+ if (unlikely(i == RTE_DIM(mr_ctrl->id2mr))) {
+ /* Table is full, remove oldest entry. */
+ DEBUG("%p: MR <-> ID table full, dropping oldest entry.",
+ (void *)mr_ctrl);
+ --i;
+ mr_free(mr_ctrl->id2mr[0].mr);
+ memmove(&mr_ctrl->id2mr[0], &mr_ctrl->id2mr[1],
+ (sizeof(mr_ctrl->id2mr) - sizeof(mr_ctrl->id2mr[0])));
+ }
+ /* Store the new entry. */
+ mr_ctrl->id2mr[i].id = m->unique_id;
+ mr_ctrl->id2mr[i].mr = mr;
+ mr_ctrl->id2mr[i].start = m->start_addr;
+ mr_ctrl->id2mr[i].end = m->end_addr;
+ mr_ctrl->id2mr[i].lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
+ DEBUG("%p: new MR lkey for mem_info %p %p: 0x%08" PRIu32,
+ (void *)mr_ctrl, (void *) m->start_addr, (void *) m->end_addr,
+ mr_ctrl->id2mr[i].lkey);
+ return mr_ctrl->id2mr[i].lkey;
+}
diff --git a/drivers/net/mlx4/mlx4_mr.h b/drivers/net/mlx4/mlx4_mr.h
index 37a365a8b..d1b7ba0d9 100644
--- a/drivers/net/mlx4/mlx4_mr.h
+++ b/drivers/net/mlx4/mlx4_mr.h
@@ -64,6 +64,13 @@ struct mlx4_mr_ctrl {
uint16_t head; /* Index of the oldest entry in top-half cache. */
struct mlx4_mr_cache cache[MLX4_MR_CACHE_N]; /* Cache for top-half. */
struct mlx4_mr_btree cache_bh; /* Cache for bottom-half. */
+ struct {
+ uintptr_t id; /**< id of mem region (proc->uniqid). */
+ struct mlx4_mr *mr; /**< Memory region. */
+ uintptr_t start;
+ uintptr_t end;
+ uint32_t lkey; /**< mr->lkey copy. */
+ } __rte_packed id2mr[MLX4_MR_CACHE_N]; /**< id to MR translation table. */
} __rte_packed;
extern struct mlx4_dev_list mlx4_mem_event_cb_list;
@@ -81,6 +88,8 @@ int mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
struct rte_mempool *mp);
void mlx4_mr_dump_dev(struct rte_eth_dev *dev);
void mlx4_mr_release(struct rte_eth_dev *dev);
+struct mlx4_mr *mlx4_mr_create_custom(struct rte_eth_dev *dev, uintptr_t addr,
+ uint32_t len, int socket_id);
/**
* Look up LKey from given lookup table by linear search. Firstly look up the
diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
index 8c88effcd..9f98e6533 100644
--- a/drivers/net/mlx4/mlx4_rxtx.c
+++ b/drivers/net/mlx4/mlx4_rxtx.c
@@ -949,7 +949,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
elt->buf = NULL;
break;
}
- lkey = mlx4_tx_mb2mr(txq, buf);
+ lkey = mlx4_tx_mb2mr_custom(txq, buf);
if (unlikely(lkey == (uint32_t)-1)) {
/* MR does not exist. */
DEBUG("%p: unable to get MP <-> MR association",
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index d7ec4e0c5..2dbed57c7 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -165,6 +165,13 @@ uint32_t mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr);
uint32_t mlx4_tx_mb2mr_bh(struct txq *txq, struct rte_mbuf *mb);
uint32_t mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr,
struct rte_mempool *mp);
+struct mem_info {
+ uintptr_t unique_id;
+ uintptr_t start_addr;
+ uintptr_t end_addr;
+};
+uint32_t mlx4_txq_add_mr_from_mem_info(struct txq *txq, struct mem_info *m,
+ uint32_t i, struct rte_mempool *mp);
/**
* Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the
@@ -243,4 +250,43 @@ mlx4_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
return mlx4_tx_mb2mr_bh(txq, mb);
}
+#define __max(x,y) ((x) > (y) ? (x) : (y))
+#define __min(x,y) ((x) < (y) ? (x) : (y))
+/**
+ * Query LKey from a packet buffer for Tx. If not found, register the memory.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param mb
+ * Buffer for which a memory region lkey must be returned.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static __rte_always_inline uint32_t
+mlx4_tx_mb2mr_custom(struct txq *txq, struct rte_mbuf *mb)
+{
+ struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ unsigned int i;
+ struct mem_info *m = (struct mem_info *)(((char *) mb) +
+ sizeof(struct rte_mbuf));
+
+ for (i = 0; (i != RTE_DIM(mr_ctrl->id2mr)); ++i) {
+ if (unlikely(mr_ctrl->id2mr[i].id == 0)) {
+ /* Unknown region, add a new MR for it. */
+ break;
+ }
+ if (mr_ctrl->id2mr[i].id == m->unique_id) {
+ /* Found region. */
+ return mr_ctrl->id2mr[i].lkey;
+ }
+ if (__max(mr_ctrl->id2mr[i].start, m->start_addr) <
+ __min(mr_ctrl->id2mr[i].end, m->end_addr)) {
+ /* Recreate lkey/mr for previously mapped regions */
+ return mlx4_txq_add_mr_from_mem_info(txq, m, i, mlx4_mb2mp(mb));
+ }
+ }
+ return mlx4_txq_add_mr_from_mem_info(txq, m, i, mlx4_mb2mp(mb));
+}
+
#endif /* MLX4_RXTX_H_ */
--
2.17.1
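
Note (not part of the patch): the fast path added above resolves lkeys from a struct mem_info stored in the mbuf private area (the bytes immediately after struct rte_mbuf), rather than from the owning mempool. A rough sketch of the application side, assuming every mempool is created with priv_size >= sizeof(struct mem_info) and that region_id/base/len describe the backing shared-memory region; the helper name and the re-declared struct (mirroring the one added to mlx4_rxtx.h) are illustrative:

#include <stdint.h>
#include <rte_mbuf.h>

struct mem_info {
	uintptr_t unique_id;
	uintptr_t start_addr;
	uintptr_t end_addr;
};

/* Stamp per-buffer metadata so mlx4_tx_mb2mr_custom() can find the right MR
 * without consulting the mempool the mbuf came from. */
static void stamp_mem_info(struct rte_mbuf *m, uintptr_t region_id,
			   uintptr_t base, size_t len)
{
	struct mem_info *mi =
		(struct mem_info *)((char *)m + sizeof(struct rte_mbuf));

	mi->unique_id = region_id;  /* must be nonzero: 0 marks a free id2mr slot */
	mi->start_addr = base;
	mi->end_addr = base + len;
}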