mlx5: remove one indirection level from Rx/Tx
Avoid dereferencing pointers twice to get to fast Verbs functions by storing them directly in RX/TX queue structures.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Signed-off-by: Yaacov Hazan <yaacovh@mellanox.com>
Commit e16820236d (parent 806af69386)
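For context, a minimal sketch of the pattern this commit applies, with simplified and hypothetical type/field names rather than the actual mlx5 or Verbs definitions: instead of reaching through a cached interface table on every data-path call (two dependent pointer loads), the fast-path function pointers are copied once into the queue structure at setup time, so the hot path dereferences the queue structure only.

/* Illustrative sketch only; "struct cq_family", "txq_before/after" and the
 * poll signature are simplified stand-ins, not the driver's real types. */
#include <stdint.h>

struct cq;                                  /* opaque completion queue */

struct cq_family {                          /* Verbs-style interface table */
	int32_t (*poll_cnt)(struct cq *cq, uint32_t max);
};

/* Before: the queue keeps a pointer to the interface table; every poll
 * costs two dependent loads (txq->if_cq, then ->poll_cnt). */
struct txq_before {
	struct cq *cq;
	struct cq_family *if_cq;
};

static inline int32_t
poll_before(struct txq_before *txq)
{
	return txq->if_cq->poll_cnt(txq->cq, 64);
}

/* After: the function pointer is cached in the queue structure itself,
 * so the hot path loads it with a single dereference. */
struct txq_after {
	struct cq *cq;
	int32_t (*poll_cnt)(struct cq *cq, uint32_t max);
};

static inline void
txq_setup_after(struct txq_after *txq, const struct txq_before *old)
{
	txq->cq = old->cq;
	txq->poll_cnt = old->if_cq->poll_cnt; /* copied once, outside the hot path */
}

static inline int32_t
poll_after(struct txq_after *txq)
{
	return txq->poll_cnt(txq->cq, 64);
}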
@@ -61,6 +61,7 @@ CFLAGS += -g
 CFLAGS += -I.
 CFLAGS += -D_XOPEN_SOURCE=600
 CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -Wno-strict-prototypes
 LDLIBS += -libverbs
 
 # A few warnings cannot be avoided in external headers.
@@ -901,6 +901,8 @@ rxq_cleanup(struct rxq *rxq)
 		rxq_free_elts_sp(rxq);
 	else
 		rxq_free_elts(rxq);
+	rxq->poll = NULL;
+	rxq->recv = NULL;
 	if (rxq->if_wq != NULL) {
 		assert(rxq->priv != NULL);
 		assert(rxq->priv->ctx != NULL);
@@ -1103,6 +1105,10 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
 		err = EIO;
 		goto error;
 	}
+	if (tmpl.sp)
+		tmpl.recv = tmpl.if_wq->recv_sg_list;
+	else
+		tmpl.recv = tmpl.if_wq->recv_burst;
 error:
 	*rxq = tmpl;
 	assert(err >= 0);
@@ -1345,6 +1351,16 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
 	*rxq = tmpl;
 	DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl);
 	assert(ret == 0);
+	/* Assign function in queue. */
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+	rxq->poll = rxq->if_cq->poll_length_flags_cvlan;
+#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+	rxq->poll = rxq->if_cq->poll_length_flags;
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+	if (rxq->sp)
+		rxq->recv = rxq->if_wq->recv_sg_list;
+	else
+		rxq->recv = rxq->if_wq->recv_burst;
 	return 0;
 error:
 	rxq_cleanup(&tmpl);
@@ -93,7 +93,7 @@ txq_complete(struct txq *txq)
 	DEBUG("%p: processing %u work requests completions",
 	      (void *)txq, elts_comp);
 #endif
-	wcs_n = txq->if_cq->poll_cnt(txq->cq, elts_comp);
+	wcs_n = txq->poll_cnt(txq->cq, elts_comp);
 	if (unlikely(wcs_n == 0))
 		return 0;
 	if (unlikely(wcs_n < 0)) {
@@ -538,14 +538,14 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			/* Put packet into send queue. */
 #if MLX5_PMD_MAX_INLINE > 0
 			if (length <= txq->max_inline)
-				err = txq->if_qp->send_pending_inline
+				err = txq->send_pending_inline
 					(txq->qp,
 					 (void *)addr,
 					 length,
 					 send_flags);
 			else
 #endif
-				err = txq->if_qp->send_pending
+				err = txq->send_pending
 					(txq->qp,
 					 addr,
 					 length,
@@ -567,7 +567,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 				goto stop;
 			RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
 			/* Put SG list into send queue. */
-			err = txq->if_qp->send_pending_sg_list
+			err = txq->send_pending_sg_list
 				(txq->qp,
 				 sges,
 				 ret.num,
@@ -599,7 +599,7 @@ stop:
 	txq->stats.opackets += i;
 #endif
 	/* Ring QP doorbell. */
-	err = txq->if_qp->send_flush(txq->qp);
+	err = txq->send_flush(txq->qp);
 	if (unlikely(err)) {
 		/* A nonzero value is not supposed to be returned.
 		 * Nothing can be done about it. */
@@ -733,14 +733,7 @@ mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		/* Sanity checks. */
 		assert(elts_head < rxq->elts_n);
 		assert(rxq->elts_head < rxq->elts_n);
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
-		ret = rxq->if_cq->poll_length_flags_cvlan(rxq->cq, NULL, NULL,
-							  &flags, &vlan_tci);
-#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
-		ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
-						    &flags);
-		(void)vlan_tci;
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+		ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci);
 		if (unlikely(ret < 0)) {
 			struct ibv_wc wc;
 			int wcs_n;
@@ -877,9 +870,7 @@ mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		rxq->stats.ibytes += pkt_buf_len;
 #endif
 repost:
-		ret = rxq->if_wq->recv_sg_list(rxq->wq,
-					       elt->sges,
-					       RTE_DIM(elt->sges));
+		ret = rxq->recv(rxq->wq, elt->sges, RTE_DIM(elt->sges));
 		if (unlikely(ret)) {
 			/* Inability to repost WRs is fatal. */
 			DEBUG("%p: recv_sg_list(): failed (ret=%d)",
@@ -950,14 +941,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		 */
 		rte_prefetch0(seg);
 		rte_prefetch0(&seg->cacheline1);
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
-		ret = rxq->if_cq->poll_length_flags_cvlan(rxq->cq, NULL, NULL,
-							  &flags, &vlan_tci);
-#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
-		ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
-						    &flags);
-		(void)vlan_tci;
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+		ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci);
 		if (unlikely(ret < 0)) {
 			struct ibv_wc wc;
 			int wcs_n;
@@ -1049,7 +1033,7 @@ repost:
 #ifdef DEBUG_RECV
 	DEBUG("%p: reposting %u WRs", (void *)rxq, i);
 #endif
-	ret = rxq->if_wq->recv_burst(rxq->wq, sges, i);
+	ret = rxq->recv(rxq->wq, sges, i);
 	if (unlikely(ret)) {
 		/* Inability to repost WRs is fatal. */
 		DEBUG("%p: recv_burst(): failed (ret=%d)",
@@ -107,12 +107,8 @@ struct rxq {
 	struct rte_mempool *mp; /* Memory Pool for allocations. */
 	struct ibv_cq *cq; /* Completion Queue. */
 	struct ibv_exp_wq *wq; /* Work Queue. */
-	struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
-	struct ibv_exp_cq_family_v1 *if_cq; /* CQ interface. */
-#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
-	struct ibv_exp_cq_family *if_cq; /* CQ interface. */
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+	int32_t (*poll)(); /* Verbs poll function. */
+	int32_t (*recv)(); /* Verbs receive function. */
 	unsigned int port_id; /* Port ID for incoming packets. */
 	unsigned int elts_n; /* (*elts)[] length. */
 	unsigned int elts_head; /* Current index in (*elts)[]. */
@@ -130,6 +126,12 @@ struct rxq {
 	struct ibv_exp_res_domain *rd; /* Resource Domain. */
 	struct fdir_queue fdir_queue; /* Flow director queue. */
 	struct ibv_mr *mr; /* Memory Region (for mp). */
+	struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+	struct ibv_exp_cq_family_v1 *if_cq; /* CQ interface. */
+#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+	struct ibv_exp_cq_family *if_cq; /* CQ interface. */
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
 };
 
 /* Hash RX queue types. */
@@ -248,6 +250,15 @@ typedef uint8_t linear_t[16384];
 /* TX queue descriptor. */
 struct txq {
 	struct priv *priv; /* Back pointer to private data. */
+	int32_t (*poll_cnt)(struct ibv_cq *cq, uint32_t max);
+	int (*send_pending)();
+#if MLX5_PMD_MAX_INLINE > 0
+	int (*send_pending_inline)();
+#endif
+#if MLX5_PMD_SGE_WR_N > 1
+	int (*send_pending_sg_list)();
+#endif
+	int (*send_flush)(struct ibv_qp *qp);
 	struct ibv_cq *cq; /* Completion Queue. */
 	struct ibv_qp *qp; /* Queue Pair. */
 	struct txq_elt (*elts)[]; /* TX elements. */
@@ -187,6 +187,11 @@ txq_cleanup(struct txq *txq)
 
 	DEBUG("cleaning up %p", (void *)txq);
 	txq_free_elts(txq);
+	txq->poll_cnt = NULL;
+#if MLX5_PMD_MAX_INLINE > 0
+	txq->send_pending_inline = NULL;
+#endif
+	txq->send_flush = NULL;
 	if (txq->if_qp != NULL) {
 		assert(txq->priv != NULL);
 		assert(txq->priv->ctx != NULL);
@@ -414,6 +419,15 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
 	DEBUG("%p: cleaning-up old txq just in case", (void *)txq);
 	txq_cleanup(txq);
 	*txq = tmpl;
+	txq->poll_cnt = txq->if_cq->poll_cnt;
+#if MLX5_PMD_MAX_INLINE > 0
+	txq->send_pending_inline = txq->if_qp->send_pending_inline;
+#endif
+#if MLX5_PMD_SGE_WR_N > 1
+	txq->send_pending_sg_list = txq->if_qp->send_pending_sg_list;
+#endif
+	txq->send_pending = txq->if_qp->send_pending;
+	txq->send_flush = txq->if_qp->send_flush;
 	DEBUG("%p: txq updated with %p", (void *)txq, (void *)&tmpl);
 	/* Pre-register known mempools. */
 	rte_mempool_walk(txq_mp2mr_iter, txq);