net/mlx5: remove inline Tx support
Inline TX will be fully managed by the PMD after Verbs is bypassed in the
data path. Remove the current code until then.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
commit 0431c40f47
parent 97c784fb18
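For context before the hunks: the following is a minimal, self-contained model of the decision the removed code made in mlx5_tx_burst(); it is not driver code, and all toy_* names, TOY_MAX_INLINE and the hard-coded lkey value are invented for illustration. Packets no larger than txq->max_inline used to be copied straight into the send descriptor, skipping the memory-region lookup; after this commit every single-segment packet takes the lookup-and-gather path.

/* Toy model of the removed inline-vs-gather decision; not the PMD code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_MAX_INLINE 128 /* stands in for MLX5_PMD_MAX_INLINE (assumption) */

struct toy_txq {
    uint32_t max_inline; /* 0 disables the inline path, as after this commit */
};

static int toy_send_inline(const void *data, uint32_t len)
{
    (void)data;
    /* Data would be copied into the work request itself; no lkey needed. */
    printf("inline send: %" PRIu32 " bytes copied into the descriptor\n", len);
    return 0;
}

static int toy_send_gather(const void *data, uint32_t len, uint32_t lkey)
{
    (void)data;
    /* Hardware would DMA the buffer, addressed through the MR key. */
    printf("gather send: %" PRIu32 " bytes, lkey=0x%" PRIx32 "\n", len, lkey);
    return 0;
}

static int toy_tx_one(const struct toy_txq *txq, const void *data, uint32_t len)
{
    /* Old behaviour: small packets bypassed the MR lookup entirely. */
    if (len <= txq->max_inline)
        return toy_send_inline(data, len);
    /* New behaviour (max_inline effectively 0): always look up and gather. */
    return toy_send_gather(data, len, 0x1234 /* pretend lkey */);
}

int main(void)
{
    static const char pkt[64] = "payload";
    const struct toy_txq old_txq = { .max_inline = TOY_MAX_INLINE };
    const struct toy_txq new_txq = { .max_inline = 0 };

    toy_tx_one(&old_txq, pkt, sizeof(pkt)); /* takes the inline branch */
    toy_tx_one(&new_txq, pkt, sizeof(pkt)); /* gather path, as in the new code */
    return 0;
}

The real branch was additionally guarded by Verbs capabilities (the send_pending_inline* function pointers), which is what the later hunks remove from struct txq and txq_setup().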
@@ -207,7 +207,6 @@ CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS=1
 #
 CONFIG_RTE_LIBRTE_MLX5_PMD=n
 CONFIG_RTE_LIBRTE_MLX5_DEBUG=n
-CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE=0
 CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE=8
 
 #
@@ -114,16 +114,6 @@ These options can be modified in the ``.config`` file.
   adds additional run-time checks and debugging messages at the cost of
   lower performance.
 
-- ``CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE`` (default **0**)
-
-  Amount of data to be inlined during TX operations. Improves latency.
-  Can improve PPS performance when PCI backpressure is detected and may be
-  useful for scenarios involving heavy traffic on many queues.
-
-  Since the additional software logic necessary to handle this mode can
-  lower performance when there is no backpressure, it is not enabled by
-  default.
-
 - ``CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE`` (default **8**)
 
   Maximum number of cached memory pools (MPs) per TX queue. Each MP from
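The option kept above, CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE, bounds a small per-queue cache that maps a packet's mempool to the memory-region key used by the gather send; with the inline branch gone (see the mlx5_rxtx.c hunk further down), every single-segment packet goes through that lookup. Below is a rough, self-contained sketch of such a bounded cache; the names, the fake keys and the full-cache behaviour are assumptions, not the actual txq_mp2mr() implementation.

/* Toy bounded mempool -> lkey cache; names and behaviour are assumptions. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_TX_MP_CACHE 8 /* stands in for MLX5_PMD_TX_MP_CACHE (default 8) */

struct toy_mp2mr_entry {
    const void *mp;  /* mempool identity, pointer used as the key */
    uint32_t lkey;   /* memory-region key "registered" for that pool */
};

struct toy_txq {
    struct toy_mp2mr_entry mp2mr[TOY_TX_MP_CACHE];
    unsigned int mp2mr_n; /* entries currently in use */
};

/* Linear search over at most TOY_TX_MP_CACHE entries; register on a miss. */
static uint32_t toy_mp2lkey(struct toy_txq *txq, const void *mp)
{
    unsigned int i;

    for (i = 0; i != txq->mp2mr_n; ++i)
        if (txq->mp2mr[i].mp == mp)
            return txq->mp2mr[i].lkey;   /* cache hit */
    if (txq->mp2mr_n == TOY_TX_MP_CACHE)
        return (uint32_t)-1;             /* full: caller treats it as failure */
    txq->mp2mr[txq->mp2mr_n].mp = mp;
    txq->mp2mr[txq->mp2mr_n].lkey = 0x1000u + txq->mp2mr_n; /* fake MR key */
    return txq->mp2mr[txq->mp2mr_n++].lkey;
}

int main(void)
{
    struct toy_txq txq = { .mp2mr_n = 0 };
    int pool_a, pool_b; /* stand-ins for two distinct mempools */

    printf("pool_a -> 0x%" PRIx32 "\n", toy_mp2lkey(&txq, &pool_a)); /* miss */
    printf("pool_b -> 0x%" PRIx32 "\n", toy_mp2lkey(&txq, &pool_b)); /* miss */
    printf("pool_a -> 0x%" PRIx32 "\n", toy_mp2lkey(&txq, &pool_a)); /* hit */
    return 0;
}

At this size a linear scan is cheap, which is why the cache is capped by a small compile-time constant rather than grown dynamically.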
@@ -86,10 +86,6 @@ else
 CFLAGS += -DNDEBUG -UPEDANTIC
 endif
 
-ifdef CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE
-CFLAGS += -DMLX5_PMD_MAX_INLINE=$(CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE)
-endif
-
 ifdef CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE
 CFLAGS += -DMLX5_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE)
 endif
@@ -54,11 +54,6 @@
 /* RSS Indirection table size. */
 #define RSS_INDIRECTION_TABLE_SIZE 256
 
-/* Maximum size for inline data. */
-#ifndef MLX5_PMD_MAX_INLINE
-#define MLX5_PMD_MAX_INLINE 0
-#endif
-
 /*
  * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
  * from which buffers are to be transmitted will have to be mapped by this
@@ -329,58 +329,33 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                 rte_prefetch0((volatile void *)
                               (uintptr_t)buf_next_addr);
             }
-            /* Put packet into send queue. */
-#if MLX5_PMD_MAX_INLINE > 0
-            if (length <= txq->max_inline) {
-#ifdef HAVE_VERBS_VLAN_INSERTION
-                if (insert_vlan)
-                    err = txq->send_pending_inline_vlan
-                        (txq->qp,
-                         (void *)addr,
-                         length,
-                         send_flags,
-                         &buf->vlan_tci);
-                else
-#endif /* HAVE_VERBS_VLAN_INSERTION */
-                    err = txq->send_pending_inline
-                        (txq->qp,
-                         (void *)addr,
-                         length,
-                         send_flags);
-            } else
-#endif
-            {
-                /*
-                 * Retrieve Memory Region key for this
-                 * memory pool.
-                 */
-                lkey = txq_mp2mr(txq, txq_mb2mp(buf));
-                if (unlikely(lkey == (uint32_t)-1)) {
-                    /* MR does not exist. */
-                    DEBUG("%p: unable to get MP <-> MR"
-                          " association", (void *)txq);
-                    /* Clean up TX element. */
-                    elt->buf = NULL;
-                    goto stop;
-                }
-#ifdef HAVE_VERBS_VLAN_INSERTION
-                if (insert_vlan)
-                    err = txq->send_pending_vlan
-                        (txq->qp,
-                         addr,
-                         length,
-                         lkey,
-                         send_flags,
-                         &buf->vlan_tci);
-                else
-#endif /* HAVE_VERBS_VLAN_INSERTION */
-                    err = txq->send_pending
-                        (txq->qp,
-                         addr,
-                         length,
-                         lkey,
-                         send_flags);
+            /* Retrieve Memory Region key for this memory pool. */
+            lkey = txq_mp2mr(txq, txq_mb2mp(buf));
+            if (unlikely(lkey == (uint32_t)-1)) {
+                /* MR does not exist. */
+                DEBUG("%p: unable to get MP <-> MR"
+                      " association", (void *)txq);
+                /* Clean up TX element. */
+                elt->buf = NULL;
+                goto stop;
             }
+#ifdef HAVE_VERBS_VLAN_INSERTION
+            if (insert_vlan)
+                err = txq->send_pending_vlan
+                    (txq->qp,
+                     addr,
+                     length,
+                     lkey,
+                     send_flags,
+                     &buf->vlan_tci);
+            else
+#endif /* HAVE_VERBS_VLAN_INSERTION */
+                err = txq->send_pending
+                    (txq->qp,
+                     addr,
+                     length,
+                     lkey,
+                     send_flags);
             if (unlikely(err))
                 goto stop;
 #ifdef MLX5_PMD_SOFT_COUNTERS
@@ -239,20 +239,11 @@ struct txq {
     int (*send_pending)();
 #ifdef HAVE_VERBS_VLAN_INSERTION
     int (*send_pending_vlan)();
-#endif
-#if MLX5_PMD_MAX_INLINE > 0
-    int (*send_pending_inline)();
-#ifdef HAVE_VERBS_VLAN_INSERTION
-    int (*send_pending_inline_vlan)();
-#endif
 #endif
     int (*send_flush)(struct ibv_qp *qp);
     struct ibv_cq *cq; /* Completion Queue. */
     struct ibv_qp *qp; /* Queue Pair. */
     struct txq_elt (*elts)[]; /* TX elements. */
-#if MLX5_PMD_MAX_INLINE > 0
-    uint32_t max_inline; /* Max inline send size <= MLX5_PMD_MAX_INLINE. */
-#endif
     unsigned int elts_n; /* (*elts)[] length. */
     unsigned int elts_head; /* Current index in (*elts)[]. */
     unsigned int elts_tail; /* First element awaiting completion. */
@@ -173,9 +173,6 @@ txq_cleanup(struct txq *txq)
     DEBUG("cleaning up %p", (void *)txq);
     txq_free_elts(txq);
     txq->poll_cnt = NULL;
-#if MLX5_PMD_MAX_INLINE > 0
-    txq->send_pending_inline = NULL;
-#endif
     txq->send_flush = NULL;
     if (txq->if_qp != NULL) {
         assert(txq->priv != NULL);
@@ -282,7 +279,8 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
         .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
         .res_domain = tmpl.rd,
     };
-    tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
+    tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0,
+                                &attr.cq);
     if (tmpl.cq == NULL) {
         ret = ENOMEM;
         ERROR("%p: CQ creation failure: %s",
@@ -305,9 +303,6 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
                   desc),
             /* Max number of scatter/gather elements in a WR. */
             .max_send_sge = 1,
-#if MLX5_PMD_MAX_INLINE > 0
-            .max_inline_data = MLX5_PMD_MAX_INLINE,
-#endif
         },
         .qp_type = IBV_QPT_RAW_PACKET,
         /* Do *NOT* enable this, completions events are managed per
@@ -325,10 +320,6 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
               (void *)dev, strerror(ret));
         goto error;
     }
-#if MLX5_PMD_MAX_INLINE > 0
-    /* ibv_create_qp() updates this value. */
-    tmpl.max_inline = attr.init.cap.max_inline_data;
-#endif
     attr.mod = (struct ibv_exp_qp_attr){
         /* Move the QP to this state. */
         .qp_state = IBV_QPS_INIT,
@@ -403,12 +394,6 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
     txq_cleanup(txq);
     *txq = tmpl;
     txq->poll_cnt = txq->if_cq->poll_cnt;
-#if MLX5_PMD_MAX_INLINE > 0
-    txq->send_pending_inline = txq->if_qp->send_pending_inline;
-#ifdef HAVE_VERBS_VLAN_INSERTION
-    txq->send_pending_inline_vlan = txq->if_qp->send_pending_inline_vlan;
-#endif
-#endif
     txq->send_pending = txq->if_qp->send_pending;
 #ifdef HAVE_VERBS_VLAN_INSERTION
     txq->send_pending_vlan = txq->if_qp->send_pending_vlan;
|
Loading…
x
Reference in New Issue
Block a user