net/mlx5: zero LRO mbuf headroom

An LRO packet may consume all the stride memory, hence the PMD cannot
guarantee head-room for the LRO mbuf.

The issue is a lack of HW support for writing the packet at an offset
from the stride start.

A new striding RQ feature may be added in CX6 DX to allow head-room and
tail-room for the LRO strides.
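
As an illustration of the resulting behavior, a minimal standalone sketch
(not the PMD code itself; RTE_PKTMBUF_HEADROOM is hard-coded to DPDK's
default of 128 bytes and lro_enabled stands in for the real device state):

#include <stdio.h>

#define RTE_PKTMBUF_HEADROOM 128 /* DPDK's default headroom, in bytes */

int main(void)
{
	int lro_enabled = 1; /* stand-in: pretend LRO is configured */
	/*
	 * With LRO the whole stride may be consumed by packet data, so
	 * no headroom can be reserved in front of it.
	 */
	unsigned int strd_headroom_en = lro_enabled ? 0 : 1;
	unsigned int headroom_sz = strd_headroom_en * RTE_PKTMBUF_HEADROOM;

	/* The mbuf data offset becomes the headroom size: 0 when LRO is
	 * on, RTE_PKTMBUF_HEADROOM otherwise. */
	printf("LRO %s -> data_off = %u\n",
	       lro_enabled ? "on" : "off", headroom_sz);
	return 0;
}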

Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Author:    Matan Azrad <matan@mellanox.com>
Date:      2019-07-22 14:52:23 +00:00
Committer: Ferruh Yigit
Commit:    a496e09317 (parent: e4c2a16eb1)
4 changed files, 18 insertions(+), 8 deletions(-)

--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst

@@ -165,6 +165,7 @@ Limitations
 - LRO:
 
+  - No mbuf headroom space is created for RX packets when LRO is configured.
   - ``scatter_fcs`` is disabled when LRO is configured.
 
 Statistics

--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c

@@ -1565,6 +1565,12 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
 	unsigned int mprq_stride_size;
 	struct mlx5_dev_config *config = &priv->config;
+	/*
+	 * LRO packet may consume all the stride memory, hence we cannot
+	 * guarantee head-room. A new striding RQ feature may be added in
+	 * CX6 DX to allow head-room and tail-room for the LRO packets.
+	 */
+	unsigned int strd_headroom_en = mlx5_lro_on(dev) ? 0 : 1;
 	/*
 	 * Always allocate extra slots, even if eventually
 	 * the vector Rx will not be used.
@@ -1600,9 +1606,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	 * stride.
 	 * Otherwise, enable Rx scatter if necessary.
 	 */
-	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
+	assert(mb_len >= RTE_PKTMBUF_HEADROOM * strd_headroom_en);
 	mprq_stride_size = dev->data->dev_conf.rxmode.max_rx_pkt_len +
-			   RTE_PKTMBUF_HEADROOM;
+			   RTE_PKTMBUF_HEADROOM * strd_headroom_en;
 	if (mprq_en &&
 	    desc > (1U << config->mprq.stride_num_n) &&
 	    mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
@@ -1614,9 +1620,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
 					      config->mprq.min_stride_size_n);
 		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
-		tmpl->rxq.mprq_max_memcpy_len =
-			RTE_MIN(mb_len - RTE_PKTMBUF_HEADROOM,
-				config->mprq.max_memcpy_len);
+		tmpl->rxq.strd_headroom_en = strd_headroom_en;
+		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(mb_len -
+				RTE_PKTMBUF_HEADROOM, config->mprq.max_memcpy_len);
 		DRV_LOG(DEBUG,
 			"port %u Rx queue %u: Multi-Packet RQ is enabled"
 			" strd_num_n = %u, strd_sz_n = %u",

--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c

@@ -1540,6 +1540,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	unsigned int i = 0;
 	uint32_t rq_ci = rxq->rq_ci;
 	uint16_t consumed_strd = rxq->consumed_strd;
+	uint16_t headroom_sz = rxq->strd_headroom_en * RTE_PKTMBUF_HEADROOM;
 	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
 
 	while (i < pkts_n) {
@@ -1650,7 +1651,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			rte_atomic16_add_return(&buf->refcnt, 1);
 			assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
 			       strd_n + 1);
-			buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
+			buf_addr = RTE_PTR_SUB(addr, headroom_sz);
 			/*
 			 * MLX5 device doesn't use iova but it is necessary in a
 			 * case where the Rx packet is transmitted via a
@@ -1668,7 +1669,8 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			 */
 			rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
 						  buf_len, shinfo);
-			rte_pktmbuf_reset_headroom(pkt);
+			/* Set mbuf head-room. */
+			pkt->data_off = headroom_sz;
 			assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
 			/*
 			 * Prevent potential overflow due to MTU change through

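The replaced rte_pktmbuf_reset_headroom() call always set data_off to
RTE_PKTMBUF_HEADROOM (capped at the buffer length); writing data_off =
headroom_sz instead pairs with the earlier RTE_PTR_SUB(addr, headroom_sz),
so the packet data pointer still lands exactly on the stride address the
HW wrote to. A toy check of that invariant (standalone, with an arbitrary
buffer and offset in place of the real MPRQ stride memory):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RTE_PKTMBUF_HEADROOM 128

int main(void)
{
	char stride[2048];         /* pretend this is one MPRQ stride */
	char *addr = stride + 256; /* where the HW wrote the packet data */

	for (uint16_t strd_headroom_en = 0; strd_headroom_en <= 1;
	     strd_headroom_en++) {
		uint16_t headroom_sz = strd_headroom_en * RTE_PKTMBUF_HEADROOM;
		/*
		 * The Rx path rewinds the attached buffer start by the
		 * headroom, then sets data_off forward by the same amount,
		 * so buf_addr + data_off always lands back on addr.
		 */
		char *buf_addr = addr - headroom_sz; /* RTE_PTR_SUB(addr, headroom_sz) */
		char *data = buf_addr + headroom_sz; /* buf_addr + pkt->data_off */

		assert(data == addr);
		printf("headroom_sz=%3u: data pointer lands on addr\n",
		       (unsigned)headroom_sz);
	}
	return 0;
}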
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h

@@ -114,7 +114,8 @@ struct mlx5_rxq_data {
 	unsigned int strd_sz_n:4; /* Log 2 of stride size. */
 	unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */
 	unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
-	unsigned int :4; /* Remaining bits. */
+	unsigned int strd_headroom_en:1; /* Enable mbuf headroom in MPRQ. */
+	unsigned int :3; /* Remaining bits. */
 	volatile uint32_t *rq_db;
 	volatile uint32_t *cq_db;
 	uint16_t port_id;
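
Since the new flag is carved out of the previously reserved bits, the
bitfield word keeps its size. A quick standalone check, with the field
set trimmed to the ones shown above:

#include <stdio.h>

/* Trimmed stand-ins for the tail of the struct's bitfield word. */
struct before {
	unsigned int strd_sz_n:4;
	unsigned int strd_shift_en:1;
	unsigned int err_state:2;
	unsigned int :4; /* remaining bits */
};

struct after {
	unsigned int strd_sz_n:4;
	unsigned int strd_shift_en:1;
	unsigned int err_state:2;
	unsigned int strd_headroom_en:1; /* new flag, taken from the spare bits */
	unsigned int :3; /* remaining bits */
};

int main(void)
{
	/* Both layouts still pack into a single storage unit. */
	printf("sizeof(before)=%zu, sizeof(after)=%zu\n",
	       sizeof(struct before), sizeof(struct after));
	return 0;
}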