numam-dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c

/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
* Copyright 2017 Mellanox Technologies, Ltd
*/
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#if defined RTE_ARCH_X86_64
#include "mlx5_rxtx_vec_sse.h"
#elif defined RTE_ARCH_ARM64
#include "mlx5_rxtx_vec_neon.h"
#else
#error "This should not be compiled if SIMD instructions are not supported."
#endif
/**
 * Skip error packets.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
static uint16_t
rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
			 uint16_t pkts_n)
{
	uint16_t n = 0;
	unsigned int i;
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t err_bytes = 0;
#endif

	for (i = 0; i < pkts_n; ++i) {
		struct rte_mbuf *pkt = pkts[i];

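		/*
		 * Mbufs completed with an error are expected to carry
		 * RTE_PTYPE_ALL_MASK as packet_type; drop them, and drop
		 * everything while the queue is still in the error state.
		 */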
		if (pkt->packet_type == RTE_PTYPE_ALL_MASK || rxq->err_state) {
#ifdef MLX5_PMD_SOFT_COUNTERS
			err_bytes += PKT_LEN(pkt);
#endif
			rte_pktmbuf_free_seg(pkt);
		} else {
			pkts[n++] = pkt;
		}
	}
	rxq->stats.idropped += (pkts_n - n);
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Correct counters of errored completions. */
	rxq->stats.ipackets -= (pkts_n - n);
	rxq->stats.ibytes -= err_bytes;
#endif
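	/* Try to recover the queue from the error state back to ready. */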
	mlx5_rx_err_handle(rxq, 1);
	return n;
}
/**
 * DPDK callback for vectorized RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	uint16_t nb_rx;
	uint64_t err = 0;

	nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err);
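	/*
	 * rxq_burst_v() reports error completions through 'err'; combine it
	 * with the queue error state so the slow error path is taken only
	 * when needed.
	 */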
	if (unlikely(err | rxq->err_state))
		nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
	return nb_rx;
}
/**
 * Check whether an RX queue can support vectorized RX.
 *
 * @param rxq
 *   Pointer to RX queue.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __attribute__((cold))
mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
	struct mlx5_rxq_ctrl *ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);

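	/*
	 * The vectorized path handles neither multi-packet RQ nor scattered
	 * Rx (sges_n != 0), and honors the rx_vec_en configuration switch.
	 */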
	if (mlx5_mprq_enabled(ETH_DEV(ctrl->priv)))
		return -ENOTSUP;
	if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
		return -ENOTSUP;
	return 1;
}
/**
 * Check whether a device can support vectorized RX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __attribute__((cold))
mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t i;

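	/*
	 * Vectorized Rx is enabled for the whole port or not at all: it must
	 * be allowed by rx_vec_en and is incompatible with MPRQ and LRO.
	 */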
	if (!priv->config.rx_vec_en)
		return -ENOTSUP;
	if (mlx5_mprq_enabled(dev))
		return -ENOTSUP;
	if (mlx5_lro_on(dev))
		return -ENOTSUP;
	/* All the configured queues must support vectorized Rx. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];

		if (!rxq)
			continue;
		if (mlx5_rxq_check_vec_support(rxq) < 0)
			break;
	}
	if (i != priv->rxqs_n)
		return -ENOTSUP;
	return 1;
}