net/mlx5: fix Tx checksum offloads
Tx checksum offloads are handled correctly in only a single Tx burst
function, whereas the capability is always set. This causes VXLAN packets
with a checksum offload request to be ignored when the (E)MPS Tx functions
are selected.
Fixes: f5fde52051 ("net/mlx5: add hardware checksum offload for tunnel packets")
Cc: stable@dpdk.org
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
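
Before the diff, it helps to see the offload request that was being lost. The
following is a minimal sketch, not part of the patch: the mbuf flags an
application sets on a VXLAN-encapsulated TCP packet to request inner and
outer checksum offload. The flag names are the standard rte_mbuf ones; the
helper name request_vxlan_tx_csum() is purely illustrative.

#include <rte_mbuf.h>

/*
 * Illustrative only: request HW checksum offload for a VXLAN-encapsulated
 * TCP packet.  The header length fields (outer_l2_len, outer_l3_len,
 * l2_len, l3_len, l4_len) must also be filled in as usual; they are
 * omitted here to keep the sketch short.
 */
static void
request_vxlan_tx_csum(struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_OUTER_IPV4 |     /* outer header is IPv4     */
		       PKT_TX_OUTER_IP_CKSUM | /* outer IP checksum in HW  */
		       PKT_TX_TUNNEL_VXLAN |   /* VXLAN encapsulation      */
		       PKT_TX_IPV4 |           /* inner header is IPv4     */
		       PKT_TX_IP_CKSUM |       /* inner IP checksum in HW  */
		       PKT_TX_TCP_CKSUM;       /* inner TCP checksum in HW */
}

Before this fix, only mlx5_tx_burst() honored the tunnel part of such a
request; the MPW/EMPW and vectorized paths reduced it to the plain L3/L4
checksum flags even though the capability was advertised.
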
@@ -800,7 +800,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
 				 IBV_DEVICE_VXLAN_SUPPORT);
 #endif
-	DEBUG("L2 tunnel checksum offloads are %ssupported",
+	DEBUG("Rx L2 tunnel checksum offloads are %ssupported",
 	      (priv->hw_csum_l2tun ? "" : "not "));
 
 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
@@ -372,7 +372,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
 	uint16_t tso_header_sz = 0;
 	uint16_t ehdr;
-	uint8_t cs_flags = 0;
+	uint8_t cs_flags;
 	uint64_t tso = 0;
 	uint16_t tso_segsz = 0;
 #ifdef MLX5_PMD_SOFT_COUNTERS
@@ -415,23 +415,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		if (pkts_n - i > 1)
 			rte_prefetch0(
 			    rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
-		/* Should we enable HW CKSUM offload */
-		if (buf->ol_flags &
-		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
-			const uint64_t is_tunneled = buf->ol_flags &
-						     (PKT_TX_TUNNEL_GRE |
-						      PKT_TX_TUNNEL_VXLAN);
-
-			if (is_tunneled && txq->tunnel_en) {
-				cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
-					   MLX5_ETH_WQE_L4_INNER_CSUM;
-				if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
-					cs_flags |= MLX5_ETH_WQE_L3_CSUM;
-			} else {
-				cs_flags = MLX5_ETH_WQE_L3_CSUM |
-					   MLX5_ETH_WQE_L4_CSUM;
-			}
-		}
+		cs_flags = txq_ol_cksum_to_cs(txq, buf);
 		raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
 		/* Replace the Ethernet type by the VLAN if necessary. */
 		if (buf->ol_flags & PKT_TX_VLAN_PKT) {
@@ -845,7 +829,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		struct rte_mbuf *buf = *(pkts++);
 		uint32_t length;
 		unsigned int segs_n = buf->nb_segs;
-		uint32_t cs_flags = 0;
+		uint32_t cs_flags;
 
 		/*
 		 * Make sure there is enough room to store this packet and
@@ -861,10 +845,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		}
 		max_elts -= segs_n;
 		--pkts_n;
-		/* Should we enable HW CKSUM offload */
-		if (buf->ol_flags &
-		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
-			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+		cs_flags = txq_ol_cksum_to_cs(txq, buf);
 		/* Retrieve packet information. */
 		length = PKT_LEN(buf);
 		assert(length);
@@ -1070,7 +1051,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 		uintptr_t addr;
 		uint32_t length;
 		unsigned int segs_n = buf->nb_segs;
-		uint32_t cs_flags = 0;
+		uint8_t cs_flags;
 
 		/*
 		 * Make sure there is enough room to store this packet and
@@ -1091,10 +1072,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 		 * iteration.
 		 */
 		max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
-		/* Should we enable HW CKSUM offload */
-		if (buf->ol_flags &
-		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
-			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+		cs_flags = txq_ol_cksum_to_cs(txq, buf);
 		/* Retrieve packet information. */
 		length = PKT_LEN(buf);
 		/* Start new session if packet differs. */
@@ -1363,7 +1341,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		unsigned int do_inline = 0; /* Whether inline is possible. */
 		uint32_t length;
 		unsigned int segs_n = buf->nb_segs;
-		uint32_t cs_flags = 0;
+		uint8_t cs_flags;
 
 		/*
 		 * Make sure there is enough room to store this packet and
@@ -1377,10 +1355,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			txq->stats.oerrors++;
 			break;
 		}
-		/* Should we enable HW CKSUM offload. */
-		if (buf->ol_flags &
-		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
-			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+		cs_flags = txq_ol_cksum_to_cs(txq, buf);
 		/* Retrieve packet information. */
 		length = PKT_LEN(buf);
 		/* Start new session if:
@@ -616,4 +616,38 @@ mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
 	mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
 }
 
+/**
+ * Convert the Checksum offloads to Verbs.
+ *
+ * @param txq_data
+ *   Pointer to the Tx queue.
+ * @param buf
+ *   Pointer to the mbuf.
+ *
+ * @return
+ *   the converted cs_flags.
+ */
+static __rte_always_inline uint8_t
+txq_ol_cksum_to_cs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf)
+{
+	uint8_t cs_flags = 0;
+
+	/* Should we enable HW CKSUM offload */
+	if (buf->ol_flags &
+	    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+		if (txq_data->tunnel_en &&
+		    (buf->ol_flags &
+		     (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN))) {
+			cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
+				   MLX5_ETH_WQE_L4_INNER_CSUM;
+			if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+				cs_flags |= MLX5_ETH_WQE_L3_CSUM;
+		} else {
+			cs_flags = MLX5_ETH_WQE_L3_CSUM |
+				   MLX5_ETH_WQE_L4_CSUM;
+		}
+	}
+	return cs_flags;
+}
+
 #endif /* RTE_PMD_MLX5_RXTX_H_ */
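
To make the helper's behavior concrete, here is a hypothetical sanity check,
not part of the patch, for the VXLAN request shown earlier. It assumes the
PMD-internal header mlx5_rxtx.h is reachable on the include path (it is not
part of the installed DPDK API) and only exercises the flag-mapping logic.

#include <assert.h>
#include "mlx5_rxtx.h"   /* PMD-internal header, assumed reachable */

static void
check_vxlan_cs_flags(void)
{
	struct mlx5_txq_data txq = { .tunnel_en = 1 };
	struct rte_mbuf m = {
		.ol_flags = PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
			    PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IP_CKSUM,
	};

	/* Tunneled packet on a tunnel-enabled queue: inner L3/L4 checksum
	 * bits plus the outer L3 bit requested via PKT_TX_OUTER_IP_CKSUM. */
	assert(txq_ol_cksum_to_cs(&txq, &m) ==
	       (MLX5_ETH_WQE_L3_INNER_CSUM | MLX5_ETH_WQE_L4_INNER_CSUM |
		MLX5_ETH_WQE_L3_CSUM));
}

Defining txq_ol_cksum_to_cs() as static __rte_always_inline in mlx5_rxtx.h
lets the scalar, MPW/EMPW and vectorized (SSE/NEON) burst functions in the
hunks below share a single implementation with no call overhead, removing
the per-function divergence that caused the bug.
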
@@ -123,24 +123,7 @@ txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 	for (pos = 1; pos < pkts_n; ++pos)
 		if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
 			break;
-	/* Should open another MPW session for the rest. */
-	if (pkts[0]->ol_flags &
-	    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
-		const uint64_t is_tunneled =
-			pkts[0]->ol_flags &
-			(PKT_TX_TUNNEL_GRE |
-			 PKT_TX_TUNNEL_VXLAN);
-
-		if (is_tunneled && txq->tunnel_en) {
-			*cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
-				    MLX5_ETH_WQE_L4_INNER_CSUM;
-			if (pkts[0]->ol_flags & PKT_TX_OUTER_IP_CKSUM)
-				*cs_flags |= MLX5_ETH_WQE_L3_CSUM;
-		} else {
-			*cs_flags = MLX5_ETH_WQE_L3_CSUM |
-				    MLX5_ETH_WQE_L4_CSUM;
-		}
-	}
+	*cs_flags = txq_ol_cksum_to_cs(txq, pkts[0]);
 	return pos;
 }
 
@@ -149,7 +149,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 		11, 10,  9,  8, /* bswap32 */
 		12, 13, 14, 15
 	};
-	uint8_t cs_flags = 0;
+	uint8_t cs_flags;
 	uint16_t max_elts;
 	uint16_t max_wqe;
 	uint8x16_t *t_wqe;
@@ -168,22 +168,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 			break;
 		wqe = &((volatile struct mlx5_wqe64 *)
 			txq->wqes)[wqe_ci & wq_mask].hdr;
-		if (buf->ol_flags &
-		     (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
-			const uint64_t is_tunneled =
-				buf->ol_flags & (PKT_TX_TUNNEL_GRE |
-						 PKT_TX_TUNNEL_VXLAN);
-
-			if (is_tunneled && txq->tunnel_en) {
-				cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
-					   MLX5_ETH_WQE_L4_INNER_CSUM;
-				if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
-					cs_flags |= MLX5_ETH_WQE_L3_CSUM;
-			} else {
-				cs_flags = MLX5_ETH_WQE_L3_CSUM |
-					   MLX5_ETH_WQE_L4_CSUM;
-			}
-		}
+		cs_flags = txq_ol_cksum_to_cs(txq, buf);
 		/* Title WQEBB pointer. */
 		t_wqe = (uint8x16_t *)wqe;
 		dseg = (uint8_t *)(wqe + 1);
@@ -148,7 +148,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 			 8,  9, 10, 11, /* bswap32 */
 			 4,  5,  6,  7, /* bswap32 */
 			 0,  1,  2,  3  /* bswap32 */);
-	uint8_t cs_flags = 0;
+	uint8_t cs_flags;
 	uint16_t max_elts;
 	uint16_t max_wqe;
 	__m128i *t_wqe, *dseg;
@@ -170,22 +170,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 		}
 		wqe = &((volatile struct mlx5_wqe64 *)
 			txq->wqes)[wqe_ci & wq_mask].hdr;
-		if (buf->ol_flags &
-		     (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
-			const uint64_t is_tunneled =
-				buf->ol_flags & (PKT_TX_TUNNEL_GRE |
-						 PKT_TX_TUNNEL_VXLAN);
-
-			if (is_tunneled && txq->tunnel_en) {
-				cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
-					   MLX5_ETH_WQE_L4_INNER_CSUM;
-				if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
-					cs_flags |= MLX5_ETH_WQE_L3_CSUM;
-			} else {
-				cs_flags = MLX5_ETH_WQE_L3_CSUM |
-					   MLX5_ETH_WQE_L4_CSUM;
-			}
-		}
+		cs_flags = txq_ol_cksum_to_cs(txq, buf);
 		/* Title WQEBB pointer. */
 		t_wqe = (__m128i *)wqe;
 		dseg = (__m128i *)(wqe + 1);