net/mlx4: restore Rx offloads

This patch adds hardware offloading support for IPv4, UDP and TCP checksum
verification, including inner/outer checksums on supported tunnel types.

It also restores packet type recognition support.

Signed-off-by: Vasily Philipov <vasilyf@mellanox.com>
Signed-off-by: Moti Haimovsky <motih@mellanox.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
This commit is contained in:
Moti Haimovsky 2017-10-12 14:29:59 +02:00 committed by Ferruh Yigit
parent 5db1d36408
commit 9f57340a80
6 changed files with 158 additions and 3 deletions

View File

@ -24,6 +24,7 @@ L3 checksum offload = Y
L4 checksum offload = Y
Inner L3 checksum = Y
Inner L4 checksum = Y
Packet type parsing = Y
Basic stats = Y
Stats per queue = Y
Other kdrv = Y

View File

@ -767,10 +767,14 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->max_mac_addrs = RTE_DIM(priv->mac);
info->rx_offload_capa = 0;
info->tx_offload_capa = 0;
if (priv->hw_csum)
if (priv->hw_csum) {
info->tx_offload_capa |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM);
info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM);
}
if (priv->hw_csum_l2tun)
info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
if (mlx4_get_ifname(priv, &ifname) == 0)

View File

@ -70,6 +70,14 @@
#define MLX4_SIZE_TO_TXBBS(size) \
(RTE_ALIGN((size), (MLX4_TXBB_SIZE)) >> (MLX4_TXBB_SHIFT))
/* CQE checksum flags. */
enum {
	MLX4_CQE_L2_TUNNEL_IPV4 = (int)(1u << 25), /**< Tunnel outer header is IPv4. */
	MLX4_CQE_L2_TUNNEL_L4_CSUM = (int)(1u << 26), /**< Inner L4 checksum OK. */
	MLX4_CQE_L2_TUNNEL = (int)(1u << 27), /**< Packet is a recognized L2 tunnel. */
	MLX4_CQE_L2_TUNNEL_IPOK = (int)(1u << 31), /**< Inner IP checksum OK; cast avoids overflow of signed int. */
};
/* Send queue information. */
struct mlx4_sq {
uint8_t *buf; /**< SQ buffer. */
@ -119,4 +127,25 @@ mlx4_get_cqe(struct mlx4_cq *cq, uint32_t index)
(cq->cqe_64 << 5));
}
/**
 * Transpose a flag in a value.
 *
 * Moves the bit(s) selected by @p from to the position of @p to by
 * multiplying or dividing with the ratio between the two masks.
 *
 * @param val
 *   Input value.
 * @param from
 *   Flag to retrieve from input value.
 * @param to
 *   Flag to set in output value.
 *
 * @return
 *   Output value with transposed flag enabled if present on input.
 */
static inline uint64_t
mlx4_transpose(uint64_t val, uint64_t from, uint64_t to)
{
	uint64_t masked = val & from;

	if (from >= to)
		return masked / (from / to);
	return masked * (to / from);
}
#endif /* MLX4_PRM_H_ */

View File

@ -464,6 +464,11 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
.sges_n = 0,
.elts_n = rte_log2_u32(desc),
.elts = elts,
/* Toggle Rx checksum offload if hardware supports it. */
.csum = (priv->hw_csum &&
dev->data->dev_conf.rxmode.hw_ip_checksum),
.csum_l2tun = (priv->hw_csum_l2tun &&
dev->data->dev_conf.rxmode.hw_ip_checksum),
.stats.idx = idx,
.socket = socket,
};

View File

@ -556,6 +556,107 @@ stop:
return i;
}
/**
 * Translate Rx completion flags to packet type.
 *
 * @param flags
 *   Rx completion flags returned by mlx4_cqe_flags().
 *
 * @return
 *   Packet type in mbuf format.
 */
static inline uint32_t
rxq_cq_to_pkt_type(uint32_t flags)
{
	/* Non-tunneled packet: only the outer L3 type is reported. */
	if (!(flags & MLX4_CQE_L2_TUNNEL))
		return mlx4_transpose(flags,
				      MLX4_CQE_STATUS_IPV4_PKT,
				      RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
	/* Tunneled packet: report both outer and inner L3 types. */
	return mlx4_transpose(flags,
			      MLX4_CQE_L2_TUNNEL_IPV4,
			      RTE_PTYPE_L3_IPV4_EXT_UNKNOWN) |
	       mlx4_transpose(flags,
			      MLX4_CQE_STATUS_IPV4_PKT,
			      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN);
}
/**
 * Translate Rx completion flags to offload flags.
 *
 * @param flags
 *   Rx completion flags returned by mlx4_cqe_flags().
 * @param csum
 *   Whether Rx checksums are enabled.
 * @param csum_l2tun
 *   Whether Rx L2 tunnel checksums are enabled.
 *
 * @return
 *   Offload flags (ol_flags) in mbuf format.
 */
static inline uint32_t
rxq_cq_to_ol_flags(uint32_t flags, int csum, int csum_l2tun)
{
	uint32_t ol_flags = 0;

	if (csum) {
		/* Outer IP and L4 checksum status. */
		ol_flags |= mlx4_transpose(flags,
					   MLX4_CQE_STATUS_IP_HDR_CSUM_OK,
					   PKT_RX_IP_CKSUM_GOOD);
		ol_flags |= mlx4_transpose(flags,
					   MLX4_CQE_STATUS_TCP_UDP_CSUM_OK,
					   PKT_RX_L4_CKSUM_GOOD);
	}
	if (csum_l2tun && (flags & MLX4_CQE_L2_TUNNEL)) {
		/* Inner (tunneled) IP and L4 checksum status. */
		ol_flags |= mlx4_transpose(flags,
					   MLX4_CQE_L2_TUNNEL_IPOK,
					   PKT_RX_IP_CKSUM_GOOD);
		ol_flags |= mlx4_transpose(flags,
					   MLX4_CQE_L2_TUNNEL_L4_CSUM,
					   PKT_RX_L4_CKSUM_GOOD);
	}
	return ol_flags;
}
/**
 * Extract checksum information from CQE flags.
 *
 * @param cqe
 *   Pointer to CQE structure.
 * @param csum
 *   Whether Rx checksums are enabled.
 * @param csum_l2tun
 *   Whether Rx L2 tunnel checksums are enabled.
 *
 * @return
 *   CQE checksum information.
 */
static inline uint32_t
mlx4_cqe_flags(struct mlx4_cqe *cqe, int csum, int csum_l2tun)
{
	uint32_t flags = 0;

	/*
	 * The relevant bits occupy distinct positions within their
	 * respective CQE fields, so both contributions can be merged
	 * into a single 32-bit value without colliding.
	 */
	if (csum)
		flags |= rte_be_to_cpu_32(cqe->status) &
			 MLX4_CQE_STATUS_IPV4_CSUM_OK;
	if (csum_l2tun)
		flags |= rte_be_to_cpu_32(cqe->vlan_my_qpn) &
			 (MLX4_CQE_L2_TUNNEL |
			  MLX4_CQE_L2_TUNNEL_IPOK |
			  MLX4_CQE_L2_TUNNEL_L4_CSUM |
			  MLX4_CQE_L2_TUNNEL_IPV4);
	return flags;
}
/**
* Poll one CQE from CQ.
*
@ -664,8 +765,21 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
goto skip;
}
pkt = seg;
pkt->packet_type = 0;
pkt->ol_flags = 0;
if (rxq->csum | rxq->csum_l2tun) {
uint32_t flags =
mlx4_cqe_flags(cqe,
rxq->csum,
rxq->csum_l2tun);
pkt->ol_flags =
rxq_cq_to_ol_flags(flags,
rxq->csum,
rxq->csum_l2tun);
pkt->packet_type = rxq_cq_to_pkt_type(flags);
} else {
pkt->packet_type = 0;
pkt->ol_flags = 0;
}
pkt->pkt_len = len;
}
rep->nb_segs = 1;

View File

@ -78,6 +78,8 @@ struct rxq {
struct rte_mbuf *(*elts)[]; /**< Rx elements. */
volatile struct mlx4_wqe_data_seg (*wqes)[]; /**< HW queue entries. */
volatile uint32_t *rq_db; /**< RQ doorbell record. */
uint32_t csum:1; /**< Enable checksum offloading. */
uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */
struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */
struct mlx4_rxq_stats stats; /**< Rx queue counters. */
unsigned int socket; /**< CPU socket ID for allocations. */