net/mlx5: fix tunnel offloads cap query

The query for the tunnel stateless offloads is wrongly implemented,
for two reasons:

1. It uses the PCI device id to infer which offloads are supported.
2. It relies on a Verbs compilation flag that no longer exists.

The root cause is the lack of a proper API in Verbs.

Fix the query to use the rdma-core API. The capability returned by
rdma-core refers to both the Tx and Rx sides.
Even though rdma-core exposes separate caps for GRE and VXLAN, the
implementation merges them into a single flag in order to simplify the
checks on the data path (see the sketch after the sign-offs below).

Fixes: 43e9d9794cde ("net/mlx5: support upstream rdma-core")
Fixes: f5fde5205101 ("net/mlx5: add hardware checksum offload for tunnel packets")
Cc: stable@dpdk.org

Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Xueming Li <xuemingl@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
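
For reference, here is a minimal standalone sketch of the query described
above. It is not the driver code: it calls rdma-core's mlx5dv_query_device()
directly instead of the driver's mlx5_glue wrapper, it assumes an rdma-core
recent enough to expose MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS (otherwise the
HAVE_IBV_DEVICE_TUNNEL_SUPPORT guard in the diff compiles the query out), and
the helper name query_tunnel_en() is purely illustrative.

#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

/* Return 1 when both VXLAN and GRE stateless offloads are reported,
 * mirroring how the patch collapses the two caps into a single flag. */
static int
query_tunnel_en(struct ibv_context *ctx)
{
	struct mlx5dv_context dv_attr = { .comp_mask = 0 };

	/* Ask explicitly for the tunnel offloads part of the reply. */
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
	if (mlx5dv_query_device(ctx, &dv_attr))
		return 0;
	/* The device may not fill this section of the reply at all. */
	if (!(dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS))
		return 0;
	return (dv_attr.tunnel_offloads_caps &
		MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
	       (dv_attr.tunnel_offloads_caps &
		MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE);
}

Because the caps are ANDed, tunnel_en is set only when both VXLAN and GRE
offloads are available, which keeps the data-path checks to a single bit at
the cost of disabling the feature when only one of the two is supported.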

--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile

@@ -125,9 +125,9 @@ mlx5_autoconf.h.new: FORCE
 mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
 	$Q $(RM) -f -- '$@'
 	$Q sh -- '$<' '$@' \
-		HAVE_IBV_DEVICE_VXLAN_SUPPORT \
-		infiniband/verbs.h \
-		enum IBV_DEVICE_VXLAN_SUPPORT \
+		HAVE_IBV_DEVICE_TUNNEL_SUPPORT \
+		infiniband/mlx5dv.h \
+		enum MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS \
 		$(AUTOCONF_OUTPUT)
 	$Q sh -- '$<' '$@' \
 		HAVE_IBV_WQ_FLAG_RX_END_PADDING \
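
For context, auto-config-h.sh probes the given header (here
infiniband/mlx5dv.h) for the named enum value and appends the corresponding
HAVE_* define to mlx5_autoconf.h, which gates the C code in the hunks below.
The exact wording of the generated file is up to the script, so treat this
excerpt only as an assumption of its effect:

/* Hypothetical excerpt of the generated mlx5_autoconf.h, emitted only
 * when infiniband/mlx5dv.h defines MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS. */
#define HAVE_IBV_DEVICE_TUNNEL_SUPPORT 1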

--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c

@@ -584,7 +584,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	unsigned int tunnel_en = 0;
 	int idx;
 	int i;
-	struct mlx5dv_context attrs_out;
+	struct mlx5dv_context attrs_out = {0};
 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
 	struct ibv_counter_set_description cs_desc;
 #endif
@@ -633,20 +633,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) ||
 		      (pci_dev->id.device_id ==
 		       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF));
-		switch (pci_dev->id.device_id) {
-		case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
-			tunnel_en = 1;
-			break;
-		case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
-		case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
-		case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
-		case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
-		case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
-			tunnel_en = 1;
-			break;
-		default:
-			break;
-		}
 		INFO("PCI information matches, using device \"%s\""
 		     " (SR-IOV: %s)",
 		     list[i]->name,
@@ -675,6 +661,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
 	 * as all ConnectX-5 devices.
 	 */
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+	attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
+#endif
 	mlx5_glue->dv_query_device(attr_ctx, &attrs_out);
 	if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
 		if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
@@ -693,6 +682,17 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		cqe_comp = 0;
 	else
 		cqe_comp = 1;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+	if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
+		tunnel_en = ((attrs_out.tunnel_offloads_caps &
+			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
+			     (attrs_out.tunnel_offloads_caps &
+			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
+	}
+	DEBUG("Tunnel offloading is %ssupported", tunnel_en ? "" : "not ");
+#else
+	WARN("Tunnel offloading disabled due to old OFED/rdma-core version");
+#endif
 	if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr))
 		goto error;
 	INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);
@@ -838,15 +838,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 			   IBV_DEVICE_RAW_IP_CSUM);
 		DEBUG("checksum offloading is %ssupported",
 		      (config.hw_csum ? "" : "not "));
-#ifdef HAVE_IBV_DEVICE_VXLAN_SUPPORT
-		config.hw_csum_l2tun =
-				!!(exp_device_attr.exp_device_cap_flags &
-				   IBV_DEVICE_VXLAN_SUPPORT);
-#endif
-		DEBUG("Rx L2 tunnel checksum offloads are %ssupported",
-		      (config.hw_csum_l2tun ? "" : "not "));
 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
 		config.flow_counter_en = !!(device_attr.max_counter_sets);
 		mlx5_glue->describe_counter_set(ctx, 0, &cs_desc);

--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h

@@ -75,13 +75,13 @@ TAILQ_HEAD(mlx5_flows, rte_flow);
  */
 struct mlx5_dev_config {
 	unsigned int hw_csum:1; /* Checksum offload is supported. */
-	unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
 	unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
 	unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
 	unsigned int hw_padding:1; /* End alignment padding is supported. */
 	unsigned int sriov:1; /* This is a VF or PF with VF devices. */
 	unsigned int mps:2; /* Multi-packet send supported mode. */
-	unsigned int tunnel_en:1; /* Whether tunnel is supported. */
+	unsigned int tunnel_en:1;
+	/* Whether tunnel stateless offloads are supported. */
 	unsigned int flow_counter_en:1; /* Whether flow counter is supported. */
 	unsigned int cqe_comp:1; /* CQE compression is enabled. */
 	unsigned int tso:1; /* Whether TSO is supported. */

--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c

@@ -1006,7 +1006,7 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
 	/* Toggle RX checksum offload if hardware supports it. */
 	tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
 	tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) &&
-				priv->config.hw_csum_l2tun);
+				priv->config.tunnel_en);
 	tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
 	/* Configure VLAN stripping. */
 	tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);