net/mlx5: update prerequisites for upcoming enhancements

The latest version of Mellanox OFED exposes hardware definitions necessary
to implement data path operations bypassing Verbs. Update the minimum
version requirement to MLNX_OFED >= 3.3 and clean up compatibility checks
for previous releases.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Nélio Laranjeiro, 2016-06-24 15:17:48 +02:00, committed by Bruce Richardson
parent 0cdddf4d06
commit 36271e7446
12 changed files with 18 additions and 171 deletions
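Most of the 171 deleted lines follow a single pattern, repeated across the files below: a compatibility #ifdef guarding an experimental Verbs feature is dropped and the previously guarded code is kept unconditionally. A minimal sketch of the pattern, modeled on one of the guards actually removed in this commit (illustrative excerpt, not verbatim driver code):

/* Before: the ops were compiled in only when build-time detection
 * defined the corresponding HAVE_* macro in mlx5_autoconf.h. */
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */

/* After: MLNX_OFED >= 3.3 always provides the definitions, so the
 * guard goes away and the entries become unconditional. */
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,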


@@ -125,16 +125,6 @@ These options can be modified in the ``.config`` file.
Environment variables
~~~~~~~~~~~~~~~~~~~~~
- ``MLX5_ENABLE_CQE_COMPRESSION``
A nonzero value lets ConnectX-4 return smaller completion entries to
improve performance when PCI backpressure is detected. It is most useful
for scenarios involving heavy traffic on many queues.
Since the additional software logic necessary to handle this mode can
lower performance when there is no backpressure, it is not enabled by
default.
- ``MLX5_PMD_ENABLE_PADDING``
Enables HW packet padding in PCI bus transactions.
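Both variables are read from the environment at run time. A minimal sketch of how such a variable can be parsed, modeled on the mlx5_getenv_int() helper used in the rxq code further down (the helper's exact definition is an assumption here); an unset variable behaves as 0, i.e. disabled:

#include <stdlib.h>

/* Sketch: parse an integer environment variable, treating an unset
 * variable as 0 (feature disabled). */
static int
mlx5_getenv_int(const char *name)
{
	const char *val = getenv(name);

	if (val == NULL)
		return 0;
	return atoi(val);
}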
@@ -211,40 +201,12 @@ DPDK and must be installed separately:
Currently supported by DPDK:
- Mellanox OFED **3.1-1.0.3**, **3.1-1.5.7.1** or **3.2-2.0.0.0** depending
on usage.
The following features are supported with version **3.1-1.5.7.1** and
above only:
- IPv6, UDPv6, TCPv6 RSS.
- RX checksum offloads.
- IBM POWER8.
The following features are supported with version **3.2-2.0.0.0** and
above only:
- Flow director.
- RX VLAN stripping.
- TX VLAN insertion.
- RX CRC stripping configuration.
- Mellanox OFED **3.3-1.0.0.0**.
- Minimum firmware version:
With MLNX_OFED **3.1-1.0.3**:
- ConnectX-4: **12.12.1240**
- ConnectX-4 Lx: **14.12.1100**
With MLNX_OFED **3.1-1.5.7.1**:
- ConnectX-4: **12.13.0144**
- ConnectX-4 Lx: **14.13.0144**
With MLNX_OFED **3.2-2.0.0.0**:
- ConnectX-4: **12.14.2036**
- ConnectX-4 Lx: **14.14.2036**
- ConnectX-4: **12.16.1006**
- ConnectX-4 Lx: **14.16.1006**
Getting Mellanox OFED
~~~~~~~~~~~~~~~~~~~~~


@@ -105,44 +105,21 @@ mlx5_autoconf.h.new: FORCE
mlx5_autoconf.h.new: $(RTE_SDK)/scripts/auto-config-h.sh
$Q $(RM) -f -- '$@'
$Q sh -- '$<' '$@' \
HAVE_EXP_QUERY_DEVICE \
infiniband/verbs.h \
type 'struct ibv_exp_device_attr' $(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_FLOW_SPEC_IPV6 \
infiniband/verbs.h \
type 'struct ibv_exp_flow_spec_ipv6' $(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR \
infiniband/verbs.h \
enum IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS \
infiniband/verbs.h \
enum IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_EXP_CQ_RX_TCP_PACKET \
infiniband/verbs.h \
enum IBV_EXP_CQ_RX_TCP_PACKET \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_VERBS_FCS \
infiniband/verbs.h \
enum IBV_EXP_CREATE_WQ_FLAG_SCATTER_FCS \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_VERBS_RX_END_PADDING \
infiniband/verbs.h \
enum IBV_EXP_CREATE_WQ_FLAG_RX_END_PADDING \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_VERBS_VLAN_INSERTION \
infiniband/verbs.h \
enum IBV_EXP_RECEIVE_WQ_CVLAN_INSERTION \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE \
infiniband/verbs_exp.h \
enum IBV_EXP_CQ_COMPRESSED_CQE \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE \
infiniband/mlx5_hw.h \
enum MLX5_ETH_VLAN_INLINE_HEADER_SIZE \
$(AUTOCONF_OUTPUT)
# Create mlx5_autoconf.h or update it in case it differs from the new one.
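Each block removed above was one invocation of auto-config-h.sh, which compile-tests the named type or enum against the given header and records the result in mlx5_autoconf.h. Roughly, a successful probe yields one HAVE_* macro (a sketch of plausible output; the exact format is defined by the script):

/* mlx5_autoconf.h (sketch): one macro per feature the probe found. */
#define HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE 1
#define HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE 1

With MLNX_OFED >= 3.3 mandatory, only the last two probes are kept, and the driver verifies them once at compile time (see the #error check below) instead of guarding each feature separately.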


@@ -195,17 +195,13 @@ static const struct eth_dev_ops mlx5_dev_ops = {
.mac_addr_add = mlx5_mac_addr_add,
.mac_addr_set = mlx5_mac_addr_set,
.mtu_set = mlx5_dev_set_mtu,
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
.vlan_offload_set = mlx5_vlan_offload_set,
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
.reta_update = mlx5_dev_rss_reta_update,
.reta_query = mlx5_dev_rss_reta_query,
.rss_hash_update = mlx5_rss_hash_update,
.rss_hash_conf_get = mlx5_rss_hash_conf_get,
#ifdef MLX5_FDIR_SUPPORT
.filter_ctrl = mlx5_dev_filter_ctrl,
#endif /* MLX5_FDIR_SUPPORT */
};
static struct {
@@ -352,24 +348,16 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ibv_pd *pd = NULL;
struct priv *priv = NULL;
struct rte_eth_dev *eth_dev;
#ifdef HAVE_EXP_QUERY_DEVICE
struct ibv_exp_device_attr exp_device_attr;
#endif /* HAVE_EXP_QUERY_DEVICE */
struct ether_addr mac;
uint16_t num_vfs = 0;
#ifdef HAVE_EXP_QUERY_DEVICE
exp_device_attr.comp_mask =
IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS |
IBV_EXP_DEVICE_ATTR_RX_HASH |
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS |
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
#ifdef HAVE_VERBS_RX_END_PADDING
IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN |
#endif /* HAVE_VERBS_RX_END_PADDING */
0;
#endif /* HAVE_EXP_QUERY_DEVICE */
DEBUG("using port %u (%08" PRIx32 ")", port, test);
@@ -420,7 +408,6 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->port = port;
priv->pd = pd;
priv->mtu = ETHER_MTU;
#ifdef HAVE_EXP_QUERY_DEVICE
if (ibv_exp_query_device(ctx, &exp_device_attr)) {
ERROR("ibv_exp_query_device() failed");
goto port_error;
@@ -446,30 +433,20 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE;
DEBUG("maximum RX indirection table size is %u",
priv->ind_table_max_size);
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
priv->hw_vlan_strip = !!(exp_device_attr.wq_vlan_offloads_cap &
IBV_EXP_RECEIVE_WQ_CVLAN_STRIP);
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
DEBUG("VLAN stripping is %ssupported",
(priv->hw_vlan_strip ? "" : "not "));
#ifdef HAVE_VERBS_FCS
priv->hw_fcs_strip = !!(exp_device_attr.exp_device_cap_flags &
IBV_EXP_DEVICE_SCATTER_FCS);
#endif /* HAVE_VERBS_FCS */
DEBUG("FCS stripping configuration is %ssupported",
(priv->hw_fcs_strip ? "" : "not "));
#ifdef HAVE_VERBS_RX_END_PADDING
priv->hw_padding = !!exp_device_attr.rx_pad_end_addr_align;
#endif /* HAVE_VERBS_RX_END_PADDING */
DEBUG("hardware RX end alignment padding is %ssupported",
(priv->hw_padding ? "" : "not "));
#else /* HAVE_EXP_QUERY_DEVICE */
priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE;
#endif /* HAVE_EXP_QUERY_DEVICE */
priv_get_num_vfs(priv, &num_vfs);
priv->sriov = (num_vfs || sriov);
priv->mps = mps;


@@ -68,6 +68,11 @@
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#if !defined(HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE) || \
!defined(HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE)
#error Mellanox OFED >= 3.3 is required, please refer to the documentation.
#endif
enum {
PCI_VENDOR_ID_MELLANOX = 0x15b3,
};


@@ -76,13 +76,4 @@
/* Alarm timeout. */
#define MLX5_ALARM_TIMEOUT_US 100000
/*
* Extended flow priorities necessary to support flow director are available
* since MLNX_OFED 3.2. Considering this version adds support for VLAN
* offloads as well, their availability means flow director can be used.
*/
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
#define MLX5_FDIR_SUPPORT 1
#endif
#endif /* RTE_PMD_MLX5_DEFS_H_ */


@@ -122,7 +122,6 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
desc->type = HASH_RXQ_IPV4;
break;
#ifdef HAVE_FLOW_SPEC_IPV6
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
desc->type = HASH_RXQ_UDPV6;
break;
@@ -132,7 +131,6 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
desc->type = HASH_RXQ_IPV6;
break;
#endif /* HAVE_FLOW_SPEC_IPV6 */
default:
break;
}
@@ -147,7 +145,6 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
break;
#ifdef HAVE_FLOW_SPEC_IPV6
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
desc->src_port = fdir_filter->input.flow.udp6_flow.src_port;
@@ -161,7 +158,6 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
fdir_filter->input.flow.ipv6_flow.dst_ip,
sizeof(desc->dst_ip));
break;
#endif /* HAVE_FLOW_SPEC_IPV6 */
default:
break;
}
@@ -211,7 +207,6 @@ priv_fdir_overlap(const struct priv *priv,
(desc2->dst_ip[0] & mask->ipv4_mask.dst_ip)))
return 0;
break;
#ifdef HAVE_FLOW_SPEC_IPV6
case HASH_RXQ_IPV6:
case HASH_RXQ_UDPV6:
case HASH_RXQ_TCPV6:
@@ -222,7 +217,6 @@ priv_fdir_overlap(const struct priv *priv,
(desc2->dst_ip[i] & mask->ipv6_mask.dst_ip[i])))
return 0;
break;
#endif /* HAVE_FLOW_SPEC_IPV6 */
default:
break;
}
@@ -258,9 +252,7 @@ priv_fdir_flow_add(struct priv *priv,
uintptr_t spec_offset = (uintptr_t)&data->spec;
struct ibv_exp_flow_spec_eth *spec_eth;
struct ibv_exp_flow_spec_ipv4 *spec_ipv4;
#ifdef HAVE_FLOW_SPEC_IPV6
struct ibv_exp_flow_spec_ipv6 *spec_ipv6;
#endif /* HAVE_FLOW_SPEC_IPV6 */
struct ibv_exp_flow_spec_tcp_udp *spec_tcp_udp;
struct mlx5_fdir_filter *iter_fdir_filter;
unsigned int i;
@@ -334,7 +326,6 @@ priv_fdir_flow_add(struct priv *priv,
spec_offset += spec_ipv4->size;
break;
#ifdef HAVE_FLOW_SPEC_IPV6
case HASH_RXQ_IPV6:
case HASH_RXQ_UDPV6:
case HASH_RXQ_TCPV6:
@@ -368,7 +359,6 @@ priv_fdir_flow_add(struct priv *priv,
spec_offset += spec_ipv6->size;
break;
#endif /* HAVE_FLOW_SPEC_IPV6 */
default:
ERROR("invalid flow attribute type");
return EINVAL;


@@ -67,11 +67,9 @@ static const struct special_flow_init special_flow_init[] = {
1 << HASH_RXQ_TCPV4 |
1 << HASH_RXQ_UDPV4 |
1 << HASH_RXQ_IPV4 |
#ifdef HAVE_FLOW_SPEC_IPV6
1 << HASH_RXQ_TCPV6 |
1 << HASH_RXQ_UDPV6 |
1 << HASH_RXQ_IPV6 |
#endif /* HAVE_FLOW_SPEC_IPV6 */
1 << HASH_RXQ_ETH |
0,
.per_vlan = 0,
@@ -82,10 +80,8 @@ static const struct special_flow_init special_flow_init[] = {
.hash_types =
1 << HASH_RXQ_UDPV4 |
1 << HASH_RXQ_IPV4 |
#ifdef HAVE_FLOW_SPEC_IPV6
1 << HASH_RXQ_UDPV6 |
1 << HASH_RXQ_IPV6 |
#endif /* HAVE_FLOW_SPEC_IPV6 */
1 << HASH_RXQ_ETH |
0,
.per_vlan = 0,
@@ -96,15 +92,12 @@ static const struct special_flow_init special_flow_init[] = {
.hash_types =
1 << HASH_RXQ_UDPV4 |
1 << HASH_RXQ_IPV4 |
#ifdef HAVE_FLOW_SPEC_IPV6
1 << HASH_RXQ_UDPV6 |
1 << HASH_RXQ_IPV6 |
#endif /* HAVE_FLOW_SPEC_IPV6 */
1 << HASH_RXQ_ETH |
0,
.per_vlan = 1,
},
#ifdef HAVE_FLOW_SPEC_IPV6
[HASH_RXQ_FLOW_TYPE_IPV6MULTI] = {
.dst_mac_val = "\x33\x33\x00\x00\x00\x00",
.dst_mac_mask = "\xff\xff\x00\x00\x00\x00",
@@ -115,7 +108,6 @@ static const struct special_flow_init special_flow_init[] = {
0,
.per_vlan = 1,
},
#endif /* HAVE_FLOW_SPEC_IPV6 */
};
/**


@@ -105,7 +105,6 @@ const struct hash_rxq_init hash_rxq_init[] = {
},
.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
},
#ifdef HAVE_FLOW_SPEC_IPV6
[HASH_RXQ_TCPV6] = {
.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
IBV_EXP_RX_HASH_DST_IPV6 |
@@ -144,7 +143,6 @@ const struct hash_rxq_init hash_rxq_init[] = {
},
.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
},
#endif /* HAVE_FLOW_SPEC_IPV6 */
[HASH_RXQ_ETH] = {
.hash_fields = 0,
.dpdk_rss_hf = 0,
@@ -168,17 +166,11 @@ static const struct ind_table_init ind_table_init[] = {
1 << HASH_RXQ_TCPV4 |
1 << HASH_RXQ_UDPV4 |
1 << HASH_RXQ_IPV4 |
#ifdef HAVE_FLOW_SPEC_IPV6
1 << HASH_RXQ_TCPV6 |
1 << HASH_RXQ_UDPV6 |
1 << HASH_RXQ_IPV6 |
#endif /* HAVE_FLOW_SPEC_IPV6 */
0,
#ifdef HAVE_FLOW_SPEC_IPV6
.hash_types_n = 6,
#else /* HAVE_FLOW_SPEC_IPV6 */
.hash_types_n = 3,
#endif /* HAVE_FLOW_SPEC_IPV6 */
},
{
.max_size = 1,
@@ -243,12 +235,8 @@ priv_flow_attr(struct priv *priv, struct ibv_exp_flow_attr *flow_attr,
init = &hash_rxq_init[type];
*flow_attr = (struct ibv_exp_flow_attr){
.type = IBV_EXP_FLOW_ATTR_NORMAL,
#ifdef MLX5_FDIR_SUPPORT
/* Priorities < 3 are reserved for flow director. */
.priority = init->flow_priority + 3,
#else /* MLX5_FDIR_SUPPORT */
.priority = init->flow_priority,
#endif /* MLX5_FDIR_SUPPORT */
.num_of_specs = 0,
.port = priv->port,
.flags = 0,
@@ -589,9 +577,7 @@ priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
case HASH_RXQ_FLOW_TYPE_ALLMULTI:
return !!priv->allmulti_req;
case HASH_RXQ_FLOW_TYPE_BROADCAST:
#ifdef HAVE_FLOW_SPEC_IPV6
case HASH_RXQ_FLOW_TYPE_IPV6MULTI:
#endif /* HAVE_FLOW_SPEC_IPV6 */
/* If allmulti is enabled, broadcast and ipv6multi
* are unnecessary. */
return !priv->allmulti_req;
@@ -1040,19 +1026,13 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, uint16_t desc,
.cq = tmpl.rxq.cq,
.comp_mask =
IBV_EXP_CREATE_WQ_RES_DOMAIN |
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
IBV_EXP_CREATE_WQ_VLAN_OFFLOADS |
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
0,
.res_domain = tmpl.rd,
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
.vlan_offloads = (tmpl.rxq.vlan_strip ?
IBV_EXP_RECEIVE_WQ_CVLAN_STRIP :
0),
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
};
#ifdef HAVE_VERBS_FCS
/* By default, FCS (CRC) is stripped by hardware. */
if (dev->data->dev_conf.rxmode.hw_strip_crc) {
tmpl.rxq.crc_present = 0;
@@ -1073,9 +1053,6 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, uint16_t desc,
(void *)dev,
tmpl.rxq.crc_present ? "disabled" : "enabled",
tmpl.rxq.crc_present << 2);
#endif /* HAVE_VERBS_FCS */
#ifdef HAVE_VERBS_RX_END_PADDING
if (!mlx5_getenv_int("MLX5_PMD_ENABLE_PADDING"))
; /* Nothing else to do. */
else if (priv->hw_padding) {
@@ -1088,7 +1065,6 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, uint16_t desc,
" supported, make sure MLNX_OFED and firmware are"
" up to date",
(void *)dev);
#endif /* HAVE_VERBS_RX_END_PADDING */
tmpl.rxq.wq = ibv_exp_create_wq(priv->ctx, &attr.wq);
if (tmpl.rxq.wq == NULL) {
@@ -1108,9 +1084,7 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, uint16_t desc,
DEBUG("%p: RTE port ID: %u", (void *)rxq_ctrl, tmpl.rxq.port_id);
attr.params = (struct ibv_exp_query_intf_params){
.intf_scope = IBV_EXP_INTF_GLOBAL,
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
.intf_version = 1,
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
.intf = IBV_EXP_INTF_CQ,
.obj = tmpl.rxq.cq,
};
@@ -1166,11 +1140,7 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, uint16_t desc,
DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
assert(ret == 0);
/* Assign function in queue. */
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
rxq_ctrl->rxq.poll = rxq_ctrl->if_cq->poll_length_flags_cvlan;
#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
rxq_ctrl->rxq.poll = rxq_ctrl->if_cq->poll_length_flags;
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
rxq_ctrl->rxq.recv = rxq_ctrl->if_wq->recv_burst;
return 0;
error:


@@ -452,11 +452,9 @@ rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
TRANSPOSE(~flags,
IBV_EXP_CQ_RX_IP_CSUM_OK,
PKT_RX_IP_CKSUM_BAD);
#ifdef HAVE_EXP_CQ_RX_TCP_PACKET
/* Set L4 checksum flag only for TCP/UDP packets. */
if (flags &
(IBV_EXP_CQ_RX_TCP_PACKET | IBV_EXP_CQ_RX_UDP_PACKET))
#endif /* HAVE_EXP_CQ_RX_TCP_PACKET */
ol_flags |=
TRANSPOSE(~flags,
IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK,
@@ -589,13 +587,11 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) {
seg->packet_type = rxq_cq_to_pkt_type(flags);
seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) {
seg->ol_flags |= PKT_RX_VLAN_PKT |
PKT_RX_VLAN_STRIPPED;
seg->vlan_tci = vlan_tci;
}
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
}
/* Return packet. */
*(pkts++) = seg;
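TRANSPOSE() moves a flag from its bit position in the completion flags word to the corresponding bit position in the mbuf ol_flags word. A sketch of an equivalent macro (the driver defines its own; this version is only meant to make the flag arithmetic concrete):

/* Move the bits selected by "from" in "val" to the position of "to":
 * divide to shift right, multiply to shift left. Exact only when the
 * ratio between "from" and "to" is a power of two, which holds for
 * the single-bit flags used above. */
#define TRANSPOSE(val, from, to) \
	(((from) >= (to)) ? \
	 (((val) & (from)) / ((from) / (to))) : \
	 (((val) & (from)) * ((to) / (from))))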


@@ -120,11 +120,7 @@ struct rxq_ctrl {
struct fdir_queue fdir_queue; /* Flow director queue. */
struct ibv_mr *mr; /* Memory Region (for mp). */
struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
struct ibv_exp_cq_family_v1 *if_cq; /* CQ interface. */
#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
struct ibv_exp_cq_family *if_cq; /* CQ interface. */
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
unsigned int socket; /* CPU socket ID for allocations. */
struct rxq rxq; /* Data path structure. */
};
@@ -134,11 +130,9 @@ enum hash_rxq_type {
HASH_RXQ_TCPV4,
HASH_RXQ_UDPV4,
HASH_RXQ_IPV4,
#ifdef HAVE_FLOW_SPEC_IPV6
HASH_RXQ_TCPV6,
HASH_RXQ_UDPV6,
HASH_RXQ_IPV6,
#endif /* HAVE_FLOW_SPEC_IPV6 */
HASH_RXQ_ETH,
};
@@ -169,9 +163,7 @@
} hdr;
struct ibv_exp_flow_spec_tcp_udp tcp_udp;
struct ibv_exp_flow_spec_ipv4 ipv4;
#ifdef HAVE_FLOW_SPEC_IPV6
struct ibv_exp_flow_spec_ipv6 ipv6;
#endif /* HAVE_FLOW_SPEC_IPV6 */
struct ibv_exp_flow_spec_eth eth;
} flow_spec; /* Flow specification template. */
const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */


@@ -376,13 +376,11 @@ txq_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl, uint16_t desc,
#ifdef HAVE_VERBS_VLAN_INSERTION
.intf_version = 1,
#endif
#ifdef HAVE_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR
/* Enable multi-packet send if supported. */
.family_flags =
(priv->mps ?
IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR :
0),
#endif
};
tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
if (tmpl.if_qp == NULL) {


@@ -144,7 +144,6 @@ static void
priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
{
struct rxq *rxq = (*priv->rxqs)[idx];
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
struct ibv_exp_wq_attr mod;
uint16_t vlan_offloads =
(on ? IBV_EXP_RECEIVE_WQ_CVLAN_STRIP : 0) |
@@ -165,8 +164,6 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
return;
}
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
/* Update related bits in RX queue. */
rxq->vlan_strip = !!on;
}
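The part of this function elided by the hunk applies vlan_offloads to the hardware queue. A hedged sketch of that step, assuming the experimental ibv_exp_modify_wq() verb and attribute names from MLNX_OFED (not the verbatim driver code):

	/* Sketch: push the new CVLAN-stripping setting to the RX work
	 * queue; on failure, leave rxq->vlan_strip unchanged. */
	mod = (struct ibv_exp_wq_attr){
		.attr_mask = IBV_EXP_WQ_ATTR_VLAN_OFFLOADS,
		.vlan_offloads = vlan_offloads,
	};
	if (ibv_exp_modify_wq(rxq->wq, &mod))
		return;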