net/mlx5: add device configuration structure

Move the device configuration and feature capabilities into their own
structure. This structure is filled by mlx5_pci_probe(); outside of this
function it should be treated as *read only*.

This configuration structure will be used during Tx/Rx queue setup to
select the queue parameters based on the user configuration and the
device capabilities. In addition, it will be used by the burst selection
function to decide on the best packet burst routine to use.

Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Author:    Shahaf Shuler
Date:      2018-01-10 11:16:58 +02:00
Committer: Ferruh Yigit
Commit:    7fe24446e9
Parent:    1cfa649ba6
8 files changed, 253 insertions(+), 236 deletions(-)
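
Before the diff, a quick orientation. The sketch below is illustrative only and is not part of the commit: it shows how a consumer is expected to read the configuration once mlx5_pci_probe() has filled priv->config. The struct priv / struct mlx5_dev_config types and the rx_vec_en field are the ones introduced by this patch; the helper name itself is an assumption.

/*
 * Illustrative sketch (not from the patch): a read-only consumer of the
 * device configuration filled by mlx5_pci_probe(). It mirrors the logic
 * of priv_check_vec_rx_support() after this change.
 */
static inline int
example_check_vec_rx(const struct priv *priv)
{
	const struct mlx5_dev_config *config = &priv->config;

	/* Rx vector support was resolved once at probe time. */
	if (!config->rx_vec_en)
		return -ENOTSUP;
	return 1;
}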


@@ -94,9 +94,6 @@
 /* Device parameter to enable hardware Rx vector. */
 #define MLX5_RX_VEC_EN "rx_vec_en"
-/* Default PMD specific parameter value. */
-#define MLX5_ARG_UNSET (-1)
 #ifndef HAVE_IBV_MLX5_MOD_MPW
 #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
 #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
@@ -106,17 +103,6 @@
 #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
 #endif
-struct mlx5_args {
-	int cqe_comp;
-	int txq_inline;
-	int txqs_inline;
-	int mps;
-	int mpw_hdr_dseg;
-	int inline_max_packet_sz;
-	int tso;
-	int tx_vec_en;
-	int rx_vec_en;
-};
 /**
  * Retrieve integer value from environment variable.
  *
@@ -399,7 +385,7 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr)
 static int
 mlx5_args_check(const char *key, const char *val, void *opaque)
 {
-	struct mlx5_args *args = opaque;
+	struct mlx5_dev_config *config = opaque;
 	unsigned long tmp;
 
 	errno = 0;
@@ -409,23 +395,23 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
 		return errno;
 	}
 	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
-		args->cqe_comp = !!tmp;
+		config->cqe_comp = !!tmp;
 	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
-		args->txq_inline = tmp;
+		config->txq_inline = tmp;
 	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
-		args->txqs_inline = tmp;
+		config->txqs_inline = tmp;
 	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
-		args->mps = !!tmp;
+		config->mps = !!tmp ? config->mps : 0;
 	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
-		args->mpw_hdr_dseg = !!tmp;
+		config->mpw_hdr_dseg = !!tmp;
 	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
-		args->inline_max_packet_sz = tmp;
+		config->inline_max_packet_sz = tmp;
 	} else if (strcmp(MLX5_TSO, key) == 0) {
-		args->tso = !!tmp;
+		config->tso = !!tmp;
 	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
-		args->tx_vec_en = !!tmp;
+		config->tx_vec_en = !!tmp;
 	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
-		args->rx_vec_en = !!tmp;
+		config->rx_vec_en = !!tmp;
 	} else {
 		WARN("%s: unknown parameter", key);
 		return -EINVAL;
@@ -436,8 +422,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
 /**
  * Parse device parameters.
  *
- * @param priv
- *   Pointer to private structure.
+ * @param config
+ *   Pointer to device configuration structure.
  * @param devargs
  *   Device arguments structure.
  *
@@ -445,7 +431,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
  *   0 on success, errno value on failure.
  */
 static int
-mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
+mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
 {
 	const char **params = (const char *[]){
 		MLX5_RXQ_CQE_COMP_EN,
@@ -473,7 +459,7 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
 	for (i = 0; (params[i] != NULL); ++i) {
 		if (rte_kvargs_count(kvlist, params[i])) {
 			ret = rte_kvargs_process(kvlist, params[i],
-						 mlx5_args_check, args);
+						 mlx5_args_check, config);
 			if (ret != 0) {
 				rte_kvargs_free(kvlist);
 				return ret;
@@ -486,38 +472,6 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
 static struct rte_pci_driver mlx5_driver;
 
-/**
- * Assign parameters from args into priv, only non default
- * values are considered.
- *
- * @param[out] priv
- *   Pointer to private structure.
- * @param[in] args
- *   Pointer to args values.
- */
-static void
-mlx5_args_assign(struct priv *priv, struct mlx5_args *args)
-{
-	if (args->cqe_comp != MLX5_ARG_UNSET)
-		priv->cqe_comp = args->cqe_comp;
-	if (args->txq_inline != MLX5_ARG_UNSET)
-		priv->txq_inline = args->txq_inline;
-	if (args->txqs_inline != MLX5_ARG_UNSET)
-		priv->txqs_inline = args->txqs_inline;
-	if (args->mps != MLX5_ARG_UNSET)
-		priv->mps = args->mps ? priv->mps : 0;
-	if (args->mpw_hdr_dseg != MLX5_ARG_UNSET)
-		priv->mpw_hdr_dseg = args->mpw_hdr_dseg;
-	if (args->inline_max_packet_sz != MLX5_ARG_UNSET)
-		priv->inline_max_packet_sz = args->inline_max_packet_sz;
-	if (args->tso != MLX5_ARG_UNSET)
-		priv->tso = args->tso;
-	if (args->tx_vec_en != MLX5_ARG_UNSET)
-		priv->tx_vec_en = args->tx_vec_en;
-	if (args->rx_vec_en != MLX5_ARG_UNSET)
-		priv->rx_vec_en = args->rx_vec_en;
-}
-
 /**
  * DPDK callback to register a PCI device.
  *
@@ -671,16 +625,17 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		struct ether_addr mac;
 		uint16_t num_vfs = 0;
 		struct ibv_device_attr_ex device_attr;
-		struct mlx5_args args = {
-			.cqe_comp = MLX5_ARG_UNSET,
+		struct mlx5_dev_config config = {
+			.cqe_comp = cqe_comp,
+			.mps = mps,
+			.tunnel_en = tunnel_en,
+			.tso = 0,
+			.tx_vec_en = 1,
+			.rx_vec_en = 1,
+			.mpw_hdr_dseg = 0,
 			.txq_inline = MLX5_ARG_UNSET,
 			.txqs_inline = MLX5_ARG_UNSET,
-			.mps = MLX5_ARG_UNSET,
-			.mpw_hdr_dseg = MLX5_ARG_UNSET,
 			.inline_max_packet_sz = MLX5_ARG_UNSET,
-			.tso = MLX5_ARG_UNSET,
-			.tx_vec_en = MLX5_ARG_UNSET,
-			.rx_vec_en = MLX5_ARG_UNSET,
 		};
 
 		mlx5_dev[idx].ports |= test;
@@ -779,106 +734,89 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		priv->port = port;
 		priv->pd = pd;
 		priv->mtu = ETHER_MTU;
-		priv->mps = mps; /* Enable MPW by default if supported. */
-		priv->cqe_comp = cqe_comp;
-		priv->tunnel_en = tunnel_en;
-		/* Enable vector by default if supported. */
-		priv->tx_vec_en = 1;
-		priv->rx_vec_en = 1;
-		err = mlx5_args(&args, pci_dev->device.devargs);
+		err = mlx5_args(&config, pci_dev->device.devargs);
 		if (err) {
 			ERROR("failed to process device arguments: %s",
 			      strerror(err));
 			goto port_error;
 		}
-		mlx5_args_assign(priv, &args);
 		if (ibv_query_device_ex(ctx, NULL, &device_attr_ex)) {
 			ERROR("ibv_query_device_ex() failed");
 			goto port_error;
 		}
-		priv->hw_csum =
-			!!(device_attr_ex.device_cap_flags_ex &
+		config.hw_csum = !!(device_attr_ex.device_cap_flags_ex &
 			    IBV_DEVICE_RAW_IP_CSUM);
 		DEBUG("checksum offloading is %ssupported",
-		      (priv->hw_csum ? "" : "not "));
+		      (config.hw_csum ? "" : "not "));
 #ifdef HAVE_IBV_DEVICE_VXLAN_SUPPORT
-		priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
+		config.hw_csum_l2tun =
+				!!(exp_device_attr.exp_device_cap_flags &
 				   IBV_DEVICE_VXLAN_SUPPORT);
 #endif
 		DEBUG("Rx L2 tunnel checksum offloads are %ssupported",
-		      (priv->hw_csum_l2tun ? "" : "not "));
+		      (config.hw_csum_l2tun ? "" : "not "));
 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
-		priv->counter_set_supported = !!(device_attr.max_counter_sets);
+		config.counter_set_supported =
+					!!(device_attr.max_counter_sets);
 		ibv_describe_counter_set(ctx, 0, &cs_desc);
 		DEBUG("counter type = %d, num of cs = %ld, attributes = %d",
 		      cs_desc.counter_type, cs_desc.num_of_cs,
 		      cs_desc.attributes);
 #endif
-		priv->ind_table_max_size =
+		config.ind_table_max_size =
 			device_attr_ex.rss_caps.max_rwq_indirection_table_size;
 		/* Remove this check once DPDK supports larger/variable
 		 * indirection tables. */
-		if (priv->ind_table_max_size >
+		if (config.ind_table_max_size >
 		    (unsigned int)ETH_RSS_RETA_SIZE_512)
-			priv->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+			config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
 		DEBUG("maximum RX indirection table size is %u",
-		      priv->ind_table_max_size);
-		priv->hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
+		      config.ind_table_max_size);
+		config.hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
					 IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
 		DEBUG("VLAN stripping is %ssupported",
-		      (priv->hw_vlan_strip ? "" : "not "));
-		priv->hw_fcs_strip =
+		      (config.hw_vlan_strip ? "" : "not "));
+		config.hw_fcs_strip =
 			!!(device_attr_ex.orig_attr.device_cap_flags &
 			   IBV_WQ_FLAGS_SCATTER_FCS);
 		DEBUG("FCS stripping configuration is %ssupported",
-		      (priv->hw_fcs_strip ? "" : "not "));
+		      (config.hw_fcs_strip ? "" : "not "));
 #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
-		priv->hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
+		config.hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
 #endif
 		DEBUG("hardware RX end alignment padding is %ssupported",
-		      (priv->hw_padding ? "" : "not "));
+		      (config.hw_padding ? "" : "not "));
 		priv_get_num_vfs(priv, &num_vfs);
-		priv->sriov = (num_vfs || sriov);
-		priv->tso = ((priv->tso) &&
-			    (device_attr_ex.tso_caps.max_tso > 0) &&
+		config.sriov = (num_vfs || sriov);
+		if (config.tso)
+			config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
 			    (device_attr_ex.tso_caps.supported_qpts &
 			     (1 << IBV_QPT_RAW_PACKET)));
-		if (priv->tso)
-			priv->max_tso_payload_sz =
+		if (config.tso)
+			config.tso_max_payload_sz =
 				device_attr_ex.tso_caps.max_tso;
-		if (priv->mps && !mps) {
+		if (config.mps && !mps) {
 			ERROR("multi-packet send not supported on this device"
 			      " (" MLX5_TXQ_MPW_EN ")");
 			err = ENOTSUP;
 			goto port_error;
-		} else if (priv->mps && priv->tso) {
+		} else if (config.mps && config.tso) {
 			WARN("multi-packet send not supported in conjunction "
 			     "with TSO. MPS disabled");
-			priv->mps = 0;
+			config.mps = 0;
 		}
 		INFO("%sMPS is %s",
-		     priv->mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
-		     priv->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
-		/* Set default values for Enhanced MPW, a.k.a MPWv2. */
-		if (priv->mps == MLX5_MPW_ENHANCED) {
-			if (args.txqs_inline == MLX5_ARG_UNSET)
-				priv->txqs_inline = MLX5_EMPW_MIN_TXQS;
-			if (args.inline_max_packet_sz == MLX5_ARG_UNSET)
-				priv->inline_max_packet_sz =
-					MLX5_EMPW_MAX_INLINE_LEN;
-			if (args.txq_inline == MLX5_ARG_UNSET)
-				priv->txq_inline = MLX5_WQE_SIZE_MAX -
-						   MLX5_WQE_SIZE;
-		}
-		if (priv->cqe_comp && !cqe_comp) {
+		     config.mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
+		     config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+		if (config.cqe_comp && !cqe_comp) {
 			WARN("Rx CQE compression isn't supported");
-			priv->cqe_comp = 0;
+			config.cqe_comp = 0;
 		}
 		/* Configure the first MAC address by default. */
 		if (priv_get_mac(priv, &mac.addr_bytes)) {
@@ -945,6 +883,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		DEBUG("forcing Ethernet interface up");
 		priv_set_flags(priv, ~IFF_UP, IFF_UP);
 		mlx5_link_update(priv->dev, 1);
+		/* Store device configuration on private structure. */
+		priv->config = config;
 		continue;
 port_error:


@@ -90,6 +90,39 @@ struct mlx5_xstats_ctrl {
 /* Flow list . */
 TAILQ_HEAD(mlx5_flows, rte_flow);
 
+/* Default PMD specific parameter value. */
+#define MLX5_ARG_UNSET (-1)
+
+/*
+ * Device configuration structure.
+ *
+ * Merged configuration from:
+ *
+ * - Device capabilities,
+ * - User device parameters disabled features.
+ */
+struct mlx5_dev_config {
+	unsigned int hw_csum:1; /* Checksum offload is supported. */
+	unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
+	unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
+	unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
+	unsigned int hw_padding:1; /* End alignment padding is supported. */
+	unsigned int sriov:1; /* This is a VF or PF with VF devices. */
+	unsigned int mps:2; /* Multi-packet send supported mode. */
+	unsigned int tunnel_en:1; /* Whether tunnel is supported. */
+	unsigned int counter_set_supported:1; /* Counter set is supported. */
+	unsigned int cqe_comp:1; /* CQE compression is enabled. */
+	unsigned int tso:1; /* Whether TSO is enabled. */
+	unsigned int tx_vec_en:1; /* Tx vector is enabled. */
+	unsigned int rx_vec_en:1; /* Rx vector is enabled. */
+	unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
+	unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
+	unsigned int ind_table_max_size; /* Maximum indirection table size. */
+	int txq_inline; /* Maximum packet size for inlining. */
+	int txqs_inline; /* Queue number threshold for inlining. */
+	int inline_max_packet_sz; /* Max packet size for inlining. */
+};
+
 struct priv {
 	struct rte_eth_dev *dev; /* Ethernet device of master process. */
 	struct ibv_context *ctx; /* Verbs context. */
@@ -102,27 +135,8 @@ struct priv {
 	/* Device properties. */
 	uint16_t mtu; /* Configured MTU. */
 	uint8_t port; /* Physical port number. */
-	unsigned int hw_csum:1; /* Checksum offload is supported. */
-	unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
-	unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
-	unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
-	unsigned int hw_padding:1; /* End alignment padding is supported. */
-	unsigned int sriov:1; /* This is a VF or PF with VF devices. */
-	unsigned int mps:2; /* Multi-packet send mode (0: disabled). */
-	unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
-	unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */
 	unsigned int pending_alarm:1; /* An alarm is pending. */
-	unsigned int tso:1; /* Whether TSO is supported. */
-	unsigned int tunnel_en:1;
 	unsigned int isolated:1; /* Whether isolated mode is enabled. */
-	unsigned int tx_vec_en:1; /* Whether Tx vector is enabled. */
-	unsigned int rx_vec_en:1; /* Whether Rx vector is enabled. */
-	unsigned int counter_set_supported:1; /* Counter set is supported. */
-	/* Whether Tx offloads for tunneled packets are supported. */
-	unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
-	unsigned int txq_inline; /* Maximum packet size for inlining. */
-	unsigned int txqs_inline; /* Queue number threshold for inlining. */
-	unsigned int inline_max_packet_sz; /* Max packet size for inlining. */
 	/* RX/TX queues. */
 	unsigned int rxqs_n; /* RX queues array size. */
 	unsigned int txqs_n; /* TX queues array size. */
@@ -149,6 +163,7 @@ struct priv {
 	rte_spinlock_t lock; /* Lock for control functions. */
 	int primary_socket; /* Unix socket for primary process. */
 	struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */
+	struct mlx5_dev_config config; /* Device configuration. */
 };
 
 /**


@@ -579,7 +579,7 @@ dev_configure(struct rte_eth_dev *dev)
 		     (void *)dev, priv->txqs_n, txqs_n);
 		priv->txqs_n = txqs_n;
 	}
-	if (rxqs_n > priv->ind_table_max_size) {
+	if (rxqs_n > priv->config.ind_table_max_size) {
 		ERROR("cannot handle this many RX queues (%u)", rxqs_n);
 		return EINVAL;
 	}
@@ -592,7 +592,7 @@ dev_configure(struct rte_eth_dev *dev)
 	 * maximum indirection table size for better balancing.
 	 * The result is always rounded to the next power of two. */
 	reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
-				     priv->ind_table_max_size :
+				     priv->config.ind_table_max_size :
 				     rxqs_n));
 	if (priv_rss_reta_index_resize(priv, reta_idx_n))
 		return ENOMEM;
@@ -641,6 +641,7 @@ void
 mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 {
 	struct priv *priv = dev->data->dev_private;
+	struct mlx5_dev_config *config = &priv->config;
 	unsigned int max;
 	char ifname[IF_NAMESIZE];
@@ -663,31 +664,31 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->max_tx_queues = max;
 	info->max_mac_addrs = RTE_DIM(priv->mac);
 	info->rx_offload_capa =
-		(priv->hw_csum ?
+		(config->hw_csum ?
 		 (DEV_RX_OFFLOAD_IPV4_CKSUM |
 		  DEV_RX_OFFLOAD_UDP_CKSUM |
 		  DEV_RX_OFFLOAD_TCP_CKSUM) :
 		 0) |
-		(priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
+		(priv->config.hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
 		DEV_RX_OFFLOAD_TIMESTAMP;
-	if (!priv->mps)
+	if (!config->mps)
 		info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
-	if (priv->hw_csum)
+	if (config->hw_csum)
 		info->tx_offload_capa |=
 			(DEV_TX_OFFLOAD_IPV4_CKSUM |
 			 DEV_TX_OFFLOAD_UDP_CKSUM |
 			 DEV_TX_OFFLOAD_TCP_CKSUM);
-	if (priv->tso)
+	if (config->tso)
 		info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
-	if (priv->tunnel_en)
+	if (config->tunnel_en)
 		info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
 					  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
 					  DEV_TX_OFFLOAD_GRE_TNL_TSO);
 	if (priv_get_ifname(priv, &ifname) == 0)
 		info->if_index = if_nametoindex(ifname);
 	info->reta_size = priv->reta_idx_n ?
-		priv->reta_idx_n : priv->ind_table_max_size;
+		priv->reta_idx_n : config->ind_table_max_size;
 	info->hash_key_size = priv->rss_conf.rss_key_len;
 	info->speed_capa = priv->link_speed_capa;
 	priv_unlock(priv);
@@ -1394,10 +1395,11 @@ eth_tx_burst_t
 priv_select_tx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
 {
 	eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
+	struct mlx5_dev_config *config = &priv->config;
 
 	assert(priv != NULL);
 	/* Select appropriate TX function. */
-	if (priv->mps == MLX5_MPW_ENHANCED) {
+	if (config->mps == MLX5_MPW_ENHANCED) {
 		if (priv_check_vec_tx_support(priv) > 0) {
 			if (priv_check_raw_vec_tx_support(priv) > 0)
 				tx_pkt_burst = mlx5_tx_burst_raw_vec;
@@ -1408,10 +1410,10 @@ priv_select_tx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
 				tx_pkt_burst = mlx5_tx_burst_empw;
 			DEBUG("selected Enhanced MPW TX function");
 		}
-	} else if (priv->mps && priv->txq_inline) {
+	} else if (config->mps && (config->txq_inline > 0)) {
 		tx_pkt_burst = mlx5_tx_burst_mpw_inline;
 		DEBUG("selected MPW inline TX function");
-	} else if (priv->mps) {
+	} else if (config->mps) {
 		tx_pkt_burst = mlx5_tx_burst_mpw;
 		DEBUG("selected MPW TX function");
 	}


@@ -778,7 +778,7 @@ priv_flow_convert_actions(struct priv *priv,
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
 			parser->mark = 1;
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT &&
-			   priv->counter_set_supported) {
+			   priv->config.counter_set_supported) {
 			parser->count = 1;
 		} else {
 			goto exit_action_not_supported;


@@ -570,6 +570,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
 	unsigned int i;
 	int ret = 0;
 	struct mlx5dv_obj obj;
+	struct mlx5_dev_config *config = &priv->config;
 
 	assert(rxq_data);
 	assert(!rxq_ctrl->ibv);
@@ -606,7 +607,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
 	attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
 		.comp_mask = 0,
 	};
-	if (priv->cqe_comp && !rxq_data->hw_timestamp) {
+	if (config->cqe_comp && !rxq_data->hw_timestamp) {
 		attr.cq.mlx5.comp_mask |=
 			MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
 		attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
@@ -616,7 +617,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
 		 */
 		if (rxq_check_vec_support(rxq_data) < 0)
 			attr.cq.ibv.cqe *= 2;
-	} else if (priv->cqe_comp && rxq_data->hw_timestamp) {
+	} else if (config->cqe_comp && rxq_data->hw_timestamp) {
 		DEBUG("Rx CQE compression is disabled for HW timestamp");
 	}
 	tmpl->cq = ibv_cq_ex_to_cq(mlx5dv_create_cq(priv->ctx, &attr.cq.ibv,
@@ -651,7 +652,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
 		attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
 	}
 #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
-	if (priv->hw_padding) {
+	if (config->hw_padding) {
 		attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
 		attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
 	}
@@ -878,9 +879,14 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
 {
 	struct rte_eth_dev *dev = priv->dev;
 	struct mlx5_rxq_ctrl *tmpl;
-	const uint16_t desc_n =
-		desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
 	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+	struct mlx5_dev_config *config = &priv->config;
+	/*
+	 * Always allocate extra slots, even if eventually
+	 * the vector Rx will not be used.
+	 */
+	const uint16_t desc_n =
+		desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
 
 	tmpl = rte_calloc_socket("RXQ", 1,
 				 sizeof(*tmpl) +
@@ -938,20 +944,20 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
 		goto error;
 	}
 	/* Toggle RX checksum offload if hardware supports it. */
-	if (priv->hw_csum)
+	if (config->hw_csum)
 		tmpl->rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
-	if (priv->hw_csum_l2tun)
+	if (config->hw_csum_l2tun)
 		tmpl->rxq.csum_l2tun =
 			!!dev->data->dev_conf.rxmode.hw_ip_checksum;
 	tmpl->rxq.hw_timestamp =
 			!!dev->data->dev_conf.rxmode.hw_timestamp;
 	/* Configure VLAN stripping. */
-	tmpl->rxq.vlan_strip = (priv->hw_vlan_strip &&
+	tmpl->rxq.vlan_strip = (config->hw_vlan_strip &&
 				!!dev->data->dev_conf.rxmode.hw_vlan_strip);
 	/* By default, FCS (CRC) is stripped by hardware. */
 	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
 		tmpl->rxq.crc_present = 0;
-	} else if (priv->hw_fcs_strip) {
+	} else if (config->hw_fcs_strip) {
 		tmpl->rxq.crc_present = 1;
 	} else {
 		WARN("%p: CRC stripping has been disabled but will still"


@@ -287,10 +287,10 @@ priv_check_raw_vec_tx_support(struct priv *priv)
 int __attribute__((cold))
 priv_check_vec_tx_support(struct priv *priv)
 {
-	if (!priv->tx_vec_en ||
+	if (!priv->config.tx_vec_en ||
 	    priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
-	    priv->mps != MLX5_MPW_ENHANCED ||
-	    priv->tso)
+	    priv->config.mps != MLX5_MPW_ENHANCED ||
+	    priv->config.tso)
 		return -ENOTSUP;
 	return 1;
 }
@@ -310,7 +310,7 @@ rxq_check_vec_support(struct mlx5_rxq_data *rxq)
 	struct mlx5_rxq_ctrl *ctrl =
 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 
-	if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
+	if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
 		return -ENOTSUP;
 	return 1;
 }
@@ -329,7 +329,7 @@ priv_check_vec_rx_support(struct priv *priv)
 {
 	uint16_t i;
 
-	if (!priv->rx_vec_en)
+	if (!priv->config.rx_vec_en)
 		return -ENOTSUP;
 	/* All the configured queues should support. */
 	for (i = 0; i < priv->rxqs_n; ++i) {


@@ -272,6 +272,25 @@ priv_tx_uar_remap(struct priv *priv, int fd)
 	return 0;
 }
 
+/**
+ * Check if the burst function is using eMPW.
+ *
+ * @param tx_pkt_burst
+ *   Tx burst function pointer.
+ *
+ * @return
+ *   1 if the burst function is using eMPW, 0 otherwise.
+ */
+static int
+is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
+{
+	if (tx_pkt_burst == mlx5_tx_burst_raw_vec ||
+	    tx_pkt_burst == mlx5_tx_burst_vec ||
+	    tx_pkt_burst == mlx5_tx_burst_empw)
+		return 1;
+	return 0;
+}
+
 /**
  * Create the Tx queue Verbs object.
  *
@@ -302,6 +321,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
 	struct mlx5dv_cq cq_info;
 	struct mlx5dv_obj obj;
 	const int desc = 1 << txq_data->elts_n;
+	eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
 	int ret = 0;
 
 	assert(txq_data);
@@ -316,7 +336,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
 	};
 	cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
 		((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
-	if (priv->mps == MLX5_MPW_ENHANCED)
+	if (is_empw_burst_func(tx_pkt_burst))
 		cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
 	tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
 	if (tmpl.cq == NULL) {
@@ -540,6 +560,107 @@ mlx5_priv_txq_ibv_verify(struct priv *priv)
 	return ret;
 }
 
+/**
+ * Set Tx queue parameters from device configuration.
+ *
+ * @param txq_ctrl
+ *   Pointer to Tx queue control structure.
+ */
+static void
+txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
+{
+	struct priv *priv = txq_ctrl->priv;
+	struct mlx5_dev_config *config = &priv->config;
+	const unsigned int max_tso_inline =
+		((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
+		 RTE_CACHE_LINE_SIZE);
+	unsigned int txq_inline;
+	unsigned int txqs_inline;
+	unsigned int inline_max_packet_sz;
+	eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
+	int is_empw_func = is_empw_burst_func(tx_pkt_burst);
+
+	txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
+		0 : config->txq_inline;
+	txqs_inline = (config->txqs_inline == MLX5_ARG_UNSET) ?
+		0 : config->txqs_inline;
+	inline_max_packet_sz =
+		(config->inline_max_packet_sz == MLX5_ARG_UNSET) ?
+		0 : config->inline_max_packet_sz;
+	if (is_empw_func) {
+		if (config->txq_inline == MLX5_ARG_UNSET)
+			txq_inline = MLX5_WQE_SIZE_MAX - MLX5_WQE_SIZE;
+		if (config->txqs_inline == MLX5_ARG_UNSET)
+			txqs_inline = MLX5_EMPW_MIN_TXQS;
+		if (config->inline_max_packet_sz == MLX5_ARG_UNSET)
+			inline_max_packet_sz = MLX5_EMPW_MAX_INLINE_LEN;
+		txq_ctrl->txq.mpw_hdr_dseg = config->mpw_hdr_dseg;
+		txq_ctrl->txq.inline_max_packet_sz = inline_max_packet_sz;
+	}
+	if (txq_inline && priv->txqs_n >= txqs_inline) {
+		unsigned int ds_cnt;
+
+		txq_ctrl->txq.max_inline =
+			((txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
+			 RTE_CACHE_LINE_SIZE);
+		/* TSO and MPS can't be enabled concurrently. */
+		assert(!config->tso || !config->mps);
+		if (is_empw_func) {
+			/* To minimize the size of data set, avoid requesting
+			 * too large WQ.
+			 */
+			txq_ctrl->max_inline_data =
+				((RTE_MIN(txq_inline,
+					  inline_max_packet_sz) +
+				  (RTE_CACHE_LINE_SIZE - 1)) /
+				 RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
+		} else if (config->tso) {
+			int inline_diff = txq_ctrl->txq.max_inline -
+					  max_tso_inline;
+
+			/*
+			 * Adjust inline value as Verbs aggregates
+			 * tso_inline and txq_inline fields.
+			 */
+			txq_ctrl->max_inline_data = inline_diff > 0 ?
+						    inline_diff *
+						    RTE_CACHE_LINE_SIZE :
+						    0;
+		} else {
+			txq_ctrl->max_inline_data =
+				txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE;
+		}
+		/*
+		 * Check if the inline size is too large in a way which
+		 * can make the WQE DS to overflow.
+		 * Considering in calculation:
+		 *	WQE CTRL (1 DS)
+		 *	WQE ETH (1 DS)
+		 *	Inline part (N DS)
+		 */
+		ds_cnt = 2 + (txq_ctrl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
+		if (ds_cnt > MLX5_DSEG_MAX) {
+			unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
+						  MLX5_WQE_DWORD_SIZE;
+
+			max_inline = max_inline - (max_inline %
+						   RTE_CACHE_LINE_SIZE);
+			WARN("txq inline is too large (%d) setting it to "
+			     "the maximum possible: %d\n",
+			     txq_inline, max_inline);
+			txq_ctrl->txq.max_inline = max_inline /
+						   RTE_CACHE_LINE_SIZE;
+		}
+	}
+	if (config->tso) {
+		txq_ctrl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
+		txq_ctrl->txq.max_inline = RTE_MAX(txq_ctrl->txq.max_inline,
+						   max_tso_inline);
+		txq_ctrl->txq.tso_en = 1;
+	}
+	txq_ctrl->txq.tunnel_en = config->tunnel_en;
+}
+
 /**
  * Create a DPDK Tx queue.
  *
@@ -562,9 +683,6 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
 		  unsigned int socket,
 		  const struct rte_eth_txconf *conf)
 {
-	const unsigned int max_tso_inline =
-		((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
-		 RTE_CACHE_LINE_SIZE);
 	struct mlx5_txq_ctrl *tmpl;
 
 	tmpl = rte_calloc_socket("TXQ", 1,
@@ -578,76 +696,12 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
 	tmpl->priv = priv;
 	tmpl->socket = socket;
 	tmpl->txq.elts_n = log2above(desc);
-	if (priv->mps == MLX5_MPW_ENHANCED)
-		tmpl->txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
+	txq_set_params(tmpl);
 	/* MRs will be registered in mp2mr[] later. */
 	DEBUG("priv->device_attr.max_qp_wr is %d",
 	      priv->device_attr.orig_attr.max_qp_wr);
 	DEBUG("priv->device_attr.max_sge is %d",
 	      priv->device_attr.orig_attr.max_sge);
-	if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
-		unsigned int ds_cnt;
-
-		tmpl->txq.max_inline =
-			((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
-			 RTE_CACHE_LINE_SIZE);
-		/* TSO and MPS can't be enabled concurrently. */
-		assert(!priv->tso || !priv->mps);
-		if (priv->mps == MLX5_MPW_ENHANCED) {
-			tmpl->txq.inline_max_packet_sz =
-				priv->inline_max_packet_sz;
-			/* To minimize the size of data set, avoid requesting
-			 * too large WQ.
-			 */
-			tmpl->max_inline_data =
-				((RTE_MIN(priv->txq_inline,
-					  priv->inline_max_packet_sz) +
-				  (RTE_CACHE_LINE_SIZE - 1)) /
-				 RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
-		} else if (priv->tso) {
-			int inline_diff = tmpl->txq.max_inline - max_tso_inline;
-
-			/*
-			 * Adjust inline value as Verbs aggregates
-			 * tso_inline and txq_inline fields.
-			 */
-			tmpl->max_inline_data = inline_diff > 0 ?
-						inline_diff *
-						RTE_CACHE_LINE_SIZE :
-						0;
-		} else {
-			tmpl->max_inline_data =
-				tmpl->txq.max_inline * RTE_CACHE_LINE_SIZE;
-		}
-		/*
-		 * Check if the inline size is too large in a way which
-		 * can make the WQE DS to overflow.
-		 * Considering in calculation:
-		 *	WQE CTRL (1 DS)
-		 *	WQE ETH (1 DS)
-		 *	Inline part (N DS)
-		 */
-		ds_cnt = 2 + (tmpl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
-		if (ds_cnt > MLX5_DSEG_MAX) {
-			unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
-						  MLX5_WQE_DWORD_SIZE;
-
-			max_inline = max_inline - (max_inline %
-						   RTE_CACHE_LINE_SIZE);
-			WARN("txq inline is too large (%d) setting it to "
-			     "the maximum possible: %d\n",
-			     priv->txq_inline, max_inline);
-			tmpl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
-		}
-	}
-	if (priv->tso) {
-		tmpl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
-		tmpl->txq.max_inline = RTE_MAX(tmpl->txq.max_inline,
-					       max_tso_inline);
-		tmpl->txq.tso_en = 1;
-	}
-	if (priv->tunnel_en)
-		tmpl->txq.tunnel_en = 1;
 	tmpl->txq.elts =
 		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
 	tmpl->txq.stats.idx = idx;


@@ -165,7 +165,7 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 	struct priv *priv = dev->data->dev_private;
 
 	/* Validate hw support */
-	if (!priv->hw_vlan_strip) {
+	if (!priv->config.hw_vlan_strip) {
 		ERROR("VLAN stripping is not supported");
 		return;
 	}
@@ -198,7 +198,7 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	if (mask & ETH_VLAN_STRIP_MASK) {
 		int hw_vlan_strip = !!dev->data->dev_conf.rxmode.hw_vlan_strip;
 
-		if (!priv->hw_vlan_strip) {
+		if (!priv->config.hw_vlan_strip) {
 			ERROR("VLAN stripping is not supported");
 			return 0;
 		}