net/mlx5: convert to new Tx offloads API
Ethdev Tx offloads API has changed since:
commit cba7f53b71 ("ethdev: introduce Tx queue offloads API")
This commit supports the new Tx offloads API.
Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
commit dbccb4cddc
parent 73b620f211
@@ -262,8 +262,9 @@ Run-time configuration
   Enhanced MPS supports hybrid mode - mixing inlined packets and pointers
   in the same descriptor.
 
-  This option cannot be used in conjunction with ``tso`` below. When ``tso``
-  is set, ``txq_mpw_en`` is disabled.
+  This option cannot be used with certain offloads such as ``DEV_TX_OFFLOAD_TCP_TSO,
+  DEV_TX_OFFLOAD_VXLAN_TNL_TSO, DEV_TX_OFFLOAD_GRE_TNL_TSO, DEV_TX_OFFLOAD_VLAN_INSERT``.
+  When those offloads are requested the MPS send function will not be used.
 
   It is currently only supported on the ConnectX-4 Lx and ConnectX-5
   families of adapters. Enabled by default.
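For context, under the new API these offloads are requested through ``txmode.offloads`` at configure time rather than inferred from devargs. A minimal application-side sketch (port id, queue counts, and function name are illustrative, not part of this patch):

    #include <rte_ethdev.h>

    /* Sketch: request TCP TSO via the new port-level Tx offloads API.
     * Per the note above, setting this offload makes the mlx5 PMD skip
     * the MPS (multi-packet send) Tx function. */
    static int
    configure_with_tso(uint16_t port_id)
    {
    	struct rte_eth_conf conf = {
    		.txmode = { .offloads = DEV_TX_OFFLOAD_TCP_TSO },
    	};

    	/* 1 Rx and 1 Tx queue; counts are illustrative. */
    	return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }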
@@ -284,17 +285,15 @@ Run-time configuration
 
   Effective only when Enhanced MPS is supported. The default value is 256.
 
-- ``tso`` parameter [int]
-
-  A nonzero value enables hardware TSO.
-  When hardware TSO is enabled, packets marked with TCP segmentation
-  offload will be divided into segments by the hardware. Disabled by default.
-
 - ``tx_vec_en`` parameter [int]
 
   A nonzero value enables Tx vector on ConnectX-5 only NIC if the number of
   global Tx queues on the port is lesser than MLX5_VPMD_MIN_TXQS.
 
+  This option cannot be used with certain offloads such as ``DEV_TX_OFFLOAD_TCP_TSO,
+  DEV_TX_OFFLOAD_VXLAN_TNL_TSO, DEV_TX_OFFLOAD_GRE_TNL_TSO, DEV_TX_OFFLOAD_VLAN_INSERT``.
+  When those offloads are requested the MPS send function will not be used.
+
   Enabled by default on ConnectX-5.
 
 - ``rx_vec_en`` parameter [int]
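Note that with this patch ``tso`` is gone as a device argument; TSO is now requested per port via ``DEV_TX_OFFLOAD_TCP_TSO``. The remaining knobs are still passed as devargs; a hypothetical testpmd invocation (PCI address made up):

    testpmd -w 0000:03:00.0,txq_mpw_en=1,tx_vec_en=0 -- -i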
@@ -85,9 +85,6 @@
 /* Device parameter to limit the size of inlining packet. */
 #define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"
 
-/* Device parameter to enable hardware TSO offload. */
-#define MLX5_TSO "tso"
-
 /* Device parameter to enable hardware Tx vector. */
 #define MLX5_TX_VEC_EN "tx_vec_en"
@@ -406,8 +403,6 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
 		config->mpw_hdr_dseg = !!tmp;
 	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
 		config->inline_max_packet_sz = tmp;
-	} else if (strcmp(MLX5_TSO, key) == 0) {
-		config->tso = !!tmp;
 	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
 		config->tx_vec_en = !!tmp;
 	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
@@ -440,7 +435,6 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
 		MLX5_TXQ_MPW_EN,
 		MLX5_TXQ_MPW_HDR_DSEG_EN,
 		MLX5_TXQ_MAX_INLINE_LEN,
-		MLX5_TSO,
 		MLX5_TX_VEC_EN,
 		MLX5_RX_VEC_EN,
 		NULL,
@@ -629,7 +623,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 			.cqe_comp = cqe_comp,
 			.mps = mps,
 			.tunnel_en = tunnel_en,
-			.tso = 0,
 			.tx_vec_en = 1,
 			.rx_vec_en = 1,
 			.mpw_hdr_dseg = 0,
@@ -793,10 +786,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 
 		priv_get_num_vfs(priv, &num_vfs);
 		config.sriov = (num_vfs || sriov);
-		if (config.tso)
-			config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
-				      (device_attr_ex.tso_caps.supported_qpts &
-				       (1 << IBV_QPT_RAW_PACKET)));
+		config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
+			      (device_attr_ex.tso_caps.supported_qpts &
+			       (1 << IBV_QPT_RAW_PACKET)));
 		if (config.tso)
 			config.tso_max_payload_sz =
 					device_attr_ex.tso_caps.max_tso;
@@ -805,10 +797,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 			     " (" MLX5_TXQ_MPW_EN ")");
 			err = ENOTSUP;
 			goto port_error;
-		} else if (config.mps && config.tso) {
-			WARN("multi-packet send not supported in conjunction "
-			     "with TSO. MPS disabled");
-			config.mps = 0;
 		}
 		INFO("%sMPS is %s",
 		     config.mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
@@ -112,7 +112,7 @@ struct mlx5_dev_config {
 	unsigned int tunnel_en:1; /* Whether tunnel is supported. */
 	unsigned int flow_counter_en:1; /* Whether flow counter is supported. */
 	unsigned int cqe_comp:1; /* CQE compression is enabled. */
-	unsigned int tso:1; /* Whether TSO is enabled. */
+	unsigned int tso:1; /* Whether TSO is supported. */
 	unsigned int tx_vec_en:1; /* Tx vector is enabled. */
 	unsigned int rx_vec_en:1; /* Rx vector is enabled. */
 	unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
@@ -551,7 +551,15 @@ dev_configure(struct rte_eth_dev *dev)
 	unsigned int reta_idx_n;
 	const uint8_t use_app_rss_key =
 		!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+	uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv);
+	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 
+	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
+		ERROR("Some Tx offloads are not supported "
+		      "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+		      tx_offloads, supp_tx_offloads);
+		return ENOTSUP;
+	}
 	if (use_app_rss_key &&
 	    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
 	     rss_hash_default_key_len)) {
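The check above rejects any configuration whose requested Tx offload set is not a subset of what the port supports. An application-side sketch of the same mask test (helper name and queue counts are hypothetical):

    #include <errno.h>
    #include <rte_ethdev.h>

    /* Sketch: mirror the PMD's subset test before configuring the port. */
    static int
    configure_tx_offloads(uint16_t port_id, uint64_t wanted)
    {
    	struct rte_eth_dev_info info;
    	struct rte_eth_conf conf = {
    		.txmode = { .offloads = wanted },
    	};

    	rte_eth_dev_info_get(port_id, &info);
    	/* Same test as above: every requested bit must be supported. */
    	if ((wanted & info.tx_offload_capa) != wanted)
    		return -ENOTSUP;
    	return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }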
@@ -672,19 +680,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 		(priv->config.hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
 		DEV_RX_OFFLOAD_TIMESTAMP;
 
-	if (!config->mps)
-		info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
-	if (config->hw_csum)
-		info->tx_offload_capa |=
-			(DEV_TX_OFFLOAD_IPV4_CKSUM |
-			 DEV_TX_OFFLOAD_UDP_CKSUM |
-			 DEV_TX_OFFLOAD_TCP_CKSUM);
-	if (config->tso)
-		info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
-	if (config->tunnel_en)
-		info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-					  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					  DEV_TX_OFFLOAD_GRE_TNL_TSO);
+	info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv);
 	if (priv_get_ifname(priv, &ifname) == 0)
 		info->if_index = if_nametoindex(ifname);
 	info->reta_size = priv->reta_idx_n ?
@@ -1392,16 +1388,23 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
  *   Pointer to selected Tx burst function.
  */
 eth_tx_burst_t
-priv_select_tx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
+priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
 {
 	eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
 	struct mlx5_dev_config *config = &priv->config;
+	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+	int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+				    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+				    DEV_TX_OFFLOAD_GRE_TNL_TSO));
+	int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);
 
 	assert(priv != NULL);
 	/* Select appropriate TX function. */
+	if (vlan_insert || tso)
+		return tx_pkt_burst;
 	if (config->mps == MLX5_MPW_ENHANCED) {
-		if (priv_check_vec_tx_support(priv) > 0) {
-			if (priv_check_raw_vec_tx_support(priv) > 0)
+		if (priv_check_vec_tx_support(priv, dev) > 0) {
+			if (priv_check_raw_vec_tx_support(priv, dev) > 0)
 				tx_pkt_burst = mlx5_tx_burst_raw_vec;
 			else
 				tx_pkt_burst = mlx5_tx_burst_vec;
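In short, the burst function is now derived from the requested Tx offloads instead of the removed ``tso`` devarg. A simplified restatement of the selection order (a sketch over the symbols in this diff, not the PMD's exact control flow, which has further empw branches and queue-count checks):

    /* Sketch only; assumes the mlx5 internal declarations shown above. */
    static eth_tx_burst_t
    select_tx_burst_sketch(uint64_t tx_offloads, int enhanced_mps)
    {
    	int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
    				    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
    				    DEV_TX_OFFLOAD_GRE_TNL_TSO));
    	int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);

    	if (vlan_insert || tso)
    		return mlx5_tx_burst; /* vector paths lack these offloads */
    	if (enhanced_mps) {
    		if (!tx_offloads)
    			return mlx5_tx_burst_raw_vec;
    		if (!(tx_offloads & ~MLX5_VEC_TX_OFFLOAD_CAP))
    			return mlx5_tx_burst_vec;
    	}
    	return mlx5_tx_burst; /* fallback; real code has more branches */
    }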
@@ -1994,16 +1994,18 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 }
 
 int __attribute__((weak))
-priv_check_raw_vec_tx_support(struct priv *priv)
+priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
 {
 	(void)priv;
+	(void)dev;
 	return -ENOTSUP;
 }
 
 int __attribute__((weak))
-priv_check_vec_tx_support(struct priv *priv)
+priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
 {
 	(void)priv;
+	(void)dev;
 	return -ENOTSUP;
 }
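The two definitions above are deliberately weak: when the vectorized objects are not built (non-x86 targets, for instance), the linker keeps these -ENOTSUP stubs; otherwise the strong definitions in the vectorized file win. A generic, self-contained illustration of the pattern (names hypothetical):

    #include <stdio.h>

    /* Weak default: overridden by any strong definition of the same
     * symbol in another object file linked into the binary. */
    int __attribute__((weak))
    backend_available(void)
    {
    	return 0;
    }

    int
    main(void)
    {
    	printf("backend available: %d\n", backend_available());
    	return 0;
    }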
@@ -200,7 +200,7 @@ struct mlx5_txq_data {
 	uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
 	uint16_t mr_cache_idx; /* Index of last hit entry. */
 	uint32_t qp_num_8s; /* QP number shifted by 8. */
-	uint32_t flags; /* Flags for Tx Queue. */
+	uint64_t offloads; /* Offloads for Tx Queue. */
 	volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
 	volatile void *wqes; /* Work queue (use volatile to write into). */
 	volatile uint32_t *qp_db; /* Work queue doorbell. */
@@ -292,6 +292,7 @@ int mlx5_priv_txq_release(struct priv *, uint16_t);
 int mlx5_priv_txq_releasable(struct priv *, uint16_t);
 int mlx5_priv_txq_verify(struct priv *);
 void txq_alloc_elts(struct mlx5_txq_ctrl *);
+uint64_t mlx5_priv_get_tx_port_offloads(struct priv *);
 
 /* mlx5_rxtx.c */
@@ -309,8 +310,8 @@ int mlx5_rx_descriptor_status(void *, uint16_t);
 int mlx5_tx_descriptor_status(void *, uint16_t);
 
 /* Vectorized version of mlx5_rxtx.c */
-int priv_check_raw_vec_tx_support(struct priv *);
-int priv_check_vec_tx_support(struct priv *);
+int priv_check_raw_vec_tx_support(struct priv *, struct rte_eth_dev *);
+int priv_check_vec_tx_support(struct priv *, struct rte_eth_dev *);
 int rxq_check_vec_support(struct mlx5_rxq_data *);
 int priv_check_vec_rx_support(struct priv *);
 uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t);
@@ -160,15 +160,15 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		uint16_t ret;
 
 		/* Transmit multi-seg packets in the head of pkts list. */
-		if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) &&
+		if ((txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
 		    NB_SEGS(pkts[nb_tx]) > 1)
 			nb_tx += txq_scatter_v(txq,
 					       &pkts[nb_tx],
 					       pkts_n - nb_tx);
 		n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
-		if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS))
+		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
 			n = txq_count_contig_single_seg(&pkts[nb_tx], n);
-		if (!(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
+		if (txq->offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP)
 			n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
 		ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
 		nb_tx += ret;
@@ -253,24 +253,20 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
  *
  * @param priv
  *   Pointer to private structure.
+ * @param dev
+ *   Pointer to rte_eth_dev structure.
  *
  * @return
  *   1 if supported, negative errno value if not.
  */
 int __attribute__((cold))
-priv_check_raw_vec_tx_support(struct priv *priv)
+priv_check_raw_vec_tx_support(__rte_unused struct priv *priv,
+			      struct rte_eth_dev *dev)
 {
-	uint16_t i;
-
-	/* All the configured queues should support. */
-	for (i = 0; i < priv->txqs_n; ++i) {
-		struct mlx5_txq_data *txq = (*priv->txqs)[i];
-
-		if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) ||
-		    !(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
-			break;
-	}
-	if (i != priv->txqs_n)
+	uint64_t offloads = dev->data->dev_conf.txmode.offloads;
+
+	/* Doesn't support any offload. */
+	if (offloads)
 		return -ENOTSUP;
 	return 1;
 }
@@ -280,17 +276,21 @@ priv_check_raw_vec_tx_support(struct priv *priv)
  *
  * @param priv
  *   Pointer to private structure.
+ * @param dev
+ *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
 int __attribute__((cold))
-priv_check_vec_tx_support(struct priv *priv)
+priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
 {
+	uint64_t offloads = dev->data->dev_conf.txmode.offloads;
+
 	if (!priv->config.tx_vec_en ||
 	    priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
 	    priv->config.mps != MLX5_MPW_ENHANCED ||
-	    priv->config.tso)
+	    offloads & ~MLX5_VEC_TX_OFFLOAD_CAP)
 		return -ENOTSUP;
 	return 1;
 }
@@ -40,6 +40,18 @@
 #include "mlx5_autoconf.h"
 #include "mlx5_prm.h"
 
+/* HW checksum offload capabilities of vectorized Tx. */
+#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
+	(DEV_TX_OFFLOAD_IPV4_CKSUM | \
+	 DEV_TX_OFFLOAD_UDP_CKSUM | \
+	 DEV_TX_OFFLOAD_TCP_CKSUM | \
+	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+
+/* HW offload capabilities of vectorized Tx. */
+#define MLX5_VEC_TX_OFFLOAD_CAP \
+	(MLX5_VEC_TX_CKSUM_OFFLOAD_CAP | \
+	 DEV_TX_OFFLOAD_MULTI_SEGS)
+
 /*
  * Compile time sanity check for vectorized functions.
  */
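``MLX5_VEC_TX_OFFLOAD_CAP`` serves as a feature gate: vectorized Tx is refused whenever a requested offload bit falls outside it. A small sketch of the test used in ``priv_check_vec_tx_support()`` (helper name hypothetical):

    #include <stdint.h>

    /* Returns 1 when every requested offload is vector-capable.
     * E.g. checksums alone pass, but adding DEV_TX_OFFLOAD_TCP_TSO
     * fails because TSO is not part of MLX5_VEC_TX_OFFLOAD_CAP. */
    static int
    tx_offloads_vec_capable(uint64_t offloads)
    {
    	return !(offloads & ~MLX5_VEC_TX_OFFLOAD_CAP);
    }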
@@ -115,6 +115,63 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 	}
 }
 
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   Supported Tx offloads.
+ */
+uint64_t
+mlx5_priv_get_tx_port_offloads(struct priv *priv)
+{
+	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
+			     DEV_TX_OFFLOAD_VLAN_INSERT);
+	struct mlx5_dev_config *config = &priv->config;
+
+	if (config->hw_csum)
+		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+			     DEV_TX_OFFLOAD_UDP_CKSUM |
+			     DEV_TX_OFFLOAD_TCP_CKSUM);
+	if (config->tso)
+		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+	if (config->tunnel_en) {
+		if (config->hw_csum)
+			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		if (config->tso)
+			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+				     DEV_TX_OFFLOAD_GRE_TNL_TSO);
+	}
+	return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param offloads
+ *   Per-queue offloads configuration.
+ *
+ * @return
+ *   1 if the configuration is valid, 0 otherwise.
+ */
+static int
+priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+{
+	uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads;
+	uint64_t port_supp_offloads = mlx5_priv_get_tx_port_offloads(priv);
+
+	/* There are no Tx offloads which are per queue. */
+	if ((offloads & port_supp_offloads) != offloads)
+		return 0;
+	if ((port_offloads ^ offloads) & port_supp_offloads)
+		return 0;
+	return 1;
+}
+
 /**
  * DPDK callback to configure a TX queue.
  *
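The XOR in ``priv_is_tx_queue_offloads_allowed()`` is worth unpacking: since mlx5 has no true per-queue Tx offloads, a queue may only request exactly the offloads the port was configured with, while bits unknown to the PMD may differ freely. A worked illustration with hypothetical values (``priv`` assumed from the enclosing PMD context):

    /* Assume the port was configured with CKSUM|TSO and the queue asks
     * for CKSUM only. The XOR leaves the TSO bit set; TSO is inside
     * port_supp_offloads, so the check fails and queue setup is refused. */
    uint64_t port_offloads = DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO;
    uint64_t queue_offloads = DEV_TX_OFFLOAD_TCP_CKSUM;
    uint64_t port_supp_offloads = mlx5_priv_get_tx_port_offloads(priv);

    int rejected = !!((port_offloads ^ queue_offloads) & port_supp_offloads);
    /* rejected == 1 here: per-queue divergence is not allowed. */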
@@ -143,6 +200,20 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	int ret = 0;
 
 	priv_lock(priv);
+	/*
+	 * Don't verify port offloads for application which
+	 * use the old API.
+	 */
+	if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
+	    !priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) {
+		ret = ENOTSUP;
+		ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
+		      "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
+		      (void *)dev, conf->offloads,
+		      dev->data->dev_conf.txmode.offloads,
+		      mlx5_priv_get_tx_port_offloads(priv));
+		goto out;
+	}
 	if (desc <= MLX5_TX_COMP_THRESH) {
 		WARN("%p: number of descriptors requested for TX queue %u"
 		     " must be higher than MLX5_TX_COMP_THRESH, using"
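Applications opting into the new API set ``ETH_TXQ_FLAGS_IGNORE`` and pass the queue's offloads in ``rte_eth_txconf``; legacy callers keep the old ``txq_flags`` semantics and bypass this validation. A minimal setup sketch (descriptor count, socket, and function name are illustrative):

    #include <rte_ethdev.h>

    /* Sketch: per-queue Tx setup under the new offloads API. The queue
     * offloads must agree with the port-level txmode.offloads, per the
     * mlx5_tx_queue_setup() check above. */
    static int
    setup_txq_new_api(uint16_t port_id, uint16_t queue_id, uint64_t offloads)
    {
    	struct rte_eth_txconf txconf = {
    		.txq_flags = ETH_TXQ_FLAGS_IGNORE, /* opt in to new API */
    		.offloads = offloads,
    	};

    	return rte_eth_tx_queue_setup(port_id, queue_id, 512,
    				      SOCKET_ID_ANY, &txconf);
    }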
@@ -579,6 +650,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	unsigned int inline_max_packet_sz;
 	eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
 	int is_empw_func = is_empw_burst_func(tx_pkt_burst);
+	int tso = !!(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_TCP_TSO);
 
 	txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
 		0 : config->txq_inline;
@@ -603,8 +675,6 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	txq_ctrl->txq.max_inline =
 		((txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
 		 RTE_CACHE_LINE_SIZE);
-	/* TSO and MPS can't be enabled concurrently. */
-	assert(!config->tso || !config->mps);
 	if (is_empw_func) {
 		/* To minimize the size of data set, avoid requesting
 		 * too large WQ.
@@ -614,7 +684,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 				  inline_max_packet_sz) +
 				 (RTE_CACHE_LINE_SIZE - 1)) /
 				RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
-	} else if (config->tso) {
+	} else if (tso) {
 		int inline_diff = txq_ctrl->txq.max_inline -
 				  max_tso_inline;
 
@@ -652,7 +722,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 				RTE_CACHE_LINE_SIZE;
 		}
 	}
-	if (config->tso) {
+	if (tso) {
 		txq_ctrl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
 		txq_ctrl->txq.max_inline = RTE_MAX(txq_ctrl->txq.max_inline,
 						   max_tso_inline);
@@ -692,7 +762,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
 	if (!tmpl)
 		return NULL;
 	assert(desc > MLX5_TX_COMP_THRESH);
-	tmpl->txq.flags = conf->txq_flags;
+	tmpl->txq.offloads = conf->offloads;
 	tmpl->priv = priv;
 	tmpl->socket = socket;
 	tmpl->txq.elts_n = log2above(desc);