net/mlx5: support hardware TSO

Implement support for hardware TSO.

Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Author: Shahaf Shuler
Date: 2017-03-02 11:01:31 +02:00
Committer: Ferruh Yigit
Commit: 3f13f8c23a (parent: 9633482a6e)

9 changed files with 160 additions and 16 deletions

doc/guides/nics/features/mlx5.ini

@@ -11,6 +11,7 @@ Queue start/stop = Y
MTU update = Y
Jumbo frame = Y
Scattered Rx = Y
TSO = Y
Promiscuous mode = Y
Allmulticast mode = Y
Unicast MAC filter = Y

doc/guides/nics/mlx5.rst

@@ -90,6 +90,7 @@ Features
- Secondary process TX is supported.
- KVM and VMware ESX SR-IOV modes are supported.
- RSS hash result is supported.
- Hardware TSO.
Limitations
-----------
@@ -186,9 +187,20 @@ Run-time configuration
save PCI bandwidth and improve performance at the cost of a slightly
higher CPU usage.
This option cannot be used in conjunction with ``tso`` below. When ``tso``
is set, ``txq_mpw_en`` is disabled.
It is currently only supported on the ConnectX-4 Lx and ConnectX-5
families of adapters. Enabled by default.
- ``tso`` parameter [int]
A nonzero value enables hardware TSO.
When hardware TSO is enabled, packets marked with TCP segmentation
offload will be divided into segments by the hardware.
Disabled by default.
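
For illustration (beyond this patch), the parameter is given as a
device argument, e.g. ``-w 0000:83:00.2,tso=1`` (hypothetical PCI
address), and the application then requests segmentation per packet
through the standard mbuf TSO fields before calling
``rte_eth_tx_burst()``. A minimal sketch for a plain TCP/IPv4 packet,
assuming port/queue setup and header construction already exist::

    #include <rte_mbuf.h>
    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_tcp.h>

    /* Hypothetical helper: mark an mbuf for TCP segmentation offload. */
    static void
    mark_for_tso(struct rte_mbuf *m, uint16_t mss)
    {
        m->l2_len = sizeof(struct ether_hdr); /* Ethernet header. */
        m->l3_len = sizeof(struct ipv4_hdr);  /* IPv4 header. */
        m->l4_len = sizeof(struct tcp_hdr);   /* TCP header. */
        m->tso_segsz = mss;                   /* Payload bytes per segment. */
        m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
    }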
Prerequisites
-------------

drivers/net/mlx5/mlx5.c

@@ -84,6 +84,9 @@
/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"
/* Device parameter to enable hardware TSO offload. */
#define MLX5_TSO "tso"
/**
* Retrieve integer value from environment variable.
*
@@ -292,6 +295,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
priv->txqs_inline = tmp;
} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
priv->mps &= !!tmp; /* Enable MPW only if HW supports */
} else if (strcmp(MLX5_TSO, key) == 0) {
priv->tso = !!tmp;
} else {
WARN("%s: unknown parameter", key);
return -EINVAL;
@@ -318,6 +323,7 @@ mlx5_args(struct priv *priv, struct rte_devargs *devargs)
MLX5_TXQ_INLINE,
MLX5_TXQS_MIN_INLINE,
MLX5_TXQ_MPW_EN,
MLX5_TSO,
NULL,
};
struct rte_kvargs *kvlist;
@@ -481,6 +487,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
IBV_EXP_DEVICE_ATTR_RX_HASH |
IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS |
IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN |
IBV_EXP_DEVICE_ATTR_TSO_CAPS |
0;
DEBUG("using port %u (%08" PRIx32 ")", port, test);
@@ -582,11 +589,22 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv_get_num_vfs(priv, &num_vfs);
priv->sriov = (num_vfs || sriov);
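/*
 * Keep TSO enabled only if it was requested through the "tso"
 * devarg and the device reports TSO support (nonzero maximum
 * payload) for raw Ethernet QPs.
 */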
priv->tso = ((priv->tso) &&
(exp_device_attr.tso_caps.max_tso > 0) &&
(exp_device_attr.tso_caps.supported_qpts &
(1 << IBV_QPT_RAW_ETH)));
if (priv->tso)
priv->max_tso_payload_sz =
exp_device_attr.tso_caps.max_tso;
if (priv->mps && !mps) {
ERROR("multi-packet send not supported on this device"
" (" MLX5_TXQ_MPW_EN ")");
err = ENOTSUP;
goto port_error;
} else if (priv->mps && priv->tso) {
WARN("multi-packet send not supported in conjunction "
"with TSO. MPS disabled");
priv->mps = 0;
}
/* Allocate and register default RSS hash keys. */
priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n,

drivers/net/mlx5/mlx5.h

@@ -126,6 +126,8 @@ struct priv {
unsigned int mps:1; /* Whether multi-packet send is supported. */
unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */
unsigned int pending_alarm:1; /* An alarm is pending. */
unsigned int tso:1; /* Whether TSO is supported. */
unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
unsigned int txq_inline; /* Maximum packet size for inlining. */
unsigned int txqs_inline; /* Queue number threshold for inlining. */
/* RX/TX queues. */

drivers/net/mlx5/mlx5_defs.h

@@ -79,4 +79,7 @@
/* Maximum number of extended statistics counters. */
#define MLX5_MAX_XSTATS 32
/* Maximum Packet headers size (L2+L3+L4) for TSO. */
#define MLX5_MAX_TSO_HEADER 128
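/* E.g. Ethernet (14) + VLAN (4) + IPv4 (20) + TCP (20) = 58 bytes for a
 * plain TCP/IPv4 flow, leaving headroom for IP and TCP options. */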
#endif /* RTE_PMD_MLX5_DEFS_H_ */

drivers/net/mlx5/mlx5_ethdev.c

@@ -693,6 +693,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
(DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM);
if (priv->tso)
info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
if (priv_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
/* FIXME: RETA update/query API expects the callee to know the size of
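
For illustration (beyond this patch), an application discovers this
offload through the usual ethdev query; a minimal sketch, assuming
``port_id`` refers to an initialized mlx5 port:

    #include <stdio.h>
    #include <rte_ethdev.h>

    struct rte_eth_dev_info dev_info;

    /* Fill dev_info and test the advertised TX offload capabilities. */
    rte_eth_dev_info_get(port_id, &dev_info);
    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO)
        printf("port %u supports hardware TSO\n", port_id);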

drivers/net/mlx5/mlx5_rxtx.c

@@ -441,6 +441,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
const unsigned int elts_n = 1 << txq->elts_n;
unsigned int i = 0;
unsigned int j = 0;
unsigned int k = 0;
unsigned int max;
uint16_t max_wqe;
unsigned int comp;
@@ -468,8 +469,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uintptr_t addr;
uint64_t naddr;
uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
uint16_t tso_header_sz = 0;
uint16_t ehdr;
uint8_t cs_flags = 0;
uint64_t tso = 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
uint32_t total_length = 0;
#endif
@@ -541,14 +544,74 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
length -= pkt_inline_sz;
addr += pkt_inline_sz;
}
if (txq->tso_en) {
tso = buf->ol_flags & PKT_TX_TCP_SEG;
if (tso) {
uintptr_t end = (uintptr_t)
(((uintptr_t)txq->wqes) +
(1 << txq->wqe_n) *
MLX5_WQE_SIZE);
unsigned int copy_b;
uint8_t vlan_sz = (buf->ol_flags &
PKT_TX_VLAN_PKT) ? 4 : 0;
tso_header_sz = buf->l2_len + vlan_sz +
buf->l3_len + buf->l4_len;
if (unlikely(tso_header_sz >
MLX5_MAX_TSO_HEADER))
break;
copy_b = tso_header_sz - pkt_inline_sz;
/* First seg must contain all headers. */
assert(copy_b <= length);
raw += MLX5_WQE_DWORD_SIZE;
if (copy_b &&
((end - (uintptr_t)raw) > copy_b)) {
uint16_t n = (MLX5_WQE_DS(copy_b) -
1 + 3) / 4;
if (unlikely(max_wqe < n))
break;
max_wqe -= n;
rte_memcpy((void *)raw,
(void *)addr, copy_b);
addr += copy_b;
length -= copy_b;
pkt_inline_sz += copy_b;
/*
* Another DWORD will be added
* in the inline part.
*/
raw += MLX5_WQE_DS(copy_b) *
MLX5_WQE_DWORD_SIZE -
MLX5_WQE_DWORD_SIZE;
} else {
/* NOP WQE. */
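/*
 * Not enough contiguous WQ space is left to inline the headers:
 * post a single NOP WQE, undo the consumption of this packet
 * (pkts--, pkts_n++, elts_head rewind) so it is retried on the
 * next iteration, and count the NOP in k for completion
 * accounting.
 */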
wqe->ctrl = (rte_v128u32_t){
htonl(txq->wqe_ci << 8),
htonl(txq->qp_num_8s | 1),
0,
0,
};
ds = 1;
total_length = 0;
pkts--;
pkts_n++;
elts_head = (elts_head - 1) &
(elts_n - 1);
k++;
goto next_wqe;
}
}
}
/* Inline if enough room. */
- if (txq->max_inline) {
+ if (txq->inline_en || tso) {
uintptr_t end = (uintptr_t)
(((uintptr_t)txq->wqes) +
(1 << txq->wqe_n) * MLX5_WQE_SIZE);
unsigned int max_inline = txq->max_inline *
RTE_CACHE_LINE_SIZE -
- MLX5_WQE_DWORD_SIZE;
+ (pkt_inline_sz - 2);
uintptr_t addr_end = (addr + max_inline) &
~(RTE_CACHE_LINE_SIZE - 1);
unsigned int copy_b = (addr_end > addr) ?
@@ -567,6 +630,18 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (unlikely(max_wqe < n))
break;
max_wqe -= n;
if (tso) {
uint32_t inl =
htonl(copy_b | MLX5_INLINE_SEG);
pkt_inline_sz =
MLX5_WQE_DS(tso_header_sz) *
MLX5_WQE_DWORD_SIZE;
rte_memcpy((void *)raw,
(void *)&inl, sizeof(inl));
raw += sizeof(inl);
pkt_inline_sz += sizeof(inl);
}
rte_memcpy((void *)raw, (void *)addr, copy_b);
addr += copy_b;
length -= copy_b;
@@ -667,18 +742,34 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
next_pkt:
++i;
/* Initialize known and common part of the WQE structure. */
- wqe->ctrl = (rte_v128u32_t){
- htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
- htonl(txq->qp_num_8s | ds),
- 0,
- 0,
- };
- wqe->eseg = (rte_v128u32_t){
- 0,
- cs_flags,
- 0,
- (ehdr << 16) | htons(pkt_inline_sz),
- };
+ if (tso) {
+ wqe->ctrl = (rte_v128u32_t){
+ htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO),
+ htonl(txq->qp_num_8s | ds),
+ 0,
+ 0,
+ };
+ wqe->eseg = (rte_v128u32_t){
+ 0,
+ cs_flags | (htons(buf->tso_segsz) << 16),
+ 0,
+ (ehdr << 16) | htons(tso_header_sz),
+ };
+ } else {
+ wqe->ctrl = (rte_v128u32_t){
+ htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
+ htonl(txq->qp_num_8s | ds),
+ 0,
+ 0,
+ };
+ wqe->eseg = (rte_v128u32_t){
+ 0,
+ cs_flags,
+ 0,
+ (ehdr << 16) | htons(pkt_inline_sz),
+ };
+ }
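/*
 * A TSO WQE uses MLX5_OPCODE_TSO, carries the MSS (buf->tso_segsz)
 * in its Ethernet segment and advertises the full L2+L3+L4 header
 * size rather than the raw inlined byte count.
 */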
next_wqe:
txq->wqe_ci += (ds + 3) / 4;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent bytes counter. */
@@ -686,10 +777,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
#endif
} while (pkts_n);
/* Take a shortcut if nothing must be sent. */
- if (unlikely(i == 0))
+ if (unlikely((i + k) == 0))
return 0;
/* Check whether completion threshold has been reached. */
- comp = txq->elts_comp + i + j;
+ comp = txq->elts_comp + i + j + k;
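/* i counts packets, j additional mbuf segments and k NOP WQEs;
 * all three feed the completion threshold. */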
if (comp >= MLX5_TX_COMP_THRESH) {
volatile struct mlx5_wqe_ctrl *w =
(volatile struct mlx5_wqe_ctrl *)wqe;

drivers/net/mlx5/mlx5_rxtx.h

@@ -254,6 +254,8 @@ struct txq {
uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
uint16_t inline_en:1; /* When set inline is enabled. */
uint16_t tso_en:1; /* When set hardware TSO is enabled. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
volatile void *wqes; /* Work queue (use volatile to write into). */

drivers/net/mlx5/mlx5_txq.c

@@ -342,6 +342,19 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
RTE_CACHE_LINE_SIZE);
attr.init.cap.max_inline_data =
tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
tmpl.txq.inline_en = 1;
}
if (priv->tso) {
uint16_t max_tso_inline = ((MLX5_MAX_TSO_HEADER +
(RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
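/* With the usual 64-byte cache lines: (128 + 63) / 64 = 2 cache
 * lines, i.e. up to 128 bytes of TSO header inlined in the WQE. */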
attr.init.max_tso_header =
max_tso_inline * RTE_CACHE_LINE_SIZE;
attr.init.comp_mask |= IBV_EXP_QP_INIT_ATTR_MAX_TSO_HEADER;
tmpl.txq.max_inline = RTE_MAX(tmpl.txq.max_inline,
max_tso_inline);
tmpl.txq.tso_en = 1;
}
tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {