net/idpf: support Tx offloading
Add Tx offloading support:
 - support TSO for single queue model and split queue model.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
parent 011bc4a55e
commit ed5b21acc6
@@ -8,6 +8,7 @@
 ;
 [Features]
 MTU update           = Y
+TSO                  = P
 L3 checksum offload  = P
 L4 checksum offload  = P
 Linux                = Y
@@ -67,7 +67,9 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
 		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
-	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+	dev_info->tx_offload_capa =
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
 		.tx_free_thresh = IDPF_DEFAULT_TX_FREE_THRESH,
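For readers coming at this from the application side: the capability added above is consumed through the standard ethdev configuration path. The sketch below is illustrative only (the helper name, port id and queue counts are placeholders, not part of this patch); it shows how an application would check tx_offload_capa and request TCP TSO at configure time.

#include <errno.h>
#include <rte_ethdev.h>

/* Hypothetical helper, not part of the patch: enable TCP TSO on a port
 * only when the PMD advertises it, as idpf now does.
 */
static int
enable_tcp_tso(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf = {0};
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0)
		return -ENOTSUP;

	conf.txmode.offloads = RTE_ETH_TX_OFFLOAD_TCP_TSO |
			       RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}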
@@ -1506,6 +1506,49 @@ idpf_split_tx_free(struct idpf_tx_queue *cq)
 	cq->tx_tail = next;
 }
 
+/* Check if the context descriptor is needed for TX offloading */
+static inline uint16_t
+idpf_calc_context_desc(uint64_t flags)
+{
+	if ((flags & RTE_MBUF_F_TX_TCP_SEG) != 0)
+		return 1;
+
+	return 0;
+}
+
+/* set TSO context descriptor
+ */
+static inline void
+idpf_set_splitq_tso_ctx(struct rte_mbuf *mbuf,
+			union idpf_tx_offload tx_offload,
+			volatile union idpf_flex_tx_ctx_desc *ctx_desc)
+{
+	uint16_t cmd_dtype;
+	uint32_t tso_len;
+	uint8_t hdr_len;
+
+	if (tx_offload.l4_len == 0) {
+		PMD_TX_LOG(DEBUG, "L4 length set to 0");
+		return;
+	}
+
+	hdr_len = tx_offload.l2_len +
+		tx_offload.l3_len +
+		tx_offload.l4_len;
+	cmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
+		IDPF_TX_FLEX_CTX_DESC_CMD_TSO;
+	tso_len = mbuf->pkt_len - hdr_len;
+
+	ctx_desc->tso.qw1.cmd_dtype = rte_cpu_to_le_16(cmd_dtype);
+	ctx_desc->tso.qw0.hdr_len = hdr_len;
+	ctx_desc->tso.qw0.mss_rt =
+		rte_cpu_to_le_16((uint16_t)mbuf->tso_segsz &
+				 IDPF_TXD_FLEX_CTX_MSS_RT_M);
+	ctx_desc->tso.qw0.flex_tlen =
+		rte_cpu_to_le_32(tso_len &
+				 IDPF_TXD_FLEX_CTX_MSS_RT_M);
+}
+
 uint16_t
 idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		      uint16_t nb_pkts)
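A quick worked example of the arithmetic these helpers implement (the numbers are illustrative, not from the patch): a TCP/IPv4 TSO packet with 14 + 20 + 20 byte headers has hdr_len = 54, the TSO payload length written to the context descriptor is pkt_len - hdr_len, and the packet consumes one context descriptor in addition to its data descriptors.

#include <stdint.h>
#include <stdio.h>

/* Illustrative numbers only, mirroring idpf_calc_context_desc() and
 * idpf_set_splitq_tso_ctx() above. */
int
main(void)
{
	uint16_t nb_segs = 3;               /* mbuf chain length          */
	uint16_t nb_ctx = 1;                /* RTE_MBUF_F_TX_TCP_SEG set  */
	uint8_t hdr_len = 14 + 20 + 20;     /* l2_len + l3_len + l4_len   */
	uint32_t tso_len = 9000 - hdr_len;  /* pkt_len - hdr_len          */

	printf("ring slots needed: %u, TSO payload: %u bytes\n",
	       nb_segs + nb_ctx, tso_len);
	return 0;
}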
@@ -1514,11 +1557,14 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	volatile struct idpf_flex_tx_sched_desc *txr;
 	volatile struct idpf_flex_tx_sched_desc *txd;
 	struct idpf_tx_entry *sw_ring;
+	union idpf_tx_offload tx_offload = {0};
 	struct idpf_tx_entry *txe, *txn;
 	uint16_t nb_used, tx_id, sw_id;
 	struct rte_mbuf *tx_pkt;
 	uint16_t nb_to_clean;
 	uint16_t nb_tx = 0;
+	uint64_t ol_flags;
+	uint16_t nb_ctx;
 
 	if (unlikely(txq == NULL) || unlikely(!txq->q_started))
 		return nb_tx;
@@ -1548,7 +1594,29 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 		if (txq->nb_free < tx_pkt->nb_segs)
 			break;
-		nb_used = tx_pkt->nb_segs;
+
+		ol_flags = tx_pkt->ol_flags;
+		tx_offload.l2_len = tx_pkt->l2_len;
+		tx_offload.l3_len = tx_pkt->l3_len;
+		tx_offload.l4_len = tx_pkt->l4_len;
+		tx_offload.tso_segsz = tx_pkt->tso_segsz;
+		/* Calculate the number of context descriptors needed. */
+		nb_ctx = idpf_calc_context_desc(ol_flags);
+		nb_used = tx_pkt->nb_segs + nb_ctx;
+
+		/* context descriptor */
+		if (nb_ctx != 0) {
+			volatile union idpf_flex_tx_ctx_desc *ctx_desc =
+			(volatile union idpf_flex_tx_ctx_desc *)&txr[tx_id];
+
+			if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)
+				idpf_set_splitq_tso_ctx(tx_pkt, tx_offload,
+							ctx_desc);
+
+			tx_id++;
+			if (tx_id == txq->nb_tx_desc)
+				tx_id = 0;
+		}
 
 		do {
 			txd = &txr[tx_id];
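Both transmit paths read the per-packet offload metadata (ol_flags, l2_len, l3_len, l4_len, tso_segsz) straight from the mbuf, so a packet is only segmented if the application filled those fields before the burst call. A minimal sketch using standard rte_mbuf/rte_net types, nothing idpf-specific; the helper name and the assumption of an untagged TCP/IPv4 packet without options are mine.

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>

/* Hypothetical helper: mark an already-built, untagged TCP/IPv4 mbuf
 * for TSO before it is passed to the Tx burst function.
 */
static void
request_tcp_tso(struct rte_mbuf *m, uint16_t mss)
{
	m->l2_len = sizeof(struct rte_ether_hdr); /* 14 */
	m->l3_len = sizeof(struct rte_ipv4_hdr);  /* 20, no options */
	m->l4_len = sizeof(struct rte_tcp_hdr);   /* 20, no options */
	m->tso_segsz = mss;
	m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_TCP_SEG;
}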
@@ -1799,14 +1867,17 @@ idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 {
 	volatile struct idpf_flex_tx_desc *txd;
 	volatile struct idpf_flex_tx_desc *txr;
+	union idpf_tx_offload tx_offload = {0};
 	struct idpf_tx_entry *txe, *txn;
 	struct idpf_tx_entry *sw_ring;
 	struct idpf_tx_queue *txq;
 	struct rte_mbuf *tx_pkt;
 	struct rte_mbuf *m_seg;
 	uint64_t buf_dma_addr;
+	uint64_t ol_flags;
 	uint16_t tx_last;
 	uint16_t nb_used;
+	uint16_t nb_ctx;
 	uint16_t td_cmd;
 	uint16_t tx_id;
 	uint16_t nb_tx;
@@ -1833,11 +1904,19 @@ idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		tx_pkt = *tx_pkts++;
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
+		ol_flags = tx_pkt->ol_flags;
+		tx_offload.l2_len = tx_pkt->l2_len;
+		tx_offload.l3_len = tx_pkt->l3_len;
+		tx_offload.l4_len = tx_pkt->l4_len;
+		tx_offload.tso_segsz = tx_pkt->tso_segsz;
+		/* Calculate the number of context descriptors needed. */
+		nb_ctx = idpf_calc_context_desc(ol_flags);
+
 		/* The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus 1 context descriptor if needed.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs);
+		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
 		tx_last = (uint16_t)(tx_id + nb_used - 1);
 
 		/* Circular ring */
@@ -1865,6 +1944,29 @@ idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			}
 		}
 
+		if (nb_ctx != 0) {
+			/* Setup TX context descriptor if required */
+			volatile union idpf_flex_tx_ctx_desc *ctx_txd =
+				(volatile union idpf_flex_tx_ctx_desc *)
+							&txr[tx_id];
+
+			txn = &sw_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+			if (txe->mbuf != NULL) {
+				rte_pktmbuf_free_seg(txe->mbuf);
+				txe->mbuf = NULL;
+			}
+
+			/* TSO enabled */
+			if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)
+				idpf_set_splitq_tso_ctx(tx_pkt, tx_offload,
+							ctx_txd);
+
+			txe->last_id = tx_last;
+			tx_id = txe->next_id;
+			txe = txn;
+		}
+
 		m_seg = tx_pkt;
 		do {
 			txd = &txr[tx_id];
@@ -1924,6 +2026,9 @@ uint16_t
 idpf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 	       uint16_t nb_pkts)
 {
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+	int ret;
+#endif
 	int i;
 	uint64_t ol_flags;
 	struct rte_mbuf *m;
@@ -1938,12 +2043,31 @@ idpf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 				rte_errno = EINVAL;
 				return i;
 			}
-		}
+		} else if ((m->tso_segsz < IDPF_MIN_TSO_MSS) ||
+			   (m->tso_segsz > IDPF_MAX_TSO_MSS) ||
+			   (m->pkt_len > IDPF_MAX_TSO_FRAME_SIZE)) {
+			/* MSS outside the range are considered malicious */
+			rte_errno = EINVAL;
+			return i;
+		}
+
+		if ((ol_flags & IDPF_TX_OFFLOAD_NOTSUP_MASK) != 0) {
+			rte_errno = ENOTSUP;
+			return i;
+		}
 
 		if (m->pkt_len < IDPF_MIN_FRAME_SIZE) {
 			rte_errno = EINVAL;
 			return i;
 		}
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		ret = rte_validate_tx_offload(m);
+		if (ret != 0) {
+			rte_errno = -ret;
+			return i;
+		}
+#endif
 	}
 
 	return i;
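The checks added to idpf_prep_pkts() (MSS range, maximum TSO frame size, unsupported offload flags) surface to applications through the generic rte_eth_tx_prepare() stage, assuming the driver installs this function as its tx_pkt_prepare callback, which this excerpt does not show. A minimal call-pattern sketch; the helper name and the error handling are illustrative.

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_errno.h>

/* Hypothetical send loop: run the prepare-stage checks before bursting. */
static uint16_t
send_with_prepare(uint16_t port_id, uint16_t queue_id,
		  struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_ok;

	/* Prepare stops at the first bad packet; rte_errno is EINVAL for
	 * an out-of-range MSS or frame size, ENOTSUP for unsupported flags.
	 */
	nb_ok = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
	if (nb_ok != nb_pkts)
		printf("packet %u rejected: %s\n", nb_ok,
		       rte_strerror(rte_errno));

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_ok);
}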
@@ -23,6 +23,16 @@
 
 #define IDPF_TX_MAX_MTU_SEG	10
 
+#define IDPF_MIN_TSO_MSS	88
+#define IDPF_MAX_TSO_MSS	9728
+#define IDPF_MAX_TSO_FRAME_SIZE	262143
+#define IDPF_TX_MAX_MTU_SEG	10
+
+#define IDPF_TX_OFFLOAD_MASK	RTE_MBUF_F_TX_TCP_SEG
+
+#define IDPF_TX_OFFLOAD_NOTSUP_MASK \
+		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK)
+
 #define IDPF_GET_PTYPE_SIZE(p) \
 	(sizeof(struct virtchnl2_ptype) + \
	 (((p)->proto_id_count ? ((p)->proto_id_count - 1) : 0) * sizeof((p)->proto_id[0])))
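A note on the mask pair above: RTE_MBUF_F_TX_OFFLOAD_MASK covers every Tx offload flag defined for the mbuf, so XOR-ing out the supported bit(s) leaves exactly the flags the driver cannot handle, which the prep stage then rejects with ENOTSUP. A small stand-alone sketch of that check; the EXAMPLE_ macro names and the rejected-flag example are mine.

#include <stdint.h>
#include <rte_mbuf.h>

#define EXAMPLE_TX_OFFLOAD_MASK		RTE_MBUF_F_TX_TCP_SEG
#define EXAMPLE_TX_OFFLOAD_NOTSUP_MASK \
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ EXAMPLE_TX_OFFLOAD_MASK)

/* Returns 1 when every requested Tx offload flag is supported. */
static int
tx_flags_supported(uint64_t ol_flags)
{
	/* RTE_MBUF_F_TX_TCP_SEG passes; e.g. RTE_MBUF_F_TX_VLAN would not. */
	return (ol_flags & EXAMPLE_TX_OFFLOAD_NOTSUP_MASK) == 0;
}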
@@ -115,6 +125,18 @@ struct idpf_tx_queue {
 	struct idpf_tx_queue *complq;
 };
 
+/* Offload features */
+union idpf_tx_offload {
+	uint64_t data;
+	struct {
+		uint64_t l2_len:7; /* L2 (MAC) Header Length. */
+		uint64_t l3_len:9; /* L3 (IP) Header Length. */
+		uint64_t l4_len:8; /* L4 Header Length. */
+		uint64_t tso_segsz:16; /* TCP TSO segment size */
+		/* uint64_t unused : 24; */
+	};
+};
+
 struct idpf_rxq_ops {
 	void (*release_mbufs)(struct idpf_rx_queue *rxq);
 };
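The bit-field union added above packs all four header/MSS values into a single 64-bit word (7 + 9 + 8 + 16 = 40 bits used, 24 spare), which keeps the per-packet offload state cheap to copy around the transmit loops. A stand-alone illustration of the same layout; the mirror type and the field values are examples, not driver code.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Stand-alone mirror of union idpf_tx_offload, for illustration only. */
union tx_offload_example {
	uint64_t data;
	struct {
		uint64_t l2_len:7;
		uint64_t l3_len:9;
		uint64_t l4_len:8;
		uint64_t tso_segsz:16;
		/* 24 bits unused */
	};
};

int
main(void)
{
	union tx_offload_example off = { .data = 0 };

	off.l2_len = 14;      /* Ethernet */
	off.l3_len = 20;      /* IPv4     */
	off.l4_len = 20;      /* TCP      */
	off.tso_segsz = 1448; /* MSS      */

	/* All four fields now live in one 64-bit word. */
	printf("packed: 0x%016" PRIx64 "\n", off.data);
	return 0;
}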