net/ice: fix Tx when TSO is enabled

Hardware limits the maximum data buffer size per Tx descriptor to
(16K-1)B. When TSO is enabled, an mbuf data segment may exceed this
limit, which the NIC treats as malicious behavior. Fix this by using
additional Tx descriptors to split such large buffers.

Fixes: 17c7d0f9d6a4 ("net/ice: support basic Rx/Tx")
Cc: stable@dpdk.org

Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Author:    Xiaoyun Li <xiaoyun.li@intel.com>
Date:      2019-12-26 14:54:28 +08:00
Committer: Ferruh Yigit
Parent:    e1b8ed33c5
Commit:    f1514bcb27

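For readers skimming the hunks below, the arithmetic behind the fix is: the descriptor's buffer-size field can express at most (16K-1) = 16383 bytes, so a TSO segment larger than that needs ceil(data_len / 16383) data descriptors, which is what the new ice_calc_pkt_desc() helper computes over the mbuf chain. A minimal standalone sketch of that calculation (MAX_DATA_PER_TXD and DIV_ROUND_UP here are local stand-ins mirroring the driver's ICE_MAX_DATA_PER_TXD and DIV_ROUND_UP, not the driver code itself):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for ICE_MAX_DATA_PER_TXD: (16K-1)B per Tx data descriptor. */
#define MAX_DATA_PER_TXD 16383u

/* Round-up integer division, mirroring the driver's DIV_ROUND_UP. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    /* A 64KB TSO segment: 4 * 16383 = 65532 < 65536, so 5 descriptors. */
    uint32_t seg_len = 65536;

    printf("descriptors needed: %u\n",
           (unsigned int)DIV_ROUND_UP(seg_len, MAX_DATA_PER_TXD));
    return 0;
}

Without the fix, such a segment would be programmed into a single descriptor whose size exceeds the 14-bit buffer-size field, the "malicious behavior" the commit message refers to.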

@@ -2421,6 +2421,24 @@ ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
     return ctx_desc;
 }

+/* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
+#define ICE_MAX_DATA_PER_TXD \
+    (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
+
+/* Calculate the number of TX descriptors needed for each pkt */
+static inline uint16_t
+ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
+{
+    struct rte_mbuf *txd = tx_pkt;
+    uint16_t count = 0;
+
+    while (txd != NULL) {
+        count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
+        txd = txd->next;
+    }
+
+    return count;
+}
 uint16_t
 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
@@ -2440,6 +2458,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
     uint32_t td_offset = 0;
     uint32_t td_tag = 0;
     uint16_t tx_last;
+    uint16_t slen;
     uint64_t buf_dma_addr;
     uint64_t ol_flags;
     union ice_tx_offload tx_offload = {0};
@@ -2471,8 +2490,15 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
         /* The number of descriptors that must be allocated for
          * a packet equals to the number of the segments of that
          * packet plus the number of context descriptor if needed.
+         * Recalculate the needed tx descs when TSO enabled in case
+         * the mbuf data size exceeds max data size that hw allows
+         * per tx desc.
          */
-        nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+        if (ol_flags & PKT_TX_TCP_SEG)
+            nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
+                                 nb_ctx);
+        else
+            nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
         tx_last = (uint16_t)(tx_id + nb_used - 1);

         /* Circular ring */
@@ -2562,15 +2588,37 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
             txe->mbuf = m_seg;

             /* Setup TX Descriptor */
+            slen = m_seg->data_len;
             buf_dma_addr = rte_mbuf_data_iova(m_seg);
+
+            while ((ol_flags & PKT_TX_TCP_SEG) &&
+                   unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
+                txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
+                txd->cmd_type_offset_bsz =
+                rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
+                ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
+                ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
+                ((uint64_t)ICE_MAX_DATA_PER_TXD <<
+                 ICE_TXD_QW1_TX_BUF_SZ_S) |
+                ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
+
+                buf_dma_addr += ICE_MAX_DATA_PER_TXD;
+                slen -= ICE_MAX_DATA_PER_TXD;
+
+                txe->last_id = tx_last;
+                tx_id = txe->next_id;
+                txe = txn;
+                txd = &tx_ring[tx_id];
+                txn = &sw_ring[txe->next_id];
+            }
+
             txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
             txd->cmd_type_offset_bsz =
                 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
-                ((uint64_t)td_cmd  << ICE_TXD_QW1_CMD_S) |
+                ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
                 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
-                ((uint64_t)m_seg->data_len <<
-                 ICE_TXD_QW1_TX_BUF_SZ_S) |
-                ((uint64_t)td_tag  << ICE_TXD_QW1_L2TAG1_S));
+                ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
+                ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));

             txe->last_id = tx_last;
             tx_id = txe->next_id;