net/sfc: move TSO header checks from Tx burst to Tx prepare

Tx offload checks should be done in Tx prepare.

Signed-off-by: Igor Romanov <igor.romanov@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Authored by Igor Romanov on 2019-04-02 10:28:39 +01:00; committed by Ferruh Yigit
parent 8c27fa78f1
commit a3895ef38c
4 changed files with 23 additions and 27 deletions
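
With the check moved into Tx prepare, a TCP header that starts beyond the NIC's TSO offset limit is rejected with EINVAL in rte_eth_tx_prepare() rather than being caught only at Tx burst time, where the EFX datapath silently dropped such packets. A minimal sketch of the generic DPDK prepare/burst pattern this relies on is shown below; the send_burst() helper and the port/queue values are illustrative only and are not part of this patch.

#include <stdio.h>

#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/*
 * Run the driver's per-packet offload checks (for net/sfc, ultimately
 * sfc_dp_tx_prepare_pkt() shown below) before handing packets to Tx burst.
 * Port 0 and queue 0 are illustrative values.
 */
static uint16_t
send_burst(struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep = rte_eth_tx_prepare(0, 0, pkts, nb_pkts);

	if (nb_prep < nb_pkts)
		printf("packet %u rejected in Tx prepare: %s\n",
		       nb_prep, rte_strerror(rte_errno));

	/* Transmit only the packets that passed the checks. */
	return rte_eth_tx_burst(0, 0, pkts, nb_prep);
}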

drivers/net/sfc/sfc_dp_tx.h

@@ -195,7 +195,8 @@ sfc_dp_find_tx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
 const struct sfc_dp_tx *sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq);
 
 static inline int
-sfc_dp_tx_prepare_pkt(struct rte_mbuf *m)
+sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
+		      uint32_t tso_tcp_header_offset_limit)
 {
 #ifdef RTE_LIBRTE_SFC_EFX_DEBUG
 	int ret;
@@ -209,10 +210,15 @@ sfc_dp_tx_prepare_pkt(struct rte_mbuf *m)
 		SFC_ASSERT(ret < 0);
 		return -ret;
 	}
-#else
-	RTE_SET_USED(m);
 #endif
 
+	if (m->ol_flags & PKT_TX_TCP_SEG) {
+		unsigned int tcph_off = m->l2_len + m->l3_len;
+
+		if (unlikely(tcph_off > tso_tcp_header_offset_limit))
+			return EINVAL;
+	}
+
 	return 0;
 }

drivers/net/sfc/sfc_ef10_tx.c

@@ -320,9 +320,10 @@ sfc_ef10_try_reap(struct sfc_ef10_txq * const txq, unsigned int added,
 }
 
 static uint16_t
-sfc_ef10_prepare_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+sfc_ef10_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		      uint16_t nb_pkts)
 {
+	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
 	uint16_t i;
 
 	for (i = 0; i < nb_pkts; i++) {
@@ -347,7 +348,8 @@ sfc_ef10_prepare_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 			}
 		}
 #endif
-		ret = sfc_dp_tx_prepare_pkt(m);
+		ret = sfc_dp_tx_prepare_pkt(m,
+				txq->tso_tcp_header_offset_limit);
 		if (unlikely(ret != 0)) {
 			rte_errno = ret;
 			break;
@@ -378,9 +380,6 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
 	struct rte_mbuf *m_seg_to_free_up_to = first_m_seg;
 	bool eop;
 
-	if (unlikely(tcph_off > txq->tso_tcp_header_offset_limit))
-		return EMSGSIZE;
-
 	/*
 	 * Preliminary estimation of required DMA descriptors, including extra
 	 * descriptor for TSO header that is needed when the header is

drivers/net/sfc/sfc_tso.c

@@ -103,18 +103,9 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
 	size_t nh_off = m->l2_len; /* IP header offset */
 	size_t tcph_off = m->l2_len + m->l3_len; /* TCP header offset */
 	size_t header_len = m->l2_len + m->l3_len + m->l4_len;
-	const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);
 
 	idx += SFC_EF10_TSO_OPT_DESCS_NUM;
 
-	/*
-	 * The TCP header must start at most 208 bytes into the frame.
-	 * If it starts later than this then the NIC won't realise
-	 * it's a TCP packet and TSO edits won't be applied
-	 */
-	if (unlikely(tcph_off > encp->enc_tx_tso_tcp_header_offset_limit))
-		return EMSGSIZE;
-
 	header_paddr = rte_pktmbuf_iova(m);
 
 	/*

drivers/net/sfc/sfc_tx.c

@@ -698,15 +698,19 @@ sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
 }
 
 static uint16_t
-sfc_efx_prepare_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+sfc_efx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		     uint16_t nb_pkts)
 {
+	struct sfc_dp_txq *dp_txq = tx_queue;
+	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);
 	uint16_t i;
 
 	for (i = 0; i < nb_pkts; i++) {
 		int ret;
 
-		ret = sfc_dp_tx_prepare_pkt(tx_pkts[i]);
+		ret = sfc_dp_tx_prepare_pkt(tx_pkts[i],
+				encp->enc_tx_tso_tcp_header_offset_limit);
 		if (unlikely(ret != 0)) {
 			rte_errno = ret;
 			break;
@@ -776,14 +780,10 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			 */
 			if (sfc_efx_tso_do(txq, added, &m_seg, &in_off, &pend,
 					   &pkt_descs, &pkt_len) != 0) {
-				/* We may have reached this place for
-				 * one of the following reasons:
-				 *
-				 * 1) Packet header linearization is needed
-				 *    and the header length is greater
-				 *    than SFC_TSOH_STD_LEN
-				 * 2) TCP header starts at more then
-				 *    208 bytes into the frame
+				/* We may have reached this place if packet
+				 * header linearization is needed but the
+				 * header length is greater than
+				 * SFC_TSOH_STD_LEN
 				 *
 				 * We will deceive RTE saying that we have sent
 				 * the packet, but we will actually drop it.