net/sfc: support tunnel TSO on EF10 native Tx datapath

Handle VXLAN and GENEVE TSO on EF10 native Tx datapath.
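
The feature surfaces to applications through the standard ethdev offload flags. A minimal sketch of enabling it at device configuration time (assuming the DPDK 19.05-era API; enable_tunnel_tso is a hypothetical helper, not part of this patch):

#include <errno.h>
#include <rte_ethdev.h>

/* Request VXLAN/GENEVE TSO if the PMD reports the capability. */
static int
enable_tunnel_tso(uint16_t port_id, struct rte_eth_conf *port_conf)
{
	struct rte_eth_dev_info dev_info;
	const uint64_t tnl_tso = DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO;

	rte_eth_dev_info_get(port_id, &dev_info);
	if ((dev_info.tx_offload_capa & tnl_tso) != tnl_tso)
		return -ENOTSUP;	/* tunnel TSO not reported */

	/* Picked up by the subsequent rte_eth_dev_configure() call. */
	port_conf->txmode.offloads |= tnl_tso;
	return 0;
}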

Signed-off-by: Ivan Malov <ivan.malov@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Authored by Ivan Malov on 2019-04-02 10:28:44 +01:00; committed by Ferruh Yigit
parent 9906cb2959
commit c1ce2ba218
7 changed files with 57 additions and 10 deletions

doc/guides/nics/sfc_efx.rst

@@ -66,7 +66,7 @@ SFC EFX PMD has support for:
 - Allmulticast mode
-- TCP segmentation offload (TSO)
+- TCP segmentation offload (TSO) including VXLAN and GENEVE encapsulated
 - Multicast MAC filter

doc/guides/rel_notes/release_19_05.rst

@@ -88,6 +88,7 @@ New Features
     process.
   * Added support for Rx packet types list in a secondary process.
   * Added Tx prepare to do Tx offloads checks.
+  * Added support for VXLAN and GENEVE encapsulated TSO.
 
 * **Updated Mellanox drivers.**

drivers/net/sfc/sfc.c

@@ -750,6 +750,12 @@ sfc_attach(struct sfc_adapter *sa)
 			sfc_info(sa, "TSO support isn't available on this adapter");
 	}
 
+	if (sa->tso && sa->priv.dp_tx->features & SFC_DP_TX_FEAT_TSO_ENCAP) {
+		sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled;
+		if (!sa->tso_encap)
+			sfc_info(sa, "Encapsulated TSO support isn't available on this adapter");
+	}
+
 	sfc_log_init(sa, "estimate resource limits");
 	rc = sfc_estimate_resource_limits(sa);
 	if (rc != 0)

drivers/net/sfc/sfc.h

@@ -286,6 +286,7 @@ struct sfc_adapter {
 	struct sfc_txq			*txq_ctrl;
 
 	boolean_t			tso;
+	boolean_t			tso_encap;
 
 	uint32_t			rxd_wait_timeout_ns;
 };

drivers/net/sfc/sfc_dp_tx.h

@@ -163,6 +163,7 @@ struct sfc_dp_tx {
 #define SFC_DP_TX_FEAT_MULTI_PROCESS	0x8
 #define SFC_DP_TX_FEAT_MULTI_POOL	0x10
 #define SFC_DP_TX_FEAT_REFCNT		0x20
+#define SFC_DP_TX_FEAT_TSO_ENCAP	0x40
 	sfc_dp_tx_get_dev_info_t	*get_dev_info;
 	sfc_dp_tx_qsize_up_rings_t	*qsize_up_rings;
 	sfc_dp_tx_qcreate_t		*qcreate;
@@ -220,7 +221,22 @@ sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
 	if (m->ol_flags & PKT_TX_TCP_SEG) {
 		unsigned int tcph_off = m->l2_len + m->l3_len;
-		unsigned int header_len = tcph_off + m->l4_len;
+		unsigned int header_len;
+
+		switch (m->ol_flags & PKT_TX_TUNNEL_MASK) {
+		case 0:
+			break;
+		case PKT_TX_TUNNEL_VXLAN:
+			/* FALLTHROUGH */
+		case PKT_TX_TUNNEL_GENEVE:
+			if (!(m->ol_flags &
+			      (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
+				return EINVAL;
+
+			tcph_off += m->outer_l2_len + m->outer_l3_len;
+		}
+
+		header_len = tcph_off + m->l4_len;
 
 		if (unlikely(tcph_off > tso_tcp_header_offset_limit))
 			return EINVAL;
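
For tunnelled packets these offsets follow the usual DPDK mbuf convention: outer_l2_len and outer_l3_len cover the outer Ethernet and IP headers, while l2_len spans everything from the outer L4 header up to and including the inner Ethernet header. A worked example for TSO over VXLAN/IPv4 (illustrative only; vxlan_tso_offsets_example is not part of the patch, and all sizes assume option-free headers):

#include <rte_mbuf.h>

static void
vxlan_tso_offsets_example(struct rte_mbuf *m)
{
	m->outer_l2_len = 14;		/* outer Ethernet */
	m->outer_l3_len = 20;		/* outer IPv4 */
	m->l2_len = 8 + 8 + 14;		/* outer UDP + VXLAN + inner Ethernet */
	m->l3_len = 20;			/* inner IPv4 */
	m->l4_len = 20;			/* inner TCP */

	/*
	 * The checks above then compute:
	 *   tcph_off   = l2_len + l3_len + outer_l2_len + outer_l3_len
	 *              = 30 + 20 + 14 + 20 = 84
	 *   header_len = tcph_off + l4_len = 84 + 20 = 104
	 */
}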

drivers/net/sfc/sfc_ef10_tx.c

@@ -366,13 +366,16 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
 		      unsigned int *added, unsigned int *dma_desc_space,
 		      bool *reap_done)
 {
-	size_t iph_off = m_seg->l2_len;
-	size_t tcph_off = m_seg->l2_len + m_seg->l3_len;
-	size_t header_len = m_seg->l2_len + m_seg->l3_len + m_seg->l4_len;
+	size_t iph_off = ((m_seg->ol_flags & PKT_TX_TUNNEL_MASK) ?
+			  m_seg->outer_l2_len + m_seg->outer_l3_len : 0) +
+			 m_seg->l2_len;
+	size_t tcph_off = iph_off + m_seg->l3_len;
+	size_t header_len = tcph_off + m_seg->l4_len;
 	/* Offset of the payload in the last segment that contains the header */
 	size_t in_off = 0;
 	const struct tcp_hdr *th;
 	uint16_t packet_id = 0;
+	uint16_t outer_packet_id = 0;
 	uint32_t sent_seq;
 	uint8_t *hdr_addr;
 	rte_iova_t hdr_iova;
@@ -482,12 +485,16 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
 	if (first_m_seg->ol_flags & PKT_TX_IPV4)
 		packet_id = sfc_tso_ip4_get_ipid(hdr_addr, iph_off);
 
+	if (first_m_seg->ol_flags & PKT_TX_OUTER_IPV4)
+		outer_packet_id = sfc_tso_ip4_get_ipid(hdr_addr,
+				first_m_seg->outer_l2_len);
+
 	th = (const struct tcp_hdr *)(hdr_addr + tcph_off);
 	rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
 	sent_seq = rte_be_to_cpu_32(sent_seq);
 
-	sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, 0, sent_seq,
-			first_m_seg->tso_segsz);
+	sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, outer_packet_id,
+			sent_seq, first_m_seg->tso_segsz);
 	(*added) += SFC_EF10_TSO_OPT_DESCS_NUM;
 
 	sfc_ef10_tx_qdesc_dma_create(hdr_iova, header_len, false,
@@ -927,7 +934,9 @@ sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
 	if (txq->sw_ring == NULL)
 		goto fail_sw_ring_alloc;
 
-	if (info->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+			      DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+			      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) {
 		txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
 					      info->txq_entries,
 					      SFC_TSOH_STD_LEN,
@@ -1090,6 +1099,7 @@ struct sfc_dp_tx sfc_ef10_tx = {
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
 	},
 	.features		= SFC_DP_TX_FEAT_TSO |
+				  SFC_DP_TX_FEAT_TSO_ENCAP |
 				  SFC_DP_TX_FEAT_MULTI_SEG |
 				  SFC_DP_TX_FEAT_MULTI_POOL |
 				  SFC_DP_TX_FEAT_REFCNT |
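
To take this tunnel path, a sender marks each packet with the tunnel, outer-IP and TSO flags and fills in tso_segsz, in addition to the header lengths shown earlier. A sketch for VXLAN over IPv4 (request_vxlan_tso is a hypothetical helper using the 19.05-era flag names):

#include <rte_mbuf.h>

static void
request_vxlan_tso(struct rte_mbuf *m, uint16_t mss)
{
	m->ol_flags |= PKT_TX_TUNNEL_VXLAN |	/* selects the tunnel branch */
		       PKT_TX_OUTER_IPV4 |	/* outer IPv4: outer IP ID is edited */
		       PKT_TX_IPV4 |		/* inner IPv4: inner IP ID is edited */
		       PKT_TX_TCP_SEG;		/* request TSO */
	m->tso_segsz = mss;	/* TCP payload bytes per output segment */
}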

drivers/net/sfc/sfc_tx.c

@@ -70,6 +70,10 @@ sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa)
 	if (sa->tso)
 		caps |= DEV_TX_OFFLOAD_TCP_TSO;
 
+	if (sa->tso_encap)
+		caps |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+			 DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+
 	return caps;
 }
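
Bursts sent on such a queue still go through the prepare-stage checks from sfc_dp_tx_prepare_pkt() above (tunnel type, outer-IP flags, TCP header offset limit), so the usual pattern is to call rte_eth_tx_prepare() before rte_eth_tx_burst(). A sketch (send_checked_burst is a hypothetical wrapper):

#include <rte_ethdev.h>

static uint16_t
send_checked_burst(uint16_t port, uint16_t queue,
		   struct rte_mbuf **pkts, uint16_t n)
{
	/* Count of leading packets that passed the PMD's checks;
	 * on failure rte_errno reports the cause (e.g. EINVAL). */
	uint16_t nb_ok = rte_eth_tx_prepare(port, queue, pkts, n);

	/* A real application must drop or fix pkts[nb_ok..n-1]. */
	return rte_eth_tx_burst(port, queue, pkts, nb_ok);
}
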
@@ -469,7 +473,9 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
 		flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
 	}
 
-	if (txq_info->offloads & DEV_TX_OFFLOAD_TCP_TSO)
+	if (txq_info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
 		flags |= EFX_TXQ_FATSOV2;
 
 	rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem,
@@ -588,18 +594,25 @@ int
 sfc_tx_start(struct sfc_adapter *sa)
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	unsigned int sw_index;
 	int rc = 0;
 
 	sfc_log_init(sa, "txq_count = %u", sas->txq_count);
 
 	if (sa->tso) {
-		if (!efx_nic_cfg_get(sa->nic)->enc_fw_assisted_tso_v2_enabled) {
+		if (!encp->enc_fw_assisted_tso_v2_enabled) {
 			sfc_warn(sa, "TSO support was unable to be restored");
 			sa->tso = B_FALSE;
+			sa->tso_encap = B_FALSE;
 		}
 	}
 
+	if (sa->tso_encap && !encp->enc_fw_assisted_tso_v2_encap_enabled) {
+		sfc_warn(sa, "Encapsulated TSO support was unable to be restored");
+		sa->tso_encap = B_FALSE;
+	}
+
 	rc = efx_tx_init(sa->nic);
 	if (rc != 0)
 		goto fail_efx_tx_init;