/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_ip.h>
#include <rte_tcp.h>

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_tx.h"
#include "sfc_ev.h"
#include "sfc_tso.h"

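/*
 * Allocate a TSO header (TSOH) buffer for every TxQ software descriptor.
 * The buffers are used to linearize packet headers which span more than
 * one mbuf segment.
 */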
int
sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
			    unsigned int txq_entries, unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < txq_entries; ++i) {
		sw_ring[i].tsoh = rte_malloc_socket("sfc-efx-txq-tsoh-obj",
						    SFC_TSOH_STD_LEN,
						    RTE_CACHE_LINE_SIZE,
						    socket_id);
		if (sw_ring[i].tsoh == NULL)
			goto fail_alloc_tsoh_objs;
	}

	return 0;

fail_alloc_tsoh_objs:
	while (i > 0)
		rte_free(sw_ring[--i].tsoh);

	return ENOMEM;
}

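/* Free the TSO header buffers attached to the TxQ software descriptors */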
void
sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
			   unsigned int txq_entries)
{
	unsigned int i;

	for (i = 0; i < txq_entries; ++i) {
		rte_free(sw_ring[i].tsoh);
		sw_ring[i].tsoh = NULL;
	}
}

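/*
 * Copy header_len bytes from the segment chain starting at *in_seg into
 * the contiguous buffer tsoh. On return, *in_seg and *in_off point at the
 * first byte of payload; the return value is the number of mbuf segments
 * consumed entirely by the header.
 */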
unsigned int
sfc_tso_prepare_header(uint8_t *tsoh, size_t header_len,
		       struct rte_mbuf **in_seg, size_t *in_off)
{
	struct rte_mbuf *m = *in_seg;
	size_t bytes_to_copy = 0;
	size_t bytes_left = header_len;
	unsigned int segments_copied = 0;

	do {
		bytes_to_copy = MIN(bytes_left, m->data_len);

		rte_memcpy(tsoh, rte_pktmbuf_mtod(m, uint8_t *),
			   bytes_to_copy);

		bytes_left -= bytes_to_copy;
		tsoh += bytes_to_copy;

		if (bytes_left > 0) {
			m = m->next;
			SFC_ASSERT(m != NULL);
			segments_copied++;
		}
	} while (bytes_left > 0);

	if (bytes_to_copy == m->data_len) {
		*in_seg = m->next;
		*in_off = 0;
		segments_copied++;
	} else {
		*in_seg = m;
		*in_off = bytes_to_copy;
	}

	return segments_copied;
}

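/*
 * Build Tx descriptors for a TSO packet on the EFX (libefx-based) datapath:
 * FATSOv2 option descriptors followed by a DMA descriptor covering the
 * contiguous packet header. Returns 0 on success or a positive errno.
 */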
int
sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
	       struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend,
	       unsigned int *pkt_descs, size_t *pkt_len)
{
	uint8_t *tsoh;
	const struct rte_tcp_hdr *th;
	efsys_dma_addr_t header_paddr;
	uint16_t packet_id = 0;
	uint32_t sent_seq;
	struct rte_mbuf *m = *in_seg;
	size_t nh_off = m->l2_len; /* IP header offset */
	size_t tcph_off = m->l2_len + m->l3_len; /* TCP header offset */
	size_t header_len = m->l2_len + m->l3_len + m->l4_len;

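	/*
	 * Advance the index past the TSO option descriptors so that it
	 * points at the descriptor which will carry the header; the TSOH
	 * buffer of the matching sw_ring entry may be used below.
	 */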
	idx += SFC_EF10_TSO_OPT_DESCS_NUM;

	header_paddr = rte_pktmbuf_iova(m);

	/*
	 * Sometimes headers may be split across multiple mbufs. In such cases
	 * we need to glue those pieces and store them in some temporary place.
	 * Also, packet headers must be contiguous in memory, so that
	 * they can be referred to with a single DMA descriptor. EF10 has no
	 * limitations on address boundary crossing by DMA descriptor data.
	 */
	if (m->data_len < header_len) {
		/*
		 * Discard the packet if header linearization is needed but
		 * the header is too big.
		 * Duplicate the Tx prepare check here to avoid memory
		 * corruption if Tx prepare is skipped.
		 */
		if (unlikely(header_len > SFC_TSOH_STD_LEN))
			return EMSGSIZE;

		tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
		sfc_tso_prepare_header(tsoh, header_len, in_seg, in_off);

		header_paddr = rte_malloc_virt2iova((void *)tsoh);
	} else {
		if (m->data_len == header_len) {
			*in_off = 0;
			*in_seg = m->next;
		} else {
			*in_off = header_len;
		}

		tsoh = rte_pktmbuf_mtod(m, uint8_t *);
	}

	/*
	 * Handle the IP header. Tx prepare has debug-only checks that the
	 * offload flags are correctly filled in for a TSO mbuf. Use zero
	 * IPID if there is no IPv4 flag. If the packet is still IPv4, HW
	 * will simply start from zero IPID.
	 */
	if (m->ol_flags & PKT_TX_IPV4)
		packet_id = sfc_tso_ip4_get_ipid(tsoh, nh_off);

	/* Handle TCP header */
	th = (const struct rte_tcp_hdr *)(tsoh + tcph_off);

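	/*
	 * Use a copy instead of a direct read: the sequence number field
	 * need not be suitably aligned for a 32-bit access at this point.
	 */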
	rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
	sent_seq = rte_be_to_cpu_32(sent_seq);

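	/* Emit the FATSOv2 option descriptors for this TSO transaction */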
	efx_tx_qdesc_tso2_create(txq->common, packet_id, 0, sent_seq,
				 m->tso_segsz,
				 *pend, EFX_TX_FATSOV2_OPT_NDESCS);

	*pend += EFX_TX_FATSOV2_OPT_NDESCS;
	*pkt_descs += EFX_TX_FATSOV2_OPT_NDESCS;

	/* A single DMA descriptor covers the whole contiguous header */
	efx_tx_qdesc_dma_create(txq->common, header_paddr, header_len,
				B_FALSE, (*pend)++);
	(*pkt_descs)++;
	*pkt_len -= header_len;

	return 0;
}