net/sfc: remove Tx DMA descriptor boundary crossing limit
EF10 supported by the PMD has no limitations on address boundary
crossing by Tx DMA descriptors.

Fixes: 428c7ddd2f ("net/sfc: send bursts of packets")
Fixes: fec33d5bb3 ("net/sfc: support firmware-assisted TSO")

Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
parent 1e43fe3cb4
commit 676d11ffb2
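The gist of the change, as a minimal standalone sketch (not code from the commit): previously every Tx fragment was cut at the next 4 KiB boundary, whereas now its length is capped only by the maximum DMA descriptor size reported by the adapter. The helper names below are hypothetical; only SFC_TX_SEG_BOUNDARY and dma_desc_size_max come from the driver.

#include <stddef.h>
#include <stdint.h>

#define SFC_TX_SEG_BOUNDARY 4096	/* the limit this commit removes */

/* Old behaviour: a fragment stops at the next 4 KiB boundary. */
static size_t
frag_len_old(uintptr_t frag_addr, size_t seg_len)
{
	uintptr_t next_frag = (frag_addr + SFC_TX_SEG_BOUNDARY) &
			      ~(uintptr_t)(SFC_TX_SEG_BOUNDARY - 1);
	size_t to_boundary = next_frag - frag_addr;

	return (to_boundary < seg_len) ? to_boundary : seg_len;
}

/* New behaviour: only the NIC's maximum descriptor size caps a fragment. */
static size_t
frag_len_new(size_t seg_len, uint16_t dma_desc_size_max)
{
	return (seg_len < dma_desc_size_max) ? seg_len : dma_desc_size_max;
}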
--- a/drivers/net/sfc/sfc_tso.c
+++ b/drivers/net/sfc/sfc_tso.c
@@ -50,7 +50,7 @@ sfc_tso_alloc_tsoh_objs(struct sfc_tx_sw_desc *sw_ring,
 	for (i = 0; i < txq_entries; ++i) {
 		sw_ring[i].tsoh = rte_malloc_socket("sfc-txq-tsoh-obj",
 						    SFC_TSOH_STD_LEN,
-						    SFC_TX_SEG_BOUNDARY,
+						    RTE_CACHE_LINE_SIZE,
 						    socket_id);
 		if (sw_ring[i].tsoh == NULL)
 			goto fail_alloc_tsoh_objs;
@@ -116,7 +116,6 @@ sfc_tso_do(struct sfc_txq *txq, unsigned int idx, struct rte_mbuf **in_seg,
 	uint8_t *tsoh;
 	const struct tcp_hdr *th;
 	efsys_dma_addr_t header_paddr;
-	efsys_dma_addr_t paddr_next_frag;
 	uint16_t packet_id;
 	uint32_t sent_seq;
 	struct rte_mbuf *m = *in_seg;
@@ -140,17 +139,15 @@ sfc_tso_do(struct sfc_txq *txq, unsigned int idx, struct rte_mbuf **in_seg,
 		return EMSGSIZE;
 
 	header_paddr = rte_pktmbuf_mtophys(m);
-	paddr_next_frag = P2ROUNDUP(header_paddr + 1, SFC_TX_SEG_BOUNDARY);
 
 	/*
 	 * Sometimes headers may be split across multiple mbufs. In such cases
 	 * we need to glue those pieces and store them in some temporary place.
 	 * Also, packet headers must be contiguous in memory, so that
-	 * they can be referred to with a single DMA descriptor. Hence, handle
-	 * the case where the original header crosses a 4K memory boundary
+	 * they can be referred to with a single DMA descriptor. EF10 has no
+	 * limitations on address boundaries crossing by DMA descriptor data.
	 */
-	if ((m->data_len < header_len) ||
-	    ((paddr_next_frag - header_paddr) < header_len)) {
+	if (m->data_len < header_len) {
 		sfc_tso_prepare_header(txq, in_seg, in_off, idx, header_len);
 		tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
 
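The comment in the hunk above refers to gluing header pieces that span several mbufs into one contiguous buffer (the per-descriptor tsoh area) so a single DMA descriptor can reference the whole header. Below is a hedged sketch of that idea, not the driver's actual sfc_tso_prepare_header(); copy_header_to_tsoh is a hypothetical name introduced for illustration.

#include <stddef.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_memcpy.h>
#include <rte_mbuf.h>

/*
 * Illustration only: copy the first header_len bytes of an mbuf chain
 * into a contiguous buffer so that one DMA descriptor can point at the
 * whole packet header.
 */
static void
copy_header_to_tsoh(uint8_t *tsoh, const struct rte_mbuf *m, size_t header_len)
{
	size_t copied = 0;

	while (copied < header_len && m != NULL) {
		size_t chunk = RTE_MIN((size_t)rte_pktmbuf_data_len(m),
				       header_len - copied);

		rte_memcpy(tsoh + copied,
			   rte_pktmbuf_mtod(m, const uint8_t *), chunk);
		copied += chunk;
		m = m->next;
	}
}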
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -137,6 +137,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	     uint16_t nb_tx_desc, unsigned int socket_id,
 	     const struct rte_eth_txconf *tx_conf)
 {
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	struct sfc_txq_info *txq_info;
 	struct sfc_evq *evq;
 	struct sfc_txq *txq;
@@ -195,6 +196,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	txq->ptr_mask = txq_info->entries - 1;
 	txq->free_thresh = (tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
 			   SFC_TX_DEFAULT_FREE_THRESH;
+	txq->dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
 	txq->hw_index = sw_index;
 	txq->flags = tx_conf->txq_flags;
 	txq->evq = evq;
@@ -302,10 +304,21 @@ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
 int
 sfc_tx_init(struct sfc_adapter *sa)
 {
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
 	unsigned int sw_index;
 	int rc = 0;
 
+	/*
+	 * The datapath implementation assumes absence of boundary
+	 * limits on Tx DMA descriptors. Addition of these checks on
+	 * datapath would simply make the datapath slower.
+	 */
+	if (encp->enc_tx_dma_desc_boundary != 0) {
+		rc = ENOTSUP;
+		goto fail_tx_dma_desc_boundary;
+	}
+
 	rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
 	if (rc != 0)
 		goto fail_check_mode;
@@ -334,6 +347,7 @@ sfc_tx_init(sa)
 	sa->txq_count = 0;
 
 fail_check_mode:
+fail_tx_dma_desc_boundary:
 	sfc_log_init(sa, "failed (rc = %d)", rc);
 	return rc;
 }
@@ -704,9 +718,13 @@ sfc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			efsys_dma_addr_t frag_addr = next_frag;
 			size_t frag_len;
 
-			next_frag = RTE_ALIGN(frag_addr + 1,
-					      SFC_TX_SEG_BOUNDARY);
-			frag_len = MIN(next_frag - frag_addr, seg_len);
+			/*
+			 * It is assumed here that there is no
+			 * limitation on address boundary
+			 * crossing by DMA descriptor.
+			 */
+			frag_len = MIN(seg_len, txq->dma_desc_size_max);
+			next_frag += frag_len;
 			seg_len -= frag_len;
 			pkt_len -= frag_len;
 
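Putting the hunk above in context, here is a hedged sketch (not the driver's sfc_xmit_pkts()) of how one mbuf data segment is now split into DMA fragments: each fragment is capped only by dma_desc_size_max, with no 4 KiB boundary splitting. struct dma_frag and split_seg are hypothetical names introduced for the example.

#include <stddef.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_mbuf.h>

/* Hypothetical fragment record used only for this illustration. */
struct dma_frag {
	phys_addr_t addr;	/* bus address of the fragment */
	size_t len;		/* fragment length in bytes */
};

/*
 * Split one mbuf data segment into DMA fragments. With the boundary
 * limit removed, the only per-fragment cap is the maximum DMA
 * descriptor size reported by the NIC.
 */
static unsigned int
split_seg(const struct rte_mbuf *m, uint16_t dma_desc_size_max,
	  struct dma_frag *frags, unsigned int max_frags)
{
	phys_addr_t frag_addr = rte_pktmbuf_mtophys(m);
	size_t seg_len = rte_pktmbuf_data_len(m);
	unsigned int n = 0;

	while (seg_len != 0 && n < max_frags) {
		size_t frag_len = RTE_MIN(seg_len, (size_t)dma_desc_size_max);

		frags[n].addr = frag_addr;
		frags[n].len = frag_len;
		n++;

		frag_addr += frag_len;
		seg_len -= frag_len;
	}

	return n;
}

Both the TSO path and the regular transmit path rely on this simplification, which is why sfc_tx_init() above refuses adapters that report a nonzero enc_tx_dma_desc_boundary instead of handling the boundary on the datapath.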
--- a/drivers/net/sfc/sfc_tx.h
+++ b/drivers/net/sfc/sfc_tx.h
@@ -39,12 +39,6 @@
 extern "C" {
 #endif
 
-/**
- * A segment must not cross 4K boundary
- * (this is a requirement of NIC TX descriptors)
- */
-#define SFC_TX_SEG_BOUNDARY 4096
-
 struct sfc_adapter;
 struct sfc_evq;
 
@@ -79,6 +73,7 @@ struct sfc_txq {
 	unsigned int completed;
 	unsigned int free_thresh;
 	uint16_t hw_vlan_tci;
+	uint16_t dma_desc_size_max;
 
 	unsigned int hw_index;
 	unsigned int flags;