net/sfc: factor out libefx-based Tx datapath

Split the control path and the datapath to make the datapath
substitutable and possibly reusable with an alternative control path.

The libefx-based Tx datapath is bound to the libefx control path, but
it should be possible to use other datapaths with alternative control
path(s).

Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
commit dbdc82416b (parent 638bddc99f)
Andrew Rybchenko, 2017-03-20 10:15:14 +00:00; committed by Ferruh Yigit
13 changed files with 564 additions and 144 deletions

doc/guides/nics/sfc_efx.rst

@@ -236,6 +236,14 @@ boolean parameters value.
   more efficient than libefx-based and provides richer packet type
   classification, but lacks Rx scatter support.
 
+- ``tx_datapath`` [auto|efx] (default **auto**)
+
+  Choose transmit datapath implementation.
+
+  **auto** allows the driver itself to make a choice based on firmware
+  features available and required by the datapath implementation.
+  **efx** chooses libefx-based datapath which supports VLAN insertion
+  (full-feature firmware variant only), TSO and multi-segment mbufs.
+
 - ``perf_profile`` [auto|throughput|low-latency] (default **throughput**)
 
   Choose hardware tunning to be optimized for either throughput or
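
Like rx_datapath, the new tx_datapath parameter is supplied as a device
argument appended to the PCI address handed to EAL. A minimal sketch of
requesting the libefx-based Tx datapath from an application follows; the
PCI address and option layout are illustrative only, not taken from this
patch:

#include <rte_eal.h>

int
main(int argc, char **argv)
{
	/* Sketch: whitelist one sfc device and select tx_datapath=efx */
	char *eal_argv[] = {
		argv[0],
		"-w", "0000:05:00.0,tx_datapath=efx",
	};

	(void)argc;

	if (rte_eal_init(3, eal_argv) < 0)
		return -1;

	/* normal ethdev configuration would follow here */
	return 0;
}

The same "addr,tx_datapath=efx" string can be appended to testpmd's -w
option on the command line; auto remains the default when no value is
given.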

drivers/net/sfc/sfc.h

@@ -228,6 +228,7 @@ struct sfc_adapter {
 #endif
 
 	const struct sfc_dp_rx		*dp_rx;
+	const struct sfc_dp_tx		*dp_tx;
 };
 
 /*

drivers/net/sfc/sfc_dp.c

@@ -87,7 +87,9 @@ sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry)
 	if (sfc_dp_find_by_name(head, entry->type, entry->name) != NULL) {
 		rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD,
 			"sfc %s dapapath '%s' already registered\n",
-			entry->type == SFC_DP_RX ? "Rx" : "unknown",
+			entry->type == SFC_DP_RX ? "Rx" :
+			entry->type == SFC_DP_TX ? "Tx" :
+			"unknown",
 			entry->name);
 		return EEXIST;
 	}

drivers/net/sfc/sfc_dp.h

@@ -56,6 +56,7 @@ typedef void (sfc_dp_exception_t)(void *ctrl);
 
 enum sfc_dp_type {
 	SFC_DP_RX = 0,		/**< Receive datapath */
+	SFC_DP_TX,		/**< Transmit datapath */
 };

drivers/net/sfc/sfc_dp_tx.h (new file, 148 lines)

@@ -0,0 +1,148 @@
/*-
* BSD LICENSE
*
* Copyright (c) 2016 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_DP_TX_H
#define _SFC_DP_TX_H

#include <rte_ethdev.h>

#include "sfc_dp.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Generic transmit queue information used on data path.
 * It must be kept as small as it is possible since it is built into
 * the structure used on datapath.
 */
struct sfc_dp_txq {
	struct sfc_dp_queue	dpq;
};

/**
 * Datapath transmit queue creation information.
 *
 * The structure is used just to pass information from control path to
 * datapath. It could be just function arguments, but it would be hardly
 * readable.
 */
struct sfc_dp_tx_qcreate_info {
	/** Minimum number of unused Tx descriptors to do reap */
	unsigned int		free_thresh;
	/** Transmit queue configuration flags */
	unsigned int		flags;
	/** Tx queue size */
	unsigned int		txq_entries;
	/** Maximum size of data in the DMA descriptor */
	uint16_t		dma_desc_size_max;
};

/**
 * Allocate and initialize datapath transmit queue.
 *
 * @param port_id	The port identifier
 * @param queue_id	The queue identifier
 * @param pci_addr	PCI function address
 * @param socket_id	Socket identifier to allocate memory
 * @param info		Tx queue details wrapped in structure
 * @param dp_txqp	Location for generic datapath transmit queue pointer
 *
 * @return 0 or positive errno.
 */
typedef int (sfc_dp_tx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
				  const struct rte_pci_addr *pci_addr,
				  int socket_id,
				  const struct sfc_dp_tx_qcreate_info *info,
				  struct sfc_dp_txq **dp_txqp);

/**
 * Free resources allocated for datapath transmit queue.
 */
typedef void (sfc_dp_tx_qdestroy_t)(struct sfc_dp_txq *dp_txq);

/**
 * Transmit queue start callback.
 *
 * It handovers EvQ to the datapath.
 */
typedef int (sfc_dp_tx_qstart_t)(struct sfc_dp_txq *dp_txq,
				 unsigned int evq_read_ptr,
				 unsigned int txq_desc_index);

/**
 * Transmit queue stop function called before the queue flush.
 *
 * It returns EvQ to the control path.
 */
typedef void (sfc_dp_tx_qstop_t)(struct sfc_dp_txq *dp_txq,
				 unsigned int *evq_read_ptr);

/**
 * Transmit queue function called after the queue flush.
 */
typedef void (sfc_dp_tx_qreap_t)(struct sfc_dp_txq *dp_txq);

/** Transmit datapath definition */
struct sfc_dp_tx {
	struct sfc_dp		dp;

	sfc_dp_tx_qcreate_t	*qcreate;
	sfc_dp_tx_qdestroy_t	*qdestroy;
	sfc_dp_tx_qstart_t	*qstart;
	sfc_dp_tx_qstop_t	*qstop;
	sfc_dp_tx_qreap_t	*qreap;
	eth_tx_burst_t		pkt_burst;
};

static inline struct sfc_dp_tx *
sfc_dp_find_tx_by_name(struct sfc_dp_list *head, const char *name)
{
	struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_TX, name);

	return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp);
}

static inline struct sfc_dp_tx *
sfc_dp_find_tx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
{
	struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_TX, avail_caps);

	return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp);
}

extern struct sfc_dp_tx sfc_efx_tx;

#ifdef __cplusplus
}
#endif
#endif /* _SFC_DP_TX_H */
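
The header above is the whole contract between the control path and a Tx
datapath. A hypothetical alternative implementation (every sfc_dummy_*
name below is illustrative and not part of this patch) only has to fill
in struct sfc_dp_tx; a sketch of the minimum it would need:

#include <errno.h>

#include <rte_malloc.h>
#include <rte_mbuf.h>

#include "sfc_dp_tx.h"

/* Hypothetical queue: the generic sfc_dp_txq must be embedded in it */
struct sfc_dummy_txq {
	struct sfc_dp_txq	dp;
};

static sfc_dp_tx_qcreate_t sfc_dummy_tx_qcreate;
static int
sfc_dummy_tx_qcreate(uint16_t port_id, uint16_t queue_id,
		     const struct rte_pci_addr *pci_addr, int socket_id,
		     const struct sfc_dp_tx_qcreate_info *info,
		     struct sfc_dp_txq **dp_txqp)
{
	struct sfc_dummy_txq *txq;

	(void)info;
	txq = rte_zmalloc_socket("sfc-dummy-txq", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		return ENOMEM;

	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
	*dp_txqp = &txq->dp;
	return 0;
}

static sfc_dp_tx_qdestroy_t sfc_dummy_tx_qdestroy;
static void
sfc_dummy_tx_qdestroy(struct sfc_dp_txq *dp_txq)
{
	rte_free(container_of(dp_txq, struct sfc_dummy_txq, dp));
}

static sfc_dp_tx_qstart_t sfc_dummy_tx_qstart;
static int
sfc_dummy_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
		    unsigned int txq_desc_index)
{
	(void)dp_txq; (void)evq_read_ptr; (void)txq_desc_index;
	return 0;
}

static sfc_dp_tx_qstop_t sfc_dummy_tx_qstop;
static void
sfc_dummy_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
{
	(void)dp_txq; (void)evq_read_ptr;
}

static sfc_dp_tx_qreap_t sfc_dummy_tx_qreap;
static void
sfc_dummy_tx_qreap(struct sfc_dp_txq *dp_txq)
{
	(void)dp_txq;
}

static uint16_t
sfc_dummy_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		    uint16_t nb_pkts)
{
	(void)tx_queue; (void)tx_pkts; (void)nb_pkts;
	return 0;	/* never sends anything */
}

struct sfc_dp_tx sfc_dummy_tx = {
	.dp = {
		.name		= "dummy",	/* tx_datapath=dummy */
		.type		= SFC_DP_TX,
		.hw_fw_caps	= 0,
	},
	.qcreate	= sfc_dummy_tx_qcreate,
	.qdestroy	= sfc_dummy_tx_qdestroy,
	.qstart		= sfc_dummy_tx_qstart,
	.qstop		= sfc_dummy_tx_qstop,
	.qreap		= sfc_dummy_tx_qreap,
	.pkt_burst	= sfc_dummy_xmit_pkts,
};

Registration would mirror the Rx side: an sfc_dp_register() call next to
sfc_efx_tx in sfc_register_dp() and a new value in
SFC_KVARG_VALUES_TX_DATAPATH, after which the name/caps lookup in
sfc_eth_dev_set_ops() below picks it up.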

drivers/net/sfc/sfc_ethdev.c

@@ -467,7 +467,7 @@ sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 	if (rc != 0)
 		goto fail_tx_qinit;
 
-	dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq;
+	dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq->dp;
 
 	sfc_adapter_unlock(sa);
 
 	return 0;
@@ -481,13 +481,15 @@ sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 static void
 sfc_tx_queue_release(void *queue)
 {
-	struct sfc_txq *txq = queue;
+	struct sfc_dp_txq *dp_txq = queue;
+	struct sfc_txq *txq;
 	unsigned int sw_index;
 	struct sfc_adapter *sa;
 
-	if (txq == NULL)
+	if (dp_txq == NULL)
 		return;
 
+	txq = sfc_txq_by_dp_txq(dp_txq);
 	sw_index = sfc_txq_sw_index(txq);
 
 	SFC_ASSERT(txq->evq != NULL);
@@ -1361,6 +1363,7 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
 	struct sfc_adapter *sa = dev->data->dev_private;
 	unsigned int avail_caps = 0;
 	const char *rx_name = NULL;
+	const char *tx_name = NULL;
 	int rc;
 
 	if (sa == NULL || sa->state == SFC_ADAPTER_UNINITIALIZED)
@@ -1408,12 +1411,45 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
 
 	dev->rx_pkt_burst = sa->dp_rx->pkt_burst;
 
-	dev->tx_pkt_burst = sfc_xmit_pkts;
+	rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
+				sfc_kvarg_string_handler, &tx_name);
+	if (rc != 0)
+		goto fail_kvarg_tx_datapath;
+
+	if (tx_name != NULL) {
+		sa->dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
+		if (sa->dp_tx == NULL) {
+			sfc_err(sa, "Tx datapath %s not found", tx_name);
+			rc = ENOENT;
+			goto fail_dp_tx;
+		}
+		if (!sfc_dp_match_hw_fw_caps(&sa->dp_tx->dp, avail_caps)) {
+			sfc_err(sa,
+				"Insufficient Hw/FW capabilities to use Tx datapath %s",
+				tx_name);
+			rc = EINVAL;
+			goto fail_dp_tx;
+		}
+	} else {
+		sa->dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
+		if (sa->dp_tx == NULL) {
+			sfc_err(sa, "Tx datapath by caps %#x not found",
+				avail_caps);
+			rc = ENOENT;
+			goto fail_dp_tx;
+		}
+	}
+
+	sfc_info(sa, "use %s Tx datapath", sa->dp_tx->dp.name);
+
+	dev->tx_pkt_burst = sa->dp_tx->pkt_burst;
 
 	dev->dev_ops = &sfc_eth_dev_ops;
 
 	return 0;
 
+fail_dp_tx:
+fail_kvarg_tx_datapath:
 fail_dp_rx:
 fail_kvarg_rx_datapath:
 	return rc;
@@ -1427,6 +1463,8 @@ sfc_register_dp(void)
 		/* Prefer EF10 datapath */
 		sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
 		sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
+
+		sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
 	}
 }
@@ -1563,6 +1601,7 @@ RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio");
 RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
 	SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
+	SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
 	SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
 	SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long> "
 	SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "

drivers/net/sfc/sfc_ev.c

@@ -183,16 +183,18 @@ static boolean_t
 sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
 {
 	struct sfc_evq *evq = arg;
-	struct sfc_txq *txq;
+	struct sfc_dp_txq *dp_txq;
+	struct sfc_efx_txq *txq;
 	unsigned int stop;
 	unsigned int delta;
 
-	txq = evq->txq;
+	dp_txq = evq->dp_txq;
+	SFC_ASSERT(dp_txq != NULL);
 
-	SFC_ASSERT(txq != NULL);
+	txq = sfc_efx_txq_by_dp_txq(dp_txq);
 	SFC_ASSERT(txq->evq == evq);
 
-	if (unlikely((txq->state & SFC_TXQ_STARTED) == 0))
+	if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0))
 		goto done;
 
 	stop = (id + 1) & txq->ptr_mask;
@@ -305,9 +307,13 @@ static boolean_t
 sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
 {
 	struct sfc_evq *evq = arg;
+	struct sfc_dp_txq *dp_txq;
 	struct sfc_txq *txq;
 
-	txq = evq->txq;
+	dp_txq = evq->dp_txq;
+	SFC_ASSERT(dp_txq != NULL);
+
+	txq = sfc_txq_by_dp_txq(dp_txq);
 	SFC_ASSERT(txq != NULL);
 	SFC_ASSERT(txq->hw_index == txq_hw_index);
 	SFC_ASSERT(txq->evq == evq);
@@ -441,7 +447,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
 	.eec_link_change	= sfc_ev_nop_link_change,
 };
 
-static const efx_ev_callbacks_t sfc_ev_callbacks_tx = {
+static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
 	.eec_initialized	= sfc_ev_initialized,
 	.eec_rx			= sfc_ev_nop_rx,
 	.eec_tx			= sfc_ev_tx,
@@ -456,6 +462,21 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_tx = {
 	.eec_link_change	= sfc_ev_nop_link_change,
 };
 
+static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
+	.eec_initialized	= sfc_ev_initialized,
+	.eec_rx			= sfc_ev_nop_rx,
+	.eec_tx			= sfc_ev_nop_tx,
+	.eec_exception		= sfc_ev_exception,
+	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
+	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
+	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
+	.eec_software		= sfc_ev_software,
+	.eec_sram		= sfc_ev_sram,
+	.eec_wake_up		= sfc_ev_wake_up,
+	.eec_timer		= sfc_ev_timer,
+	.eec_link_change	= sfc_ev_nop_link_change,
+};
+
 void
 sfc_ev_qpoll(struct sfc_evq *evq)
@@ -487,8 +508,10 @@ sfc_ev_qpoll(struct sfc_evq *evq)
 					rxq_sw_index);
 		}
 
-		if (evq->txq != NULL) {
-			unsigned int txq_sw_index = sfc_txq_sw_index(evq->txq);
+		if (evq->dp_txq != NULL) {
+			unsigned int txq_sw_index;
+
+			txq_sw_index = evq->dp_txq->dpq.queue_id;
 
 			sfc_warn(sa,
 				 "restart TxQ %u because of exception on its EvQ %u",
@@ -558,14 +581,17 @@ sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index)
 	if (rc != 0)
 		goto fail_ev_qcreate;
 
-	SFC_ASSERT(evq->dp_rxq == NULL || evq->txq == NULL);
+	SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
 	if (evq->dp_rxq != 0) {
 		if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
 			evq->callbacks = &sfc_ev_callbacks_efx_rx;
 		else
 			evq->callbacks = &sfc_ev_callbacks_dp_rx;
-	} else if (evq->txq != 0) {
-		evq->callbacks = &sfc_ev_callbacks_tx;
+	} else if (evq->dp_txq != 0) {
+		if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
+			evq->callbacks = &sfc_ev_callbacks_efx_tx;
+		else
+			evq->callbacks = &sfc_ev_callbacks_dp_tx;
 	} else {
 		evq->callbacks = &sfc_ev_callbacks;
 	}

drivers/net/sfc/sfc_ev.h

@@ -32,8 +32,12 @@
 #ifndef _SFC_EV_H_
 #define _SFC_EV_H_
 
+#include <rte_ethdev.h>
+
 #include "efx.h"
 
+#include "sfc.h"
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -43,7 +47,7 @@ extern "C" {
 
 struct sfc_adapter;
 struct sfc_dp_rxq;
-struct sfc_txq;
+struct sfc_dp_txq;
 
 enum sfc_evq_state {
 	SFC_EVQ_UNINITIALIZED = 0,
@@ -62,7 +66,7 @@ struct sfc_evq {
 	boolean_t		exception;
 	efsys_mem_t		mem;
 	struct sfc_dp_rxq	*dp_rxq;
-	struct sfc_txq		*txq;
+	struct sfc_dp_txq	*dp_txq;
 
 	/* Not used on datapath */
 	struct sfc_adapter	*sa;

drivers/net/sfc/sfc_kvargs.c

@@ -49,6 +49,7 @@ sfc_kvargs_parse(struct sfc_adapter *sa)
 		SFC_KVARG_MCDI_LOGGING,
 		SFC_KVARG_PERF_PROFILE,
 		SFC_KVARG_RX_DATAPATH,
+		SFC_KVARG_TX_DATAPATH,
 		NULL,
 	};

drivers/net/sfc/sfc_kvargs.h

@@ -64,6 +64,10 @@ extern "C" {
 	"[" SFC_KVARG_DATAPATH_EFX "|" \
 	    SFC_KVARG_DATAPATH_EF10 "]"
 
+#define SFC_KVARG_TX_DATAPATH		"tx_datapath"
+#define SFC_KVARG_VALUES_TX_DATAPATH \
+	"[" SFC_KVARG_DATAPATH_EFX "]"
+
 struct sfc_adapter;
 
 int sfc_kvargs_parse(struct sfc_adapter *sa);

drivers/net/sfc/sfc_tso.c

@@ -44,13 +44,13 @@
 #define SFC_TSO_OPDESCS_IDX_SHIFT	2
 
 int
-sfc_tso_alloc_tsoh_objs(struct sfc_tx_sw_desc *sw_ring,
+sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
 			unsigned int txq_entries, unsigned int socket_id)
 {
 	unsigned int i;
 
 	for (i = 0; i < txq_entries; ++i) {
-		sw_ring[i].tsoh = rte_malloc_socket("sfc-txq-tsoh-obj",
+		sw_ring[i].tsoh = rte_malloc_socket("sfc-efx-txq-tsoh-obj",
 						    SFC_TSOH_STD_LEN,
 						    RTE_CACHE_LINE_SIZE,
 						    socket_id);
@@ -68,7 +68,8 @@ sfc_tso_alloc_tsoh_objs(struct sfc_tx_sw_desc *sw_ring,
 }
 
 void
-sfc_tso_free_tsoh_objs(struct sfc_tx_sw_desc *sw_ring, unsigned int txq_entries)
+sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+			   unsigned int txq_entries)
 {
 	unsigned int i;
@@ -79,7 +80,7 @@ sfc_tso_free_tsoh_objs(struct sfc_tx_sw_desc *sw_ring, unsigned int txq_entries)
 }
 
 static void
-sfc_tso_prepare_header(struct sfc_txq *txq, struct rte_mbuf **in_seg,
+sfc_efx_tso_prepare_header(struct sfc_efx_txq *txq, struct rte_mbuf **in_seg,
 			   size_t *in_off, unsigned int idx, size_t bytes_left)
 {
 	struct rte_mbuf *m = *in_seg;
@@ -111,9 +112,9 @@ sfc_tso_prepare_header(struct sfc_txq *txq, struct rte_mbuf **in_seg,
 }
 
 int
-sfc_tso_do(struct sfc_txq *txq, unsigned int idx, struct rte_mbuf **in_seg,
-	   size_t *in_off, efx_desc_t **pend, unsigned int *pkt_descs,
-	   size_t *pkt_len)
+sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
+	       struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend,
+	       unsigned int *pkt_descs, size_t *pkt_len)
 {
 	uint8_t *tsoh;
 	const struct tcp_hdr *th;
@@ -150,7 +151,8 @@ sfc_tso_do(struct sfc_txq *txq, unsigned int idx, struct rte_mbuf **in_seg,
 	 * limitations on address boundaries crossing by DMA descriptor data.
 	 */
 	if (m->data_len < header_len) {
-		sfc_tso_prepare_header(txq, in_seg, in_off, idx, header_len);
+		sfc_efx_tso_prepare_header(txq, in_seg, in_off, idx,
+					   header_len);
 		tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
 
 		header_paddr = rte_malloc_virt2phy((void *)tsoh);

drivers/net/sfc/sfc_tx.c

@@ -35,6 +35,7 @@
 #include "sfc_ev.h"
 #include "sfc_tx.h"
 #include "sfc_tweak.h"
+#include "sfc_kvargs.h"
 
 /*
  * Maximum number of TX queue flush attempts in case of
@@ -111,29 +112,6 @@ sfc_tx_qflush_done(struct sfc_txq *txq)
 	txq->state &= ~SFC_TXQ_FLUSHING;
 }
 
-static void
-sfc_tx_reap(struct sfc_txq *txq)
-{
-	unsigned int completed;
-
-	sfc_ev_qpoll(txq->evq);
-
-	for (completed = txq->completed;
-	     completed != txq->pending; completed++) {
-		struct sfc_tx_sw_desc *txd;
-
-		txd = &txq->sw_ring[completed & txq->ptr_mask];
-
-		if (txd->mbuf != NULL) {
-			rte_pktmbuf_free(txd->mbuf);
-			txd->mbuf = NULL;
-		}
-	}
-
-	txq->completed = completed;
-}
-
 int
 sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	     uint16_t nb_tx_desc, unsigned int socket_id,
@@ -145,6 +123,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	struct sfc_txq *txq;
 	unsigned int evq_index = sfc_evq_index_by_txq_sw_index(sa, sw_index);
 	int rc = 0;
+	struct sfc_dp_tx_qcreate_info info;
 
 	sfc_log_init(sa, "TxQ = %u", sw_index);
@@ -169,57 +148,45 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	if (txq == NULL)
 		goto fail_txq_alloc;
 
+	txq_info->txq = txq;
+
+	txq->hw_index = sw_index;
+	txq->evq = evq;
+	txq->free_thresh =
+		(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
+		SFC_TX_DEFAULT_FREE_THRESH;
+	txq->flags = tx_conf->txq_flags;
+
 	rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
 			   socket_id, &txq->mem);
 	if (rc != 0)
 		goto fail_dma_alloc;
 
-	rc = ENOMEM;
-	txq->pend_desc = rte_calloc_socket("sfc-txq-pend-desc",
-					   EFX_TXQ_LIMIT(txq_info->entries),
-					   sizeof(efx_desc_t), 0, socket_id);
-	if (txq->pend_desc == NULL)
-		goto fail_pend_desc_alloc;
-
-	rc = ENOMEM;
-	txq->sw_ring = rte_calloc_socket("sfc-txq-desc", txq_info->entries,
-					 sizeof(*txq->sw_ring), 0, socket_id);
-	if (txq->sw_ring == NULL)
-		goto fail_desc_alloc;
-
-	if (sa->tso) {
-		rc = sfc_tso_alloc_tsoh_objs(txq->sw_ring, txq_info->entries,
-					     socket_id);
-		if (rc != 0)
-			goto fail_alloc_tsoh_objs;
-	}
+	memset(&info, 0, sizeof(info));
+	info.free_thresh = txq->free_thresh;
+	info.flags = tx_conf->txq_flags;
+	info.txq_entries = txq_info->entries;
+	info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
+
+	rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
+				&SFC_DEV_TO_PCI(sa->eth_dev)->addr,
+				socket_id, &info, &txq->dp);
+	if (rc != 0)
+		goto fail_dp_tx_qinit;
+
+	evq->dp_txq = txq->dp;
 
 	txq->state = SFC_TXQ_INITIALIZED;
-	txq->ptr_mask = txq_info->entries - 1;
-	txq->free_thresh = (tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
-			   SFC_TX_DEFAULT_FREE_THRESH;
-	txq->dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
-	txq->hw_index = sw_index;
-	txq->flags = tx_conf->txq_flags;
-	txq->evq = evq;
-
-	evq->txq = txq;
-
-	txq_info->txq = txq;
+
 	txq_info->deferred_start = (tx_conf->tx_deferred_start != 0);
 
 	return 0;
 
-fail_alloc_tsoh_objs:
-	rte_free(txq->sw_ring);
-
-fail_desc_alloc:
-	rte_free(txq->pend_desc);
-
-fail_pend_desc_alloc:
+fail_dp_tx_qinit:
 	sfc_dma_free(sa, &txq->mem);
 
 fail_dma_alloc:
+	txq_info->txq = NULL;
 	rte_free(txq);
 
 fail_txq_alloc:
@@ -248,13 +215,12 @@ sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
 	SFC_ASSERT(txq != NULL);
 	SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
 
-	sfc_tso_free_tsoh_objs(txq->sw_ring, txq_info->entries);
+	sa->dp_tx->qdestroy(txq->dp);
+	txq->dp = NULL;
 
 	txq_info->txq = NULL;
 	txq_info->entries = 0;
 
-	rte_free(txq->sw_ring);
-	rte_free(txq->pend_desc);
 	sfc_dma_free(sa, &txq->mem);
 	rte_free(txq);
 }
@@ -421,12 +387,13 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
 		goto fail_tx_qcreate;
 	}
 
-	txq->added = txq->pending = txq->completed = desc_index;
-	txq->hw_vlan_tci = 0;
-
 	efx_tx_qenable(txq->common);
 
-	txq->state |= (SFC_TXQ_STARTED | SFC_TXQ_RUNNING);
+	txq->state |= SFC_TXQ_STARTED;
+
+	rc = sa->dp_tx->qstart(txq->dp, evq->read_ptr, desc_index);
+	if (rc != 0)
+		goto fail_dp_qstart;
 
 	/*
 	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
@@ -436,6 +403,10 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
 
 	return 0;
 
+fail_dp_qstart:
+	txq->state = SFC_TXQ_INITIALIZED;
+	efx_tx_qdestroy(txq->common);
+
 fail_tx_qcreate:
 	sfc_ev_qstop(sa, evq->evq_index);
@@ -451,7 +422,6 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
 	struct sfc_txq *txq;
 	unsigned int retry_count;
 	unsigned int wait_count;
-	unsigned int txds;
 
 	sfc_log_init(sa, "TxQ = %u", sw_index);
@@ -465,7 +435,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
 
 	SFC_ASSERT(txq->state & SFC_TXQ_STARTED);
 
-	txq->state &= ~SFC_TXQ_RUNNING;
+	sa->dp_tx->qstop(txq->dp, &txq->evq->read_ptr);
 
 	/*
 	 * Retry TX queue flushing in case of flush failed or
@@ -500,14 +470,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
 			sfc_info(sa, "TxQ %u flushed", sw_index);
 	}
 
-	sfc_tx_reap(txq);
-
-	for (txds = 0; txds < txq_info->entries; txds++) {
-		if (txq->sw_ring[txds].mbuf != NULL) {
-			rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
-			txq->sw_ring[txds].mbuf = NULL;
-		}
-	}
+	sa->dp_tx->qreap(txq->dp);
 
 	txq->state = SFC_TXQ_INITIALIZED;
@@ -579,6 +542,28 @@ sfc_tx_stop(struct sfc_adapter *sa)
 	efx_tx_fini(sa->nic);
 }
 
+static void
+sfc_efx_tx_reap(struct sfc_efx_txq *txq)
+{
+	unsigned int completed;
+
+	sfc_ev_qpoll(txq->evq);
+
+	for (completed = txq->completed;
+	     completed != txq->pending; completed++) {
+		struct sfc_efx_tx_sw_desc *txd;
+
+		txd = &txq->sw_ring[completed & txq->ptr_mask];
+
+		if (txd->mbuf != NULL) {
+			rte_pktmbuf_free(txd->mbuf);
+			txd->mbuf = NULL;
+		}
+	}
+
+	txq->completed = completed;
+}
+
 /*
  * The function is used to insert or update VLAN tag;
  * the firmware has state of the firmware tag to insert per TxQ
@@ -587,7 +572,7 @@ sfc_tx_stop(struct sfc_adapter *sa)
  * the function will update it
  */
 static unsigned int
-sfc_tx_maybe_insert_tag(struct sfc_txq *txq, struct rte_mbuf *m,
+sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
 			    efx_desc_t **pend)
 {
 	uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ?
@@ -610,10 +595,11 @@ sfc_tx_maybe_insert_tag(struct sfc_txq *txq, struct rte_mbuf *m,
 	return 1;
 }
 
-uint16_t
-sfc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+static uint16_t
+sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	struct sfc_txq *txq = (struct sfc_txq *)tx_queue;
+	struct sfc_dp_txq *dp_txq = (struct sfc_dp_txq *)tx_queue;
+	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
 	unsigned int added = txq->added;
 	unsigned int pushed = added;
 	unsigned int pkts_sent = 0;
@@ -625,7 +611,7 @@ sfc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	int rc __rte_unused;
 	struct rte_mbuf **pktp;
 
-	if (unlikely((txq->state & SFC_TXQ_RUNNING) == 0))
+	if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) == 0))
 		goto done;
 
 	/*
@@ -636,7 +622,7 @@ sfc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	reap_done = (fill_level > soft_max_fill);
 
 	if (reap_done) {
-		sfc_tx_reap(txq);
+		sfc_efx_tx_reap(txq);
 		/*
 		 * Recalculate fill level since 'txq->completed'
 		 * might have changed on reap
@@ -659,14 +645,14 @@ sfc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		 * DEV_TX_VLAN_OFFLOAD and pushes VLAN TCI, then
 		 * TX_ERROR will occur
 		 */
-		pkt_descs += sfc_tx_maybe_insert_tag(txq, m_seg, &pend);
+		pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
 
 		if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
 			/*
 			 * We expect correct 'pkt->l[2, 3, 4]_len' values
 			 * to be set correctly by the caller
 			 */
-			if (sfc_tso_do(txq, added, &m_seg, &in_off, &pend,
+			if (sfc_efx_tso_do(txq, added, &m_seg, &in_off, &pend,
 				       &pkt_descs, &pkt_len) != 0) {
 				/* We may have reached this place for
 				 * one of the following reasons:
@@ -749,7 +735,7 @@ sfc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			 * Try to reap (if we haven't yet).
 			 */
 			if (!reap_done) {
-				sfc_tx_reap(txq);
+				sfc_efx_tx_reap(txq);
 				reap_done = B_TRUE;
 				fill_level = added - txq->completed;
 				if (fill_level > hard_max_fill) {
@@ -778,9 +764,169 @@ sfc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 #if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
 	if (!reap_done)
-		sfc_tx_reap(txq);
+		sfc_efx_tx_reap(txq);
 #endif
 
 done:
 	return pkts_sent;
 }
+
+struct sfc_txq *
+sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq)
+{
+	const struct sfc_dp_queue *dpq = &dp_txq->dpq;
+	struct rte_eth_dev *eth_dev;
+	struct sfc_adapter *sa;
+	struct sfc_txq *txq;
+
+	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
+	eth_dev = &rte_eth_devices[dpq->port_id];
+
+	sa = eth_dev->data->dev_private;
+
+	SFC_ASSERT(dpq->queue_id < sa->txq_count);
+	txq = sa->txq_info[dpq->queue_id].txq;
+
+	SFC_ASSERT(txq != NULL);
+	return txq;
+}
+
+static sfc_dp_tx_qcreate_t sfc_efx_tx_qcreate;
+static int
+sfc_efx_tx_qcreate(uint16_t port_id, uint16_t queue_id,
+		   const struct rte_pci_addr *pci_addr,
+		   int socket_id,
+		   const struct sfc_dp_tx_qcreate_info *info,
+		   struct sfc_dp_txq **dp_txqp)
+{
+	struct sfc_efx_txq *txq;
+	struct sfc_txq *ctrl_txq;
+	int rc;
+
+	rc = ENOMEM;
+	txq = rte_zmalloc_socket("sfc-efx-txq", sizeof(*txq),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq == NULL)
+		goto fail_txq_alloc;
+
+	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
+
+	rc = ENOMEM;
+	txq->pend_desc = rte_calloc_socket("sfc-efx-txq-pend-desc",
+					   EFX_TXQ_LIMIT(info->txq_entries),
+					   sizeof(*txq->pend_desc), 0,
+					   socket_id);
+	if (txq->pend_desc == NULL)
+		goto fail_pend_desc_alloc;
+
+	rc = ENOMEM;
+	txq->sw_ring = rte_calloc_socket("sfc-efx-txq-sw_ring",
+					 info->txq_entries,
+					 sizeof(*txq->sw_ring),
+					 RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq->sw_ring == NULL)
+		goto fail_sw_ring_alloc;
+
+	ctrl_txq = sfc_txq_by_dp_txq(&txq->dp);
+	if (ctrl_txq->evq->sa->tso) {
+		rc = sfc_efx_tso_alloc_tsoh_objs(txq->sw_ring,
+						 info->txq_entries, socket_id);
+		if (rc != 0)
+			goto fail_alloc_tsoh_objs;
+	}
+
+	txq->evq = ctrl_txq->evq;
+	txq->ptr_mask = info->txq_entries - 1;
+	txq->free_thresh = info->free_thresh;
+	txq->dma_desc_size_max = info->dma_desc_size_max;
+
+	*dp_txqp = &txq->dp;
+	return 0;
+
+fail_alloc_tsoh_objs:
+	rte_free(txq->sw_ring);
+
+fail_sw_ring_alloc:
+	rte_free(txq->pend_desc);
+
+fail_pend_desc_alloc:
+	rte_free(txq);
+
+fail_txq_alloc:
+	return rc;
+}
+
+static sfc_dp_tx_qdestroy_t sfc_efx_tx_qdestroy;
+static void
+sfc_efx_tx_qdestroy(struct sfc_dp_txq *dp_txq)
+{
+	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+
+	sfc_efx_tso_free_tsoh_objs(txq->sw_ring, txq->ptr_mask + 1);
+	rte_free(txq->sw_ring);
+	rte_free(txq->pend_desc);
+	rte_free(txq);
+}
+
+static sfc_dp_tx_qstart_t sfc_efx_tx_qstart;
+static int
+sfc_efx_tx_qstart(struct sfc_dp_txq *dp_txq,
+		  __rte_unused unsigned int evq_read_ptr,
+		  unsigned int txq_desc_index)
+{
+	/* libefx-based datapath is specific to libefx-based PMD */
+	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+	struct sfc_txq *ctrl_txq = sfc_txq_by_dp_txq(dp_txq);
+
+	txq->common = ctrl_txq->common;
+
+	txq->pending = txq->completed = txq->added = txq_desc_index;
+	txq->hw_vlan_tci = 0;
+
+	txq->flags |= (SFC_EFX_TXQ_FLAG_STARTED | SFC_EFX_TXQ_FLAG_RUNNING);
+
+	return 0;
+}
+
+static sfc_dp_tx_qstop_t sfc_efx_tx_qstop;
+static void
+sfc_efx_tx_qstop(struct sfc_dp_txq *dp_txq,
+		 __rte_unused unsigned int *evq_read_ptr)
+{
+	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+
+	txq->flags &= ~SFC_EFX_TXQ_FLAG_RUNNING;
+}
+
+static sfc_dp_tx_qreap_t sfc_efx_tx_qreap;
+static void
+sfc_efx_tx_qreap(struct sfc_dp_txq *dp_txq)
+{
+	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+	unsigned int txds;
+
+	sfc_efx_tx_reap(txq);
+
+	for (txds = 0; txds <= txq->ptr_mask; txds++) {
+		if (txq->sw_ring[txds].mbuf != NULL) {
+			rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
+			txq->sw_ring[txds].mbuf = NULL;
+		}
+	}
+
+	txq->flags &= ~SFC_EFX_TXQ_FLAG_STARTED;
+}
+
+struct sfc_dp_tx sfc_efx_tx = {
+	.dp = {
+		.name		= SFC_KVARG_DATAPATH_EFX,
+		.type		= SFC_DP_TX,
+		.hw_fw_caps	= 0,
+	},
+	.qcreate	= sfc_efx_tx_qcreate,
+	.qdestroy	= sfc_efx_tx_qdestroy,
+	.qstart		= sfc_efx_tx_qstart,
+	.qstop		= sfc_efx_tx_qstop,
+	.qreap		= sfc_efx_tx_qreap,
+	.pkt_burst	= sfc_efx_xmit_pkts,
+};

drivers/net/sfc/sfc_tx.h

@@ -37,6 +37,8 @@
 
 #include "efx.h"
 
+#include "sfc_dp_tx.h"
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -44,7 +46,11 @@ extern "C" {
 struct sfc_adapter;
 struct sfc_evq;
 
-struct sfc_tx_sw_desc {
+/**
+ * Software Tx descriptor information associated with hardware Tx
+ * descriptor.
+ */
+struct sfc_efx_tx_sw_desc {
 	struct rte_mbuf		*mbuf;
 	uint8_t			*tsoh;	/* Buffer to store TSO header */
 };
@@ -54,22 +60,51 @@ enum sfc_txq_state_bit {
 #define SFC_TXQ_INITIALIZED	(1 << SFC_TXQ_INITIALIZED_BIT)
 	SFC_TXQ_STARTED_BIT,
 #define SFC_TXQ_STARTED		(1 << SFC_TXQ_STARTED_BIT)
-	SFC_TXQ_RUNNING_BIT,
-#define SFC_TXQ_RUNNING		(1 << SFC_TXQ_RUNNING_BIT)
 	SFC_TXQ_FLUSHING_BIT,
#define SFC_TXQ_FLUSHING	(1 << SFC_TXQ_FLUSHING_BIT)
 	SFC_TXQ_FLUSHED_BIT,
 #define SFC_TXQ_FLUSHED		(1 << SFC_TXQ_FLUSHED_BIT)
 };
 
+/**
+ * Transmit queue control information. Not used on datapath.
+ * Allocated on the socket specified on the queue setup.
+ */
 struct sfc_txq {
-	struct sfc_evq		*evq;
-	struct sfc_tx_sw_desc	*sw_ring;
 	unsigned int		state;
+	unsigned int		hw_index;
+	struct sfc_evq		*evq;
+	efsys_mem_t		mem;
+	struct sfc_dp_txq	*dp;
+	efx_txq_t		*common;
+	unsigned int		free_thresh;
+	unsigned int		flags;
+};
+
+static inline unsigned int
+sfc_txq_sw_index_by_hw_index(unsigned int hw_index)
+{
+	return hw_index;
+}
+
+static inline unsigned int
+sfc_txq_sw_index(const struct sfc_txq *txq)
+{
+	return sfc_txq_sw_index_by_hw_index(txq->hw_index);
+}
+
+struct sfc_txq *sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq);
+
+/**
+ * Transmit queue information used on libefx-based data path.
+ * Allocated on the socket specified on the queue setup.
+ */
+struct sfc_efx_txq {
+	struct sfc_evq			*evq;
+	struct sfc_efx_tx_sw_desc	*sw_ring;
 	unsigned int			ptr_mask;
 	efx_desc_t			*pend_desc;
 	efx_txq_t			*common;
-	efsys_mem_t			mem;
 	unsigned int			added;
 	unsigned int			pending;
 	unsigned int			completed;
@@ -79,12 +114,17 @@ struct sfc_txq {
 	unsigned int			hw_index;
 	unsigned int			flags;
+#define SFC_EFX_TXQ_FLAG_STARTED	0x1
+#define SFC_EFX_TXQ_FLAG_RUNNING	0x2
+
+	/* Datapath transmit queue anchor */
+	struct sfc_dp_txq		dp;
 };
 
-static inline unsigned int
-sfc_txq_sw_index(const struct sfc_txq *txq)
+static inline struct sfc_efx_txq *
+sfc_efx_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
 {
-	return txq->hw_index;
+	return container_of(dp_txq, struct sfc_efx_txq, dp);
 }
 
 struct sfc_txq_info {
@@ -108,17 +148,15 @@ void sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
 int sfc_tx_start(struct sfc_adapter *sa);
 void sfc_tx_stop(struct sfc_adapter *sa);
 
-uint16_t sfc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-		       uint16_t nb_pkts);
-
 /* From 'sfc_tso.c' */
-int sfc_tso_alloc_tsoh_objs(struct sfc_tx_sw_desc *sw_ring,
-			    unsigned int txq_entries, unsigned int socket_id);
-void sfc_tso_free_tsoh_objs(struct sfc_tx_sw_desc *sw_ring,
+int sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+				unsigned int txq_entries,
+				unsigned int socket_id);
+void sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
 				unsigned int txq_entries);
-int sfc_tso_do(struct sfc_txq *txq, unsigned int idx, struct rte_mbuf **in_seg,
-	       size_t *in_off, efx_desc_t **pend, unsigned int *pkt_descs,
-	       size_t *pkt_len);
+int sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
+		   struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend,
+		   unsigned int *pkt_descs, size_t *pkt_len);
 
 #ifdef __cplusplus
 }
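
A note on the queue layout the patch relies on throughout: the control
path and event path only ever hold the generic struct sfc_dp_txq
pointer, while each datapath embeds that structure into its own queue
(the dp member of struct sfc_efx_txq above) and recovers the outer
object with container_of(), as sfc_efx_txq_by_dp_txq() does. The same
pattern in isolation, with illustrative names only:

#include <rte_common.h>		/* container_of() */

struct dp_txq {				/* generic, datapath-agnostic part */
	unsigned int queue_id;
};

struct impl_txq {			/* implementation-specific queue */
	unsigned int added;
	struct dp_txq dp;		/* embedded anchor */
};

static inline struct impl_txq *
impl_txq_by_dp_txq(struct dp_txq *dp)
{
	/* recover the outer queue from the generic pointer */
	return container_of(dp, struct impl_txq, dp);
}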