net/sfc: prepare for internal Rx queue

Make the software index of an Rx queue separate from its ethdev queue
index. When an ethdev RxQ is accessed in ethdev callbacks, an explicit
ethdev queue index is used.

Signed-off-by: Igor Romanov <igor.romanov@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Reviewed-by: Andy Moreton <amoreton@xilinx.com>
Reviewed-by: Ivan Malov <ivan.malov@oktetlabs.ru>
Igor Romanov, 2021-07-02 11:39:29 +03:00; committed by David Marchand
parent 00b67591d2
commit 09cafbddbb
8 changed files with 215 additions and 95 deletions
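
For orientation, here is a minimal standalone sketch (illustrative only, not part of the patch) of the indexing scheme the patch sets up: an ethdev Rx queue id maps to a driver-internal software index, which is an identity mapping while only ethdev queues exist; SFC_ETHDEV_QID_INVALID marks a queue that has no ethdev id; and the Rx event queue software index is the Rx queue software index plus one. The helper names and main() below are simplified stand-ins, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the sfc_sw_index_t / sfc_ethdev_qid_t types added by this patch */
typedef unsigned int sfc_sw_index_t;
typedef int32_t sfc_ethdev_qid_t;
#define SFC_ETHDEV_QID_INVALID ((sfc_ethdev_qid_t)(-1))

/* Identity mappings while only ethdev Rx queues exist (cf. the inline helpers added below) */
static sfc_sw_index_t
rxq_sw_index_by_ethdev_rx_qid(sfc_ethdev_qid_t ethdev_qid)
{
	return ethdev_qid;
}

static sfc_ethdev_qid_t
ethdev_rx_qid_by_rxq_sw_index(sfc_sw_index_t rxq_sw_index)
{
	return rxq_sw_index;
}

/* Rx EVQ software indices start at 1; EVQ 0 is not an Rx EVQ */
static sfc_sw_index_t
evq_sw_index_by_rxq_sw_index(sfc_sw_index_t rxq_sw_index)
{
	return 1 + rxq_sw_index;
}

int
main(void)
{
	sfc_ethdev_qid_t ethdev_qid = 2;	/* queue id seen by the application */
	sfc_sw_index_t sw_index = rxq_sw_index_by_ethdev_rx_qid(ethdev_qid);

	printf("ethdev RxQ %d -> sw index %u -> Rx EVQ sw index %u\n",
	       (int)ethdev_qid, sw_index, evq_sw_index_by_rxq_sw_index(sw_index));
	printf("sw index %u maps back to ethdev RxQ %d\n",
	       sw_index, (int)ethdev_rx_qid_by_rxq_sw_index(sw_index));
	return 0;
}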


@@ -29,6 +29,7 @@
 #include "sfc_filter.h"
 #include "sfc_sriov.h"
 #include "sfc_mae.h"
+#include "sfc_dp.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -168,6 +169,7 @@ struct sfc_rss {
 struct sfc_adapter_shared {
 	unsigned int rxq_count;
 	struct sfc_rxq_info *rxq_info;
+	unsigned int ethdev_rxq_count;
 
 	unsigned int txq_count;
 	struct sfc_txq_info *txq_info;


@@ -96,6 +96,10 @@ struct sfc_dp {
 /** List of datapath variants */
 TAILQ_HEAD(sfc_dp_list, sfc_dp);
 
+typedef unsigned int sfc_sw_index_t;
+
+typedef int32_t sfc_ethdev_qid_t;
+#define SFC_ETHDEV_QID_INVALID ((sfc_ethdev_qid_t)(-1))
+
 /* Check if available HW/FW capabilities are sufficient for the datapath */
 static inline bool
 sfc_dp_match_hw_fw_caps(const struct sfc_dp *dp, unsigned int avail_caps)


@@ -463,26 +463,31 @@ sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
 }
 
 static int
-sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,
 		   uint16_t nb_rx_desc, unsigned int socket_id,
 		   const struct rte_eth_rxconf *rx_conf,
 		   struct rte_mempool *mb_pool)
 {
 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+	struct sfc_rxq_info *rxq_info;
+	sfc_sw_index_t sw_index;
 	int rc;
 
 	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
-		     rx_queue_id, nb_rx_desc, socket_id);
+		     ethdev_qid, nb_rx_desc, socket_id);
 
 	sfc_adapter_lock(sa);
 
-	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
+	sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
+	rc = sfc_rx_qinit(sa, sw_index, nb_rx_desc, socket_id,
 			  rx_conf, mb_pool);
 	if (rc != 0)
 		goto fail_rx_qinit;
 
-	dev->data->rx_queues[rx_queue_id] = sas->rxq_info[rx_queue_id].dp;
+	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+	dev->data->rx_queues[ethdev_qid] = rxq_info->dp;
 
 	sfc_adapter_unlock(sa);
@@ -500,7 +505,7 @@ sfc_rx_queue_release(void *queue)
 	struct sfc_dp_rxq *dp_rxq = queue;
 	struct sfc_rxq *rxq;
 	struct sfc_adapter *sa;
-	unsigned int sw_index;
+	sfc_sw_index_t sw_index;
 
 	if (dp_rxq == NULL)
 		return;
@@ -1182,15 +1187,14 @@ sfc_set_mc_addr_list(struct rte_eth_dev *dev,
  * use any process-local pointers from the adapter data.
  */
 static void
-sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
 		      struct rte_eth_rxq_info *qinfo)
 {
 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
 	struct sfc_rxq_info *rxq_info;
 
-	SFC_ASSERT(rx_queue_id < sas->rxq_count);
-	rxq_info = &sas->rxq_info[rx_queue_id];
+	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
 
 	qinfo->mp = rxq_info->refill_mb_pool;
 	qinfo->conf.rx_free_thresh = rxq_info->refill_threshold;
@@ -1232,14 +1236,14 @@ sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
  * use any process-local pointers from the adapter data.
  */
 static uint32_t
-sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t ethdev_qid)
 {
 	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
 	struct sfc_rxq_info *rxq_info;
 
-	SFC_ASSERT(rx_queue_id < sas->rxq_count);
-	rxq_info = &sas->rxq_info[rx_queue_id];
+	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
 
 	if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
 		return 0;
@@ -1293,13 +1297,16 @@ sfc_tx_descriptor_status(void *queue, uint16_t offset)
 }
 
 static int
-sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t ethdev_qid)
 {
 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+	struct sfc_rxq_info *rxq_info;
+	sfc_sw_index_t sw_index;
 	int rc;
 
-	sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+	sfc_log_init(sa, "RxQ=%u", ethdev_qid);
 
 	sfc_adapter_lock(sa);
 
@@ -1307,14 +1314,16 @@ sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	if (sa->state != SFC_ADAPTER_STARTED)
 		goto fail_not_started;
 
-	if (sas->rxq_info[rx_queue_id].state != SFC_RXQ_INITIALIZED)
+	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+	if (rxq_info->state != SFC_RXQ_INITIALIZED)
 		goto fail_not_setup;
 
-	rc = sfc_rx_qstart(sa, rx_queue_id);
+	sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
+	rc = sfc_rx_qstart(sa, sw_index);
 	if (rc != 0)
 		goto fail_rx_qstart;
 
-	sas->rxq_info[rx_queue_id].deferred_started = B_TRUE;
+	rxq_info->deferred_started = B_TRUE;
 
 	sfc_adapter_unlock(sa);
@@ -1329,17 +1338,23 @@ fail_not_started:
 }
 
 static int
-sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t ethdev_qid)
 {
 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+	struct sfc_rxq_info *rxq_info;
+	sfc_sw_index_t sw_index;
 
-	sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+	sfc_log_init(sa, "RxQ=%u", ethdev_qid);
 
 	sfc_adapter_lock(sa);
 
-	sfc_rx_qstop(sa, rx_queue_id);
-	sas->rxq_info[rx_queue_id].deferred_started = B_FALSE;
+	sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
+	sfc_rx_qstop(sa, sw_index);
+
+	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+	rxq_info->deferred_started = B_FALSE;
 
 	sfc_adapter_unlock(sa);
@@ -1766,27 +1781,27 @@ sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool)
 }
 
 static int
-sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t ethdev_qid)
 {
 	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
 	struct sfc_rxq_info *rxq_info;
 
-	SFC_ASSERT(queue_id < sas->rxq_count);
-	rxq_info = &sas->rxq_info[queue_id];
+	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
 
 	return sap->dp_rx->intr_enable(rxq_info->dp);
 }
 
 static int
-sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t ethdev_qid)
 {
 	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
 	struct sfc_rxq_info *rxq_info;
 
-	SFC_ASSERT(queue_id < sas->rxq_count);
-	rxq_info = &sas->rxq_info[queue_id];
+	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
 
 	return sap->dp_rx->intr_disable(rxq_info->dp);
 }


@@ -582,7 +582,7 @@ sfc_ev_qpoll(struct sfc_evq *evq)
 	int rc;
 
 	if (evq->dp_rxq != NULL) {
-		unsigned int rxq_sw_index;
+		sfc_sw_index_t rxq_sw_index;
 
 		rxq_sw_index = evq->dp_rxq->dpq.queue_id;


@@ -69,9 +69,25 @@ struct sfc_evq {
  * Tx event queues follow Rx event queues.
  */
-static inline unsigned int
-sfc_evq_index_by_rxq_sw_index(__rte_unused struct sfc_adapter *sa,
-			      unsigned int rxq_sw_index)
+static inline sfc_ethdev_qid_t
+sfc_ethdev_rx_qid_by_rxq_sw_index(__rte_unused struct sfc_adapter_shared *sas,
+				  sfc_sw_index_t rxq_sw_index)
+{
+	/* Only ethdev queues are present for now */
+	return rxq_sw_index;
+}
+
+static inline sfc_sw_index_t
+sfc_rxq_sw_index_by_ethdev_rx_qid(__rte_unused struct sfc_adapter_shared *sas,
+				  sfc_ethdev_qid_t ethdev_qid)
+{
+	/* Only ethdev queues are present for now */
+	return ethdev_qid;
+}
+
+static inline sfc_sw_index_t
+sfc_evq_sw_index_by_rxq_sw_index(__rte_unused struct sfc_adapter *sa,
+				 sfc_sw_index_t rxq_sw_index)
 {
 	return 1 + rxq_sw_index;
 }


@@ -1400,10 +1400,10 @@ sfc_flow_parse_queue(struct sfc_adapter *sa,
 	struct sfc_rxq *rxq;
 	struct sfc_rxq_info *rxq_info;
 
-	if (queue->index >= sfc_sa2shared(sa)->rxq_count)
+	if (queue->index >= sfc_sa2shared(sa)->ethdev_rxq_count)
 		return -EINVAL;
 
-	rxq = &sa->rxq_ctrl[queue->index];
+	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, queue->index);
 	spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
 
 	rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
@@ -1420,7 +1420,7 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
 	struct sfc_rss *rss = &sas->rss;
-	unsigned int rxq_sw_index;
+	sfc_ethdev_qid_t ethdev_qid;
 	struct sfc_rxq *rxq;
 	unsigned int rxq_hw_index_min;
 	unsigned int rxq_hw_index_max;
@@ -1434,18 +1434,19 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
 	if (action_rss->queue_num == 0)
 		return -EINVAL;
 
-	rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
-	rxq = &sa->rxq_ctrl[rxq_sw_index];
+	ethdev_qid = sfc_sa2shared(sa)->ethdev_rxq_count - 1;
+	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
 	rxq_hw_index_min = rxq->hw_index;
 	rxq_hw_index_max = 0;
 
 	for (i = 0; i < action_rss->queue_num; ++i) {
-		rxq_sw_index = action_rss->queue[i];
+		ethdev_qid = action_rss->queue[i];
 
-		if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
+		if ((unsigned int)ethdev_qid >=
+		    sfc_sa2shared(sa)->ethdev_rxq_count)
 			return -EINVAL;
 
-		rxq = &sa->rxq_ctrl[rxq_sw_index];
+		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
 
 		if (rxq->hw_index < rxq_hw_index_min)
 			rxq_hw_index_min = rxq->hw_index;
@@ -1509,9 +1510,10 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
 	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
 		unsigned int nb_queues = action_rss->queue_num;
-		unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
-		struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];
+		struct sfc_rxq *rxq;
 
+		ethdev_qid = action_rss->queue[i % nb_queues];
+		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
 		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
 	}


@@ -654,14 +654,17 @@ struct sfc_dp_rx sfc_efx_rx = {
 };
 
 static void
-sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qflush(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 {
+	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+	sfc_ethdev_qid_t ethdev_qid;
 	struct sfc_rxq_info *rxq_info;
 	struct sfc_rxq *rxq;
 	unsigned int retry_count;
 	unsigned int wait_count;
 	int rc;
 
+	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
 	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
 	SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);
@@ -698,13 +701,16 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
 			 (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));
 
 		if (rxq_info->state & SFC_RXQ_FLUSHING)
-			sfc_err(sa, "RxQ %u flush timed out", sw_index);
+			sfc_err(sa, "RxQ %d (internal %u) flush timed out",
+				ethdev_qid, sw_index);
 
 		if (rxq_info->state & SFC_RXQ_FLUSH_FAILED)
-			sfc_err(sa, "RxQ %u flush failed", sw_index);
+			sfc_err(sa, "RxQ %d (internal %u) flush failed",
+				ethdev_qid, sw_index);
 
 		if (rxq_info->state & SFC_RXQ_FLUSHED)
-			sfc_notice(sa, "RxQ %u flushed", sw_index);
+			sfc_notice(sa, "RxQ %d (internal %u) flushed",
+				   ethdev_qid, sw_index);
 	}
 
 	sa->priv.dp_rx->qpurge(rxq_info->dp);
@@ -764,17 +770,20 @@ retry:
 }
 
 int
-sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 {
+	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+	sfc_ethdev_qid_t ethdev_qid;
 	struct sfc_rxq_info *rxq_info;
 	struct sfc_rxq *rxq;
 	struct sfc_evq *evq;
 	efx_rx_prefix_layout_t pinfo;
 	int rc;
 
-	sfc_log_init(sa, "sw_index=%u", sw_index);
-
 	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
+	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
+
+	sfc_log_init(sa, "RxQ %d (internal %u)", ethdev_qid, sw_index);
 
 	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
 	SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);
@@ -782,7 +791,7 @@ sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
 	rxq = &sa->rxq_ctrl[sw_index];
 	evq = rxq->evq;
 
-	rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
+	rc = sfc_ev_qstart(evq, sfc_evq_sw_index_by_rxq_sw_index(sa, sw_index));
 	if (rc != 0)
 		goto fail_ev_qstart;
 
@@ -833,15 +842,16 @@ sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
 	rxq_info->state |= SFC_RXQ_STARTED;
 
-	if (sw_index == 0 && !sfc_sa2shared(sa)->isolated) {
+	if (ethdev_qid == 0 && !sfc_sa2shared(sa)->isolated) {
 		rc = sfc_rx_default_rxq_set_filter(sa, rxq);
 		if (rc != 0)
 			goto fail_mac_filter_default_rxq_set;
 	}
 
 	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
-	sa->eth_dev->data->rx_queue_state[sw_index] =
-		RTE_ETH_QUEUE_STATE_STARTED;
+	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+		sa->eth_dev->data->rx_queue_state[ethdev_qid] =
+			RTE_ETH_QUEUE_STATE_STARTED;
 
 	return 0;
@@ -864,14 +874,17 @@ fail_ev_qstart:
 }
 
 void
-sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qstop(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 {
+	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+	sfc_ethdev_qid_t ethdev_qid;
 	struct sfc_rxq_info *rxq_info;
 	struct sfc_rxq *rxq;
 
-	sfc_log_init(sa, "sw_index=%u", sw_index);
-
 	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
+	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
+
+	sfc_log_init(sa, "RxQ %d (internal %u)", ethdev_qid, sw_index);
 
 	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
@@ -880,13 +893,14 @@ sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
 	SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);
 
 	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
-	sa->eth_dev->data->rx_queue_state[sw_index] =
-		RTE_ETH_QUEUE_STATE_STOPPED;
+	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+		sa->eth_dev->data->rx_queue_state[ethdev_qid] =
+			RTE_ETH_QUEUE_STATE_STOPPED;
 
 	rxq = &sa->rxq_ctrl[sw_index];
 	sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);
 
-	if (sw_index == 0)
+	if (ethdev_qid == 0)
 		efx_mac_filter_default_rxq_clear(sa->nic);
 
 	sfc_rx_qflush(sa, sw_index);
@@ -1056,11 +1070,13 @@ sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
 }
 
 int
-sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
 	     uint16_t nb_rx_desc, unsigned int socket_id,
 	     const struct rte_eth_rxconf *rx_conf,
 	     struct rte_mempool *mb_pool)
 {
+	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+	sfc_ethdev_qid_t ethdev_qid;
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
 	int rc;
@@ -1092,16 +1108,22 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	SFC_ASSERT(rxq_entries <= sa->rxq_max_entries);
 	SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
 
-	offloads = rx_conf->offloads |
-		sa->eth_dev->data->dev_conf.rxmode.offloads;
+	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
+
+	offloads = rx_conf->offloads;
+	/* Add device level Rx offloads if the queue is an ethdev Rx queue */
+	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+		offloads |= sa->eth_dev->data->dev_conf.rxmode.offloads;
+
 	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
 	if (rc != 0)
 		goto fail_bad_conf;
 
 	buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
 	if (buf_size == 0) {
-		sfc_err(sa, "RxQ %u mbuf pool object size is too small",
-			sw_index);
+		sfc_err(sa,
+			"RxQ %d (internal %u) mbuf pool object size is too small",
+			ethdev_qid, sw_index);
 		rc = EINVAL;
 		goto fail_bad_conf;
 	}
@@ -1111,11 +1133,13 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 				 (offloads & DEV_RX_OFFLOAD_SCATTER),
 				 encp->enc_rx_scatter_max,
 				 &error)) {
-		sfc_err(sa, "RxQ %u MTU check failed: %s", sw_index, error);
-		sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
+		sfc_err(sa, "RxQ %d (internal %u) MTU check failed: %s",
+			ethdev_qid, sw_index, error);
+		sfc_err(sa,
+			"RxQ %d (internal %u) calculated Rx buffer size is %u vs "
 			"PDU size %u plus Rx prefix %u bytes",
-			sw_index, buf_size, (unsigned int)sa->port.pdu,
-			encp->enc_rx_prefix_size);
+			ethdev_qid, sw_index, buf_size,
+			(unsigned int)sa->port.pdu, encp->enc_rx_prefix_size);
 		rc = EINVAL;
 		goto fail_bad_conf;
 	}
@@ -1193,7 +1217,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	info.flags = rxq_info->rxq_flags;
 	info.rxq_entries = rxq_info->entries;
 	info.rxq_hw_ring = rxq->mem.esm_base;
-	info.evq_hw_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);
+	info.evq_hw_index = sfc_evq_sw_index_by_rxq_sw_index(sa, sw_index);
 	info.evq_entries = evq_entries;
 	info.evq_hw_ring = evq->mem.esm_base;
 	info.hw_index = rxq->hw_index;
@@ -1231,13 +1255,18 @@ fail_size_up_rings:
 }
 
 void
-sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 {
+	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+	sfc_ethdev_qid_t ethdev_qid;
 	struct sfc_rxq_info *rxq_info;
 	struct sfc_rxq *rxq;
 
 	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
-	sa->eth_dev->data->rx_queues[sw_index] = NULL;
+	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
+
+	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+		sa->eth_dev->data->rx_queues[ethdev_qid] = NULL;
 
 	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
@@ -1479,14 +1508,41 @@ finish:
 	return rc;
 }
 
+struct sfc_rxq_info *
+sfc_rxq_info_by_ethdev_qid(struct sfc_adapter_shared *sas,
+			   sfc_ethdev_qid_t ethdev_qid)
+{
+	sfc_sw_index_t sw_index;
+
+	SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_rxq_count);
+	SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID);
+
+	sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, ethdev_qid);
+	return &sas->rxq_info[sw_index];
+}
+
+struct sfc_rxq *
+sfc_rxq_ctrl_by_ethdev_qid(struct sfc_adapter *sa, sfc_ethdev_qid_t ethdev_qid)
+{
+	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+	sfc_sw_index_t sw_index;
+
+	SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_rxq_count);
+	SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID);
+
+	sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, ethdev_qid);
+	return &sa->rxq_ctrl[sw_index];
+}
+
 int
 sfc_rx_start(struct sfc_adapter *sa)
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-	unsigned int sw_index;
+	sfc_sw_index_t sw_index;
 	int rc;
 
-	sfc_log_init(sa, "rxq_count=%u", sas->rxq_count);
+	sfc_log_init(sa, "rxq_count=%u (internal %u)", sas->ethdev_rxq_count,
+		     sas->rxq_count);
 
 	rc = efx_rx_init(sa->nic);
 	if (rc != 0)
@@ -1524,9 +1580,10 @@ void
 sfc_rx_stop(struct sfc_adapter *sa)
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-	unsigned int sw_index;
+	sfc_sw_index_t sw_index;
 
-	sfc_log_init(sa, "rxq_count=%u", sas->rxq_count);
+	sfc_log_init(sa, "rxq_count=%u (internal %u)", sas->ethdev_rxq_count,
+		     sas->rxq_count);
 
 	sw_index = sas->rxq_count;
 	while (sw_index-- > 0) {
@@ -1538,7 +1595,7 @@ sfc_rx_stop(struct sfc_adapter *sa)
 }
 
 static int
-sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qinit_info(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
 	struct sfc_rxq_info *rxq_info = &sas->rxq_info[sw_index];
@@ -1606,17 +1663,29 @@ static void
 sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
 {
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-	int sw_index;
+	sfc_sw_index_t sw_index;
+	sfc_ethdev_qid_t ethdev_qid;
 
-	SFC_ASSERT(nb_rx_queues <= sas->rxq_count);
+	SFC_ASSERT(nb_rx_queues <= sas->ethdev_rxq_count);
 
-	sw_index = sas->rxq_count;
-	while (--sw_index >= (int)nb_rx_queues) {
-		if (sas->rxq_info[sw_index].state & SFC_RXQ_INITIALIZED)
+	/*
+	 * Finalize only ethdev queues since other ones are finalized only
+	 * on device close and they may require additional deinitialization.
+	 */
+	ethdev_qid = sas->ethdev_rxq_count;
+	while (--ethdev_qid >= (int)nb_rx_queues) {
+		struct sfc_rxq_info *rxq_info;
+
+		rxq_info = sfc_rxq_info_by_ethdev_qid(sas, ethdev_qid);
+		if (rxq_info->state & SFC_RXQ_INITIALIZED) {
+			sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas,
+								     ethdev_qid);
 			sfc_rx_qfini(sa, sw_index);
+		}
 	}
 
-	sas->rxq_count = nb_rx_queues;
+	sas->ethdev_rxq_count = nb_rx_queues;
 }
 
 /**
@@ -1637,7 +1706,7 @@ sfc_rx_configure(struct sfc_adapter *sa)
 	int rc;
 
 	sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
-		     nb_rx_queues, sas->rxq_count);
+		     nb_rx_queues, sas->ethdev_rxq_count);
 
 	rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
 	if (rc != 0)
@@ -1666,7 +1735,7 @@ sfc_rx_configure(struct sfc_adapter *sa)
 		struct sfc_rxq_info *new_rxq_info;
 		struct sfc_rxq *new_rxq_ctrl;
 
-		if (nb_rx_queues < sas->rxq_count)
+		if (nb_rx_queues < sas->ethdev_rxq_count)
 			sfc_rx_fini_queues(sa, nb_rx_queues);
 
 		rc = ENOMEM;
@@ -1685,30 +1754,38 @@ sfc_rx_configure(struct sfc_adapter *sa)
 		sas->rxq_info = new_rxq_info;
 		sa->rxq_ctrl = new_rxq_ctrl;
 
 		if (nb_rx_queues > sas->rxq_count) {
-			memset(&sas->rxq_info[sas->rxq_count], 0,
-			       (nb_rx_queues - sas->rxq_count) *
+			unsigned int rxq_count = sas->rxq_count;
+
+			memset(&sas->rxq_info[rxq_count], 0,
+			       (nb_rx_queues - rxq_count) *
 			       sizeof(sas->rxq_info[0]));
-			memset(&sa->rxq_ctrl[sas->rxq_count], 0,
-			       (nb_rx_queues - sas->rxq_count) *
+			memset(&sa->rxq_ctrl[rxq_count], 0,
+			       (nb_rx_queues - rxq_count) *
 			       sizeof(sa->rxq_ctrl[0]));
 		}
 	}
 
-	while (sas->rxq_count < nb_rx_queues) {
-		rc = sfc_rx_qinit_info(sa, sas->rxq_count);
+	while (sas->ethdev_rxq_count < nb_rx_queues) {
+		sfc_sw_index_t sw_index;
+
+		sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas,
+						sas->ethdev_rxq_count);
+		rc = sfc_rx_qinit_info(sa, sw_index);
 		if (rc != 0)
 			goto fail_rx_qinit_info;
 
-		sas->rxq_count++;
+		sas->ethdev_rxq_count++;
 	}
 
+	sas->rxq_count = sas->ethdev_rxq_count;
+
 configure_rss:
 	rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
-			MIN(sas->rxq_count, EFX_MAXRSS) : 0;
+			MIN(sas->ethdev_rxq_count, EFX_MAXRSS) : 0;
 
 	if (rss->channels > 0) {
 		struct rte_eth_rss_conf *adv_conf_rss;
-		unsigned int sw_index;
+		sfc_sw_index_t sw_index;
 
 		for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
 			rss->tbl[sw_index] = sw_index % rss->channels;


@@ -119,6 +119,10 @@ struct sfc_rxq_info {
 };
 
 struct sfc_rxq_info *sfc_rxq_info_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq);
+struct sfc_rxq_info *sfc_rxq_info_by_ethdev_qid(struct sfc_adapter_shared *sas,
+						sfc_ethdev_qid_t ethdev_qid);
+struct sfc_rxq *sfc_rxq_ctrl_by_ethdev_qid(struct sfc_adapter *sa,
+					   sfc_ethdev_qid_t ethdev_qid);
 
 int sfc_rx_configure(struct sfc_adapter *sa);
 void sfc_rx_close(struct sfc_adapter *sa);
@@ -129,9 +133,9 @@ int sfc_rx_qinit(struct sfc_adapter *sa, unsigned int rx_queue_id,
 		 uint16_t nb_rx_desc, unsigned int socket_id,
 		 const struct rte_eth_rxconf *rx_conf,
 		 struct rte_mempool *mb_pool);
-void sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
-int sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
-void sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
+void sfc_rx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index);
+int sfc_rx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index);
+void sfc_rx_qstop(struct sfc_adapter *sa, sfc_sw_index_t sw_index);
 uint64_t sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa);
 uint64_t sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa);