net/sfc: reserve queues for port representors

A Tx/Rx queue pair is required to forward traffic between
port representors and virtual functions.

Signed-off-by: Igor Romanov <igor.romanov@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Reviewed-by: Andy Moreton <amoreton@xilinx.com>
Reviewed-by: Ivan Malov <ivan.malov@oktetlabs.ru>
This commit is contained in:
Igor Romanov 2021-10-11 17:48:27 +03:00 committed by Ferruh Yigit
parent 52e80b1b64
commit 689a56742a
6 changed files with 124 additions and 31 deletions

View File

@ -46,6 +46,12 @@ sfc_repr_supported(const struct sfc_adapter *sa)
return true;
}
bool
sfc_repr_available(const struct sfc_adapter_shared *sas)
{
	/*
	 * Representor support is usable only if both a proxy RxQ and a
	 * proxy TxQ were actually reserved for it.
	 */
	if (sas->nb_repr_rxq == 0)
		return false;

	return sas->nb_repr_txq > 0;
}
int
sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
size_t len, int socket_id, efsys_mem_t *esmp)
@ -296,6 +302,41 @@ sfc_estimate_resource_limits(struct sfc_adapter *sa)
sas->counters_rxq_allocated = false;
}
if (sfc_repr_supported(sa) &&
evq_allocated >= SFC_REPR_PROXY_NB_RXQ_MIN +
SFC_REPR_PROXY_NB_TXQ_MIN &&
rxq_allocated >= SFC_REPR_PROXY_NB_RXQ_MIN &&
txq_allocated >= SFC_REPR_PROXY_NB_TXQ_MIN) {
unsigned int extra;
txq_allocated -= SFC_REPR_PROXY_NB_TXQ_MIN;
rxq_allocated -= SFC_REPR_PROXY_NB_RXQ_MIN;
evq_allocated -= SFC_REPR_PROXY_NB_RXQ_MIN +
SFC_REPR_PROXY_NB_TXQ_MIN;
sas->nb_repr_rxq = SFC_REPR_PROXY_NB_RXQ_MIN;
sas->nb_repr_txq = SFC_REPR_PROXY_NB_TXQ_MIN;
/* Allocate extra representor RxQs up to the maximum */
extra = MIN(evq_allocated, rxq_allocated);
extra = MIN(extra,
SFC_REPR_PROXY_NB_RXQ_MAX - sas->nb_repr_rxq);
evq_allocated -= extra;
rxq_allocated -= extra;
sas->nb_repr_rxq += extra;
/* Allocate extra representor TxQs up to the maximum */
extra = MIN(evq_allocated, txq_allocated);
extra = MIN(extra,
SFC_REPR_PROXY_NB_TXQ_MAX - sas->nb_repr_txq);
evq_allocated -= extra;
txq_allocated -= extra;
sas->nb_repr_txq += extra;
} else {
sas->nb_repr_rxq = 0;
sas->nb_repr_txq = 0;
}
/* Add remaining allocated queues */
sa->rxq_max += MIN(rxq_allocated, evq_allocated / 2);
sa->txq_max += MIN(txq_allocated, evq_allocated - sa->rxq_max);
@ -313,8 +354,10 @@ fail_nic_init:
static int
sfc_set_drv_limits(struct sfc_adapter *sa)
{
struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
const struct rte_eth_dev_data *data = sa->eth_dev->data;
uint32_t rxq_reserved = sfc_nb_reserved_rxq(sfc_sa2shared(sa));
uint32_t rxq_reserved = sfc_nb_reserved_rxq(sas);
uint32_t txq_reserved = sfc_nb_txq_reserved(sas);
efx_drv_limits_t lim;
memset(&lim, 0, sizeof(lim));
@ -325,10 +368,12 @@ sfc_set_drv_limits(struct sfc_adapter *sa)
* sfc_estimate_resource_limits().
*/
lim.edl_min_evq_count = lim.edl_max_evq_count =
1 + data->nb_rx_queues + data->nb_tx_queues + rxq_reserved;
1 + data->nb_rx_queues + data->nb_tx_queues +
rxq_reserved + txq_reserved;
lim.edl_min_rxq_count = lim.edl_max_rxq_count =
data->nb_rx_queues + rxq_reserved;
lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
lim.edl_min_txq_count = lim.edl_max_txq_count =
data->nb_tx_queues + txq_reserved;
return efx_nic_set_drv_limits(sa->nic, &lim);
}

View File

@ -192,6 +192,8 @@ struct sfc_adapter_shared {
char *dp_tx_name;
bool counters_rxq_allocated;
unsigned int nb_repr_rxq;
unsigned int nb_repr_txq;
};
/* Adapter process private data */
@ -414,6 +416,19 @@ sfc_nb_counter_rxq(const struct sfc_adapter_shared *sas)
}
bool sfc_repr_supported(const struct sfc_adapter *sa);
bool sfc_repr_available(const struct sfc_adapter_shared *sas);
/* Number of RxQs reserved for the port representor proxy */
static inline unsigned int
sfc_repr_nb_rxq(const struct sfc_adapter_shared *sas)
{
	unsigned int nb_rxq = sas->nb_repr_rxq;

	return nb_rxq;
}
/* Number of TxQs reserved for the port representor proxy */
static inline unsigned int
sfc_repr_nb_txq(const struct sfc_adapter_shared *sas)
{
	unsigned int nb_txq = sas->nb_repr_txq;

	return nb_txq;
}
/** Get the number of milliseconds since boot from the default timer */
static inline uint64_t

View File

@ -70,14 +70,21 @@ sfc_mgmt_evq_sw_index(__rte_unused const struct sfc_adapter_shared *sas)
/* Return the number of Rx queues reserved for driver's internal use */
static inline unsigned int
sfc_nb_reserved_rxq(const struct sfc_adapter_shared *sas)
{
	/* Counters RxQ plus RxQs reserved for the representor proxy */
	return sfc_nb_counter_rxq(sas) + sfc_repr_nb_rxq(sas);
}
/* Return the number of Tx queues reserved for driver's internal use */
static inline unsigned int
sfc_nb_txq_reserved(const struct sfc_adapter_shared *sas)
{
	/* Currently only the representor proxy reserves TxQs */
	unsigned int nb_reserved = sfc_repr_nb_txq(sas);

	return nb_reserved;
}
/* Return the number of event queues reserved for driver's internal use */
static inline unsigned int
sfc_nb_reserved_evq(const struct sfc_adapter_shared *sas)
{
	/* An EvQ is required for each reserved Rx/Tx queue */
	return 1 + sfc_nb_reserved_rxq(sas) + sfc_nb_txq_reserved(sas);
}
/*
@ -112,6 +119,7 @@ sfc_counters_rxq_sw_index(const struct sfc_adapter_shared *sas)
* Own event queue is allocated for management, each Rx and each Tx queue.
* Zero event queue is used for management events.
* When counters are supported, one Rx event queue is reserved.
* When representors are supported, Rx and Tx event queues are reserved.
* Rx event queues follow reserved event queues.
* Tx event queues follow Rx event queues.
*/
@ -150,27 +158,37 @@ sfc_evq_sw_index_by_rxq_sw_index(struct sfc_adapter *sa,
}
static inline sfc_ethdev_qid_t
sfc_ethdev_tx_qid_by_txq_sw_index(__rte_unused struct sfc_adapter_shared *sas,
sfc_ethdev_tx_qid_by_txq_sw_index(struct sfc_adapter_shared *sas,
sfc_sw_index_t txq_sw_index)
{
/* Only ethdev queues are present for now */
return txq_sw_index;
if (txq_sw_index < sfc_nb_txq_reserved(sas))
return SFC_ETHDEV_QID_INVALID;
return txq_sw_index - sfc_nb_txq_reserved(sas);
}
static inline sfc_sw_index_t
sfc_txq_sw_index_by_ethdev_tx_qid(__rte_unused struct sfc_adapter_shared *sas,
sfc_txq_sw_index_by_ethdev_tx_qid(struct sfc_adapter_shared *sas,
sfc_ethdev_qid_t ethdev_qid)
{
/* Only ethdev queues are present for now */
return ethdev_qid;
return sfc_nb_txq_reserved(sas) + ethdev_qid;
}
/*
 * Map a TxQ software index to its event queue software index.
 * Reserved TxQs use the reserved EvQs that follow the reserved Rx
 * EvQs; ethdev TxQ EvQs follow all reserved EvQs and the Rx EvQs.
 */
static inline sfc_sw_index_t
sfc_evq_sw_index_by_txq_sw_index(struct sfc_adapter *sa,
				 sfc_sw_index_t txq_sw_index)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	sfc_ethdev_qid_t ethdev_qid;

	ethdev_qid = sfc_ethdev_tx_qid_by_txq_sw_index(sas, txq_sw_index);
	if (ethdev_qid == SFC_ETHDEV_QID_INVALID) {
		/* Reserved TxQ: its EvQ sits among the reserved EvQs */
		return sfc_nb_reserved_evq(sas) - sfc_nb_txq_reserved(sas) +
			txq_sw_index;
	}

	return sfc_nb_reserved_evq(sas) + sa->eth_dev->data->nb_rx_queues +
		ethdev_qid;
}
int sfc_ev_attach(struct sfc_adapter *sa);

View File

@ -29,6 +29,7 @@ sfc_repr_proxy_routine(void *arg)
int
sfc_repr_proxy_attach(struct sfc_adapter *sa)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
struct sfc_repr_proxy *rp = &sa->repr_proxy;
struct rte_service_spec service;
uint32_t cid;
@ -37,7 +38,7 @@ sfc_repr_proxy_attach(struct sfc_adapter *sa)
sfc_log_init(sa, "entry");
if (!sfc_repr_supported(sa)) {
if (!sfc_repr_available(sas)) {
sfc_log_init(sa, "representors not supported - skip");
return 0;
}
@ -102,11 +103,12 @@ fail_get_service_lcore:
void
sfc_repr_proxy_detach(struct sfc_adapter *sa)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
struct sfc_repr_proxy *rp = &sa->repr_proxy;
sfc_log_init(sa, "entry");
if (!sfc_repr_supported(sa)) {
if (!sfc_repr_available(sas)) {
sfc_log_init(sa, "representors not supported - skip");
return;
}
@ -120,6 +122,7 @@ sfc_repr_proxy_detach(struct sfc_adapter *sa)
int
sfc_repr_proxy_start(struct sfc_adapter *sa)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
struct sfc_repr_proxy *rp = &sa->repr_proxy;
int rc;
@ -129,7 +132,7 @@ sfc_repr_proxy_start(struct sfc_adapter *sa)
* The condition to start the proxy is insufficient. It will be
* complemented with representor port start/stop support.
*/
if (!sfc_repr_supported(sa)) {
if (!sfc_repr_available(sas)) {
sfc_log_init(sa, "representors not supported - skip");
return 0;
}
@ -180,12 +183,13 @@ fail_start_core:
void
sfc_repr_proxy_stop(struct sfc_adapter *sa)
{
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
struct sfc_repr_proxy *rp = &sa->repr_proxy;
int rc;
sfc_log_init(sa, "entry");
if (!sfc_repr_supported(sa)) {
if (!sfc_repr_available(sas)) {
sfc_log_init(sa, "representors not supported - skip");
return;
}

View File

@ -16,6 +16,14 @@
extern "C" {
#endif
/* Number of supported RxQs with different mbuf memory pools */
#define SFC_REPR_PROXY_NB_RXQ_MIN (1)
#define SFC_REPR_PROXY_NB_RXQ_MAX (1)

/* One TxQ is required and sufficient for port representors support */
#define SFC_REPR_PROXY_NB_TXQ_MIN (1)
#define SFC_REPR_PROXY_NB_TXQ_MAX (1)
struct sfc_repr_proxy {
uint32_t service_core_id;
uint32_t service_id;

View File

@ -376,6 +376,8 @@ sfc_tx_configure(struct sfc_adapter *sa)
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues;
const unsigned int nb_rsvd_tx_queues = sfc_nb_txq_reserved(sas);
const unsigned int nb_txq_total = nb_tx_queues + nb_rsvd_tx_queues;
int rc = 0;
sfc_log_init(sa, "nb_tx_queues=%u (old %u)",
@ -395,11 +397,11 @@ sfc_tx_configure(struct sfc_adapter *sa)
if (rc != 0)
goto fail_check_mode;
if (nb_tx_queues == sas->txq_count)
if (nb_txq_total == sas->txq_count)
goto done;
if (sas->txq_info == NULL) {
sas->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues,
sas->txq_info = rte_calloc_socket("sfc-txqs", nb_txq_total,
sizeof(sas->txq_info[0]), 0,
sa->socket_id);
if (sas->txq_info == NULL)
@ -410,7 +412,7 @@ sfc_tx_configure(struct sfc_adapter *sa)
* since it should not be shared.
*/
rc = ENOMEM;
sa->txq_ctrl = calloc(nb_tx_queues, sizeof(sa->txq_ctrl[0]));
sa->txq_ctrl = calloc(nb_txq_total, sizeof(sa->txq_ctrl[0]));
if (sa->txq_ctrl == NULL)
goto fail_txqs_ctrl_alloc;
} else {
@ -422,23 +424,23 @@ sfc_tx_configure(struct sfc_adapter *sa)
new_txq_info =
rte_realloc(sas->txq_info,
nb_tx_queues * sizeof(sas->txq_info[0]), 0);
if (new_txq_info == NULL && nb_tx_queues > 0)
nb_txq_total * sizeof(sas->txq_info[0]), 0);
if (new_txq_info == NULL && nb_txq_total > 0)
goto fail_txqs_realloc;
new_txq_ctrl = realloc(sa->txq_ctrl,
nb_tx_queues * sizeof(sa->txq_ctrl[0]));
if (new_txq_ctrl == NULL && nb_tx_queues > 0)
nb_txq_total * sizeof(sa->txq_ctrl[0]));
if (new_txq_ctrl == NULL && nb_txq_total > 0)
goto fail_txqs_ctrl_realloc;
sas->txq_info = new_txq_info;
sa->txq_ctrl = new_txq_ctrl;
if (nb_tx_queues > sas->ethdev_txq_count) {
memset(&sas->txq_info[sas->ethdev_txq_count], 0,
(nb_tx_queues - sas->ethdev_txq_count) *
if (nb_txq_total > sas->txq_count) {
memset(&sas->txq_info[sas->txq_count], 0,
(nb_txq_total - sas->txq_count) *
sizeof(sas->txq_info[0]));
memset(&sa->txq_ctrl[sas->ethdev_txq_count], 0,
(nb_tx_queues - sas->ethdev_txq_count) *
memset(&sa->txq_ctrl[sas->txq_count], 0,
(nb_txq_total - sas->txq_count) *
sizeof(sa->txq_ctrl[0]));
}
}
@ -455,7 +457,8 @@ sfc_tx_configure(struct sfc_adapter *sa)
sas->ethdev_txq_count++;
}
sas->txq_count = sas->ethdev_txq_count;
/* TODO: initialize reserved queues when supported. */
sas->txq_count = sas->ethdev_txq_count + nb_rsvd_tx_queues;
done:
return 0;