net/sfc: implement representor queue setup and release

Implement queue creation and destruction in both port representors
and the representor proxy.
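
The new ops are reached through the standard ethdev API. The following is
an editorial sketch, not part of this commit, of how an application might
set up one Rx and one Tx queue on an already probed representor port;
repr_port_id, the "repr_pool" name and the descriptor counts are
illustrative assumptions.

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_errno.h>

/* Sketch: configure a representor port and set up one Rx and one Tx queue,
 * which ends up in sfc_repr_rx_queue_setup() and sfc_repr_tx_queue_setup()
 * added by this patch.
 */
static int
repr_queue_setup_sketch(uint16_t repr_port_id)
{
	struct rte_eth_conf port_conf = { 0 };
	struct rte_mempool *mp;
	int rc;

	mp = rte_pktmbuf_pool_create("repr_pool", 4096, 256, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
	if (mp == NULL)
		return -rte_errno;

	rc = rte_eth_dev_configure(repr_port_id, 1, 1, &port_conf);
	if (rc != 0)
		return rc;

	/* Handled by sfc_repr_rx_queue_setup(). */
	rc = rte_eth_rx_queue_setup(repr_port_id, 0, 256, SOCKET_ID_ANY,
				    NULL, mp);
	if (rc != 0)
		return rc;

	/* Handled by sfc_repr_tx_queue_setup(). */
	return rte_eth_tx_queue_setup(repr_port_id, 0, 256, SOCKET_ID_ANY,
				      NULL);
}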

Signed-off-by: Igor Romanov <igor.romanov@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Reviewed-by: Andy Moreton <amoreton@xilinx.com>
Reviewed-by: Ivan Malov <ivan.malov@oktetlabs.ru>
Author:    Igor Romanov
Date:      2021-10-11 17:48:35 +03:00
Committer: Ferruh Yigit
Commit:    155583abe6 (parent c377f1adf7)
4 changed files with 426 additions and 0 deletions

@@ -30,6 +30,17 @@ struct sfc_repr_shared {
uint16_t switch_port_id;
};
struct sfc_repr_rxq {
/* Datapath members */
struct rte_ring *ring;
};
struct sfc_repr_txq {
/* Datapath members */
struct rte_ring *ring;
efx_mport_id_t egress_mport;
};
/** Primary process representor private data */
struct sfc_repr {
/**
@@ -50,6 +61,14 @@ struct sfc_repr {
SFC_GENERIC_LOG(ERR, __VA_ARGS__); \
} while (0)
#define sfcr_warn(sr, ...) \
do { \
const struct sfc_repr *_sr = (sr); \
\
(void)_sr; \
SFC_GENERIC_LOG(WARNING, __VA_ARGS__); \
} while (0)
#define sfcr_info(sr, ...) \
do { \
const struct sfc_repr *_sr = (sr); \
@@ -269,6 +288,229 @@ sfc_repr_dev_infos_get(struct rte_eth_dev *dev,
return 0;
}
static int
sfc_repr_ring_create(uint16_t pf_port_id, uint16_t repr_id,
const char *type_name, uint16_t qid, uint16_t nb_desc,
unsigned int socket_id, struct rte_ring **ring)
{
char ring_name[RTE_RING_NAMESIZE];
int ret;
ret = snprintf(ring_name, sizeof(ring_name), "sfc_%u_repr_%u_%sq%u",
pf_port_id, repr_id, type_name, qid);
if (ret >= (int)sizeof(ring_name))
return -ENAMETOOLONG;
/*
* Single producer/consumer rings are used since the Tx/Rx packet burst
* API for representors is guaranteed to be called from a single thread,
* and the user at the other end (the representor proxy) is also
* single-threaded.
*/
*ring = rte_ring_create(ring_name, nb_desc, socket_id,
RING_F_SP_ENQ | RING_F_SC_DEQ);
if (*ring == NULL)
return -rte_errno;
return 0;
}
static int
sfc_repr_rx_qcheck_conf(struct sfc_repr *sr,
const struct rte_eth_rxconf *rx_conf)
{
int ret = 0;
sfcr_info(sr, "entry");
if (rx_conf->rx_thresh.pthresh != 0 ||
rx_conf->rx_thresh.hthresh != 0 ||
rx_conf->rx_thresh.wthresh != 0) {
sfcr_warn(sr,
"RxQ prefetch/host/writeback thresholds are not supported");
}
if (rx_conf->rx_free_thresh != 0)
sfcr_warn(sr, "RxQ free threshold is not supported");
if (rx_conf->rx_drop_en == 0)
sfcr_warn(sr, "RxQ drop disable is not supported");
if (rx_conf->rx_deferred_start) {
sfcr_err(sr, "Deferred start is not supported");
ret = -EINVAL;
}
sfcr_info(sr, "done: %s", rte_strerror(-ret));
return ret;
}
static int
sfc_repr_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool)
{
struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
struct sfc_repr_rxq *rxq;
int ret;
sfcr_info(sr, "entry");
ret = sfc_repr_rx_qcheck_conf(sr, rx_conf);
if (ret != 0)
goto fail_check_conf;
ret = -ENOMEM;
rxq = rte_zmalloc_socket("sfc-repr-rxq", sizeof(*rxq),
RTE_CACHE_LINE_SIZE, socket_id);
if (rxq == NULL) {
sfcr_err(sr, "%s() failed to alloc RxQ", __func__);
goto fail_rxq_alloc;
}
ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
"rx", rx_queue_id, nb_rx_desc,
socket_id, &rxq->ring);
if (ret != 0) {
sfcr_err(sr, "%s() failed to create ring", __func__);
goto fail_ring_create;
}
ret = sfc_repr_proxy_add_rxq(srs->pf_port_id, srs->repr_id,
rx_queue_id, rxq->ring, mb_pool);
if (ret != 0) {
SFC_ASSERT(ret > 0);
ret = -ret;
sfcr_err(sr, "%s() failed to add proxy RxQ", __func__);
goto fail_proxy_add_rxq;
}
dev->data->rx_queues[rx_queue_id] = rxq;
sfcr_info(sr, "done");
return 0;
fail_proxy_add_rxq:
rte_ring_free(rxq->ring);
fail_ring_create:
rte_free(rxq);
fail_rxq_alloc:
fail_check_conf:
sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
return ret;
}
static void
sfc_repr_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
struct sfc_repr_rxq *rxq = dev->data->rx_queues[rx_queue_id];
sfc_repr_proxy_del_rxq(srs->pf_port_id, srs->repr_id, rx_queue_id);
rte_ring_free(rxq->ring);
rte_free(rxq);
}
static int
sfc_repr_tx_qcheck_conf(struct sfc_repr *sr,
const struct rte_eth_txconf *tx_conf)
{
int ret = 0;
sfcr_info(sr, "entry");
if (tx_conf->tx_rs_thresh != 0)
sfcr_warn(sr, "RS bit in transmit descriptor is not supported");
if (tx_conf->tx_free_thresh != 0)
sfcr_warn(sr, "TxQ free threshold is not supported");
if (tx_conf->tx_thresh.pthresh != 0 ||
tx_conf->tx_thresh.hthresh != 0 ||
tx_conf->tx_thresh.wthresh != 0) {
sfcr_warn(sr,
"prefetch/host/writeback thresholds are not supported");
}
if (tx_conf->tx_deferred_start) {
sfcr_err(sr, "Deferred start is not supported");
ret = -EINVAL;
}
sfcr_info(sr, "done: %s", rte_strerror(-ret));
return ret;
}
static int
sfc_repr_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
struct sfc_repr_txq *txq;
int ret;
sfcr_info(sr, "entry");
ret = sfc_repr_tx_qcheck_conf(sr, tx_conf);
if (ret != 0)
goto fail_check_conf;
ret = -ENOMEM;
txq = rte_zmalloc_socket("sfc-repr-txq", sizeof(*txq),
RTE_CACHE_LINE_SIZE, socket_id);
if (txq == NULL)
goto fail_txq_alloc;
ret = sfc_repr_ring_create(srs->pf_port_id, srs->repr_id,
"tx", tx_queue_id, nb_tx_desc,
socket_id, &txq->ring);
if (ret != 0)
goto fail_ring_create;
ret = sfc_repr_proxy_add_txq(srs->pf_port_id, srs->repr_id,
tx_queue_id, txq->ring,
&txq->egress_mport);
if (ret != 0)
goto fail_proxy_add_txq;
dev->data->tx_queues[tx_queue_id] = txq;
sfcr_info(sr, "done");
return 0;
fail_proxy_add_txq:
rte_ring_free(txq->ring);
fail_ring_create:
rte_free(txq);
fail_txq_alloc:
fail_check_conf:
sfcr_err(sr, "%s() failed: %s", __func__, rte_strerror(-ret));
return ret;
}
static void
sfc_repr_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
struct sfc_repr_txq *txq = dev->data->tx_queues[tx_queue_id];
sfc_repr_proxy_del_txq(srs->pf_port_id, srs->repr_id, tx_queue_id);
rte_ring_free(txq->ring);
rte_free(txq);
}
static void
sfc_repr_close(struct sfc_repr *sr)
{
@@ -287,6 +529,7 @@ sfc_repr_dev_close(struct rte_eth_dev *dev)
{
struct sfc_repr *sr = sfc_repr_by_eth_dev(dev);
struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev);
unsigned int i;
sfcr_info(sr, "entry");
@@ -303,6 +546,16 @@ sfc_repr_dev_close(struct rte_eth_dev *dev)
break;
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
sfc_repr_rx_queue_release(dev, i);
dev->data->rx_queues[i] = NULL;
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
sfc_repr_tx_queue_release(dev, i);
dev->data->tx_queues[i] = NULL;
}
/*
* Cleanup all resources.
* Rollback primary process sfc_repr_eth_dev_init() below.
@@ -326,6 +579,10 @@ static const struct eth_dev_ops sfc_repr_dev_ops = {
.dev_configure = sfc_repr_dev_configure,
.dev_close = sfc_repr_dev_close,
.dev_infos_get = sfc_repr_dev_infos_get,
.rx_queue_setup = sfc_repr_rx_queue_setup,
.rx_queue_release = sfc_repr_rx_queue_release,
.tx_queue_setup = sfc_repr_tx_queue_setup,
.tx_queue_release = sfc_repr_tx_queue_release,
};
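
The comment in sfc_repr_ring_create() above motivates the single
producer / single consumer ring flags. The datapath burst functions are
not part of this patch; the sketch below is only an illustration, under
that assumption, of how the two single-threaded ends of a Tx ring could
hand mbufs over without atomics (both function names are hypothetical).

#include <rte_ring.h>
#include <rte_mbuf.h>

/* Hypothetical sketch: the representor Tx burst thread is the only
 * producer of a ring created with RING_F_SP_ENQ, and the proxy service
 * thread is the only consumer thanks to RING_F_SC_DEQ.
 */
static uint16_t
repr_side_tx_sketch(struct rte_ring *txq_ring,
		    struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	/* Single-producer enqueue: no atomic head/tail updates needed. */
	return rte_ring_sp_enqueue_burst(txq_ring, (void **)tx_pkts,
					 nb_pkts, NULL);
}

static uint16_t
proxy_side_tx_sketch(struct rte_ring *txq_ring,
		     struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	/* Single-consumer dequeue on the proxy end of the same ring. */
	return rte_ring_sc_dequeue_burst(txq_ring, (void **)pkts,
					 nb_pkts, NULL);
}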

@@ -528,3 +528,135 @@ fail_no_port:
return rc;
}
int
sfc_repr_proxy_add_rxq(uint16_t pf_port_id, uint16_t repr_id,
uint16_t queue_id, struct rte_ring *rx_ring,
struct rte_mempool *mp)
{
struct sfc_repr_proxy_port *port;
struct sfc_repr_proxy_rxq *rxq;
struct sfc_repr_proxy *rp;
struct sfc_adapter *sa;
sa = sfc_get_adapter_by_pf_port_id(pf_port_id);
rp = sfc_repr_proxy_by_adapter(sa);
sfc_log_init(sa, "entry");
port = sfc_repr_proxy_find_port(rp, repr_id);
if (port == NULL) {
sfc_err(sa, "%s() failed: no such port", __func__);
sfc_put_adapter(sa);
return ENOENT;
}
rxq = &port->rxq[queue_id];
if (rp->dp_rxq[queue_id].mp != NULL && rp->dp_rxq[queue_id].mp != mp) {
sfc_err(sa, "multiple mempools per queue are not supported");
sfc_put_adapter(sa);
return ENOTSUP;
}
rxq->ring = rx_ring;
rxq->mb_pool = mp;
rp->dp_rxq[queue_id].mp = mp;
rp->dp_rxq[queue_id].ref_count++;
sfc_log_init(sa, "done");
sfc_put_adapter(sa);
return 0;
}
void
sfc_repr_proxy_del_rxq(uint16_t pf_port_id, uint16_t repr_id,
uint16_t queue_id)
{
struct sfc_repr_proxy_port *port;
struct sfc_repr_proxy_rxq *rxq;
struct sfc_repr_proxy *rp;
struct sfc_adapter *sa;
sa = sfc_get_adapter_by_pf_port_id(pf_port_id);
rp = sfc_repr_proxy_by_adapter(sa);
sfc_log_init(sa, "entry");
port = sfc_repr_proxy_find_port(rp, repr_id);
if (port == NULL) {
sfc_err(sa, "%s() failed: no such port", __func__);
sfc_put_adapter(sa);
return;
}
rxq = &port->rxq[queue_id];
rxq->ring = NULL;
rxq->mb_pool = NULL;
rp->dp_rxq[queue_id].ref_count--;
if (rp->dp_rxq[queue_id].ref_count == 0)
rp->dp_rxq[queue_id].mp = NULL;
sfc_log_init(sa, "done");
sfc_put_adapter(sa);
}
int
sfc_repr_proxy_add_txq(uint16_t pf_port_id, uint16_t repr_id,
uint16_t queue_id, struct rte_ring *tx_ring,
efx_mport_id_t *egress_mport)
{
struct sfc_repr_proxy_port *port;
struct sfc_repr_proxy_txq *txq;
struct sfc_repr_proxy *rp;
struct sfc_adapter *sa;
sa = sfc_get_adapter_by_pf_port_id(pf_port_id);
rp = sfc_repr_proxy_by_adapter(sa);
sfc_log_init(sa, "entry");
port = sfc_repr_proxy_find_port(rp, repr_id);
if (port == NULL) {
sfc_err(sa, "%s() failed: no such port", __func__);
sfc_put_adapter(sa);
return ENOENT;
}
txq = &port->txq[queue_id];
txq->ring = tx_ring;
*egress_mport = port->egress_mport;
sfc_log_init(sa, "done");
sfc_put_adapter(sa);
return 0;
}
void
sfc_repr_proxy_del_txq(uint16_t pf_port_id, uint16_t repr_id,
uint16_t queue_id)
{
struct sfc_repr_proxy_port *port;
struct sfc_repr_proxy_txq *txq;
struct sfc_repr_proxy *rp;
struct sfc_adapter *sa;
sa = sfc_get_adapter_by_pf_port_id(pf_port_id);
rp = sfc_repr_proxy_by_adapter(sa);
sfc_log_init(sa, "entry");
port = sfc_repr_proxy_find_port(rp, repr_id);
if (port == NULL) {
sfc_err(sa, "%s() failed: no such port", __func__);
sfc_put_adapter(sa);
return;
}
txq = &port->txq[queue_id];
txq->ring = NULL;
sfc_log_init(sa, "done");
sfc_put_adapter(sa);
}
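
sfc_repr_proxy_add_rxq() above records one mempool per proxy Rx queue
(dp_rxq[].mp) together with a reference count, so every representor Rx
queue multiplexed onto the same proxy queue must share that mempool. A
hypothetical illustration of the rule from the application side follows;
the port ids, mempools and helper name are assumptions, and both ports
are assumed to be configured already.

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Hypothetical sketch: repr_a and repr_b are two representors on the same
 * PF. The first setup records mp_a in dp_rxq[0]; a second setup with a
 * different mempool is rejected by the proxy (surfaced as -ENOTSUP).
 */
static int
shared_mempool_rule_sketch(uint16_t repr_a, uint16_t repr_b,
			   struct rte_mempool *mp_a, struct rte_mempool *mp_b)
{
	int rc;

	/* dp_rxq[0].mp = mp_a, ref_count = 1 */
	rc = rte_eth_rx_queue_setup(repr_a, 0, 256, SOCKET_ID_ANY, NULL, mp_a);
	if (rc != 0)
		return rc;

	/* Fails with -ENOTSUP when mp_b != mp_a. */
	return rte_eth_rx_queue_setup(repr_b, 0, 256, SOCKET_ID_ANY,
				      NULL, mp_b);
}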

@@ -12,8 +12,13 @@
#include <stdint.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include "efx.h"
#include "sfc_repr.h"
#ifdef __cplusplus
extern "C" {
#endif
@@ -26,11 +31,27 @@ extern "C" {
#define SFC_REPR_PROXY_NB_TXQ_MIN (1)
#define SFC_REPR_PROXY_NB_TXQ_MAX (1)
struct sfc_repr_proxy_rxq {
struct rte_ring *ring;
struct rte_mempool *mb_pool;
};
struct sfc_repr_proxy_txq {
struct rte_ring *ring;
};
struct sfc_repr_proxy_port {
TAILQ_ENTRY(sfc_repr_proxy_port) entries;
uint16_t repr_id;
uint16_t rte_port_id;
efx_mport_id_t egress_mport;
struct sfc_repr_proxy_rxq rxq[SFC_REPR_RXQ_MAX];
struct sfc_repr_proxy_txq txq[SFC_REPR_TXQ_MAX];
};
struct sfc_repr_proxy_dp_rxq {
struct rte_mempool *mp;
unsigned int ref_count;
};
enum sfc_repr_proxy_mbox_op {
@@ -54,6 +75,7 @@ struct sfc_repr_proxy {
efx_mport_id_t mport_alias;
struct sfc_repr_proxy_ports ports;
bool started;
struct sfc_repr_proxy_dp_rxq dp_rxq[SFC_REPR_PROXY_NB_RXQ_MAX];
struct sfc_repr_proxy_mbox mbox;
};

@@ -12,6 +12,9 @@
#include <stdint.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include "efx.h"
#ifdef __cplusplus
@@ -23,6 +26,18 @@ int sfc_repr_proxy_add_port(uint16_t pf_port_id, uint16_t repr_id,
const efx_mport_sel_t *mport_set);
int sfc_repr_proxy_del_port(uint16_t pf_port_id, uint16_t repr_id);
int sfc_repr_proxy_add_rxq(uint16_t pf_port_id, uint16_t repr_id,
uint16_t queue_id, struct rte_ring *rx_ring,
struct rte_mempool *mp);
void sfc_repr_proxy_del_rxq(uint16_t pf_port_id, uint16_t repr_id,
uint16_t queue_id);
int sfc_repr_proxy_add_txq(uint16_t pf_port_id, uint16_t repr_id,
uint16_t queue_id, struct rte_ring *tx_ring,
efx_mport_id_t *egress_mport);
void sfc_repr_proxy_del_txq(uint16_t pf_port_id, uint16_t repr_id,
uint16_t queue_id);
#ifdef __cplusplus
}
#endif