ethdev: change queue release callback
Currently, most ethdev callback APIs use a queue ID as parameter, but the Rx
and Tx queue release callbacks take the queue object that is also used by the
Rx and Tx burst data plane callbacks.

To align with the other ethdev queue configuration callbacks:
- queue release callbacks are changed to use queue ID
- all drivers are adapted

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Somnath Kotur <somnath.kotur@broadcom.com>
Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
commit 7483341ae5
parent 49ed322469
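For orientation before the per-driver hunks: at the ethdev level this commit
replaces the queue-object parameter of the release callback with a device
pointer plus queue ID. The sketch below illustrates that shape change under
the assumption of DPDK's driver-side header layout of this era; the "foo"
driver is hypothetical and only demonstrates the adaptation pattern the hunks
repeat driver by driver (look the queue up in dev->data by ID, free it, clear
the slot), not code from any driver in this diff.

	#include <ethdev_driver.h>
	#include <rte_malloc.h>

	/* Old callback shape (removed by this commit):
	 *   typedef void (*eth_queue_release_t)(void *queue);
	 * New callback shape, aligned with the other per-queue callbacks:
	 *   typedef void (*eth_queue_release_t)(struct rte_eth_dev *dev,
	 *                                       uint16_t queue_id);
	 */

	struct foo_rx_queue {            /* hypothetical per-queue state */
		struct rte_mbuf **sw_ring;
	};

	static void
	foo_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
	{
		/* Fetch the queue object by ID instead of receiving it. */
		struct foo_rx_queue *rxq = dev->data->rx_queues[qid];

		if (rxq == NULL)
			return;
		rte_free(rxq->sw_ring);
		rte_free(rxq);
		dev->data->rx_queues[qid] = NULL;
	}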
@@ -54,8 +54,8 @@ struct atl_adapter {
 /*
  * RX/TX function prototypes
  */
-void atl_rx_queue_release(void *rxq);
-void atl_tx_queue_release(void *txq);
+void atl_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+void atl_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
 int atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		uint16_t nb_rx_desc, unsigned int socket_id,

@@ -125,7 +125,7 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 	 * different socket than was previously used.
 	 */
 	if (dev->data->rx_queues[rx_queue_id] != NULL) {
-		atl_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
+		atl_rx_queue_release(dev, rx_queue_id);
 		dev->data->rx_queues[rx_queue_id] = NULL;
 	}
 
@@ -247,7 +247,7 @@ atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 	 * different socket than was previously used.
 	 */
 	if (dev->data->tx_queues[tx_queue_id] != NULL) {
-		atl_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
+		atl_tx_queue_release(dev, tx_queue_id);
 		dev->data->tx_queues[tx_queue_id] = NULL;
 	}
 
@@ -498,13 +498,13 @@ atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 }
 
 void
-atl_rx_queue_release(void *rx_queue)
+atl_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
+	struct atl_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+
 	PMD_INIT_FUNC_TRACE();
 
-	if (rx_queue != NULL) {
-		struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;
-
+	if (rxq != NULL) {
 		atl_rx_queue_release_mbufs(rxq);
 		rte_free(rxq->sw_ring);
 		rte_free(rxq);

@@ -569,13 +569,13 @@ atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 }
 
 void
-atl_tx_queue_release(void *tx_queue)
+atl_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
+	struct atl_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
+
 	PMD_INIT_FUNC_TRACE();
 
-	if (tx_queue != NULL) {
-		struct atl_tx_queue *txq = (struct atl_tx_queue *)tx_queue;
-
+	if (txq != NULL) {
 		atl_tx_queue_release_mbufs(txq);
 		rte_free(txq->sw_ring);
 		rte_free(txq);

@@ -590,13 +590,13 @@ atl_free_queues(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		atl_rx_queue_release(dev->data->rx_queues[i]);
+		atl_rx_queue_release(dev, i);
 		dev->data->rx_queues[i] = 0;
 	}
 	dev->data->nb_rx_queues = 0;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		atl_tx_queue_release(dev->data->tx_queues[i]);
+		atl_tx_queue_release(dev, i);
 		dev->data->tx_queues[i] = 0;
 	}
 	dev->data->nb_tx_queues = 0;
@@ -75,8 +75,8 @@ static uint16_t avp_xmit_pkts(void *tx_queue,
 			      struct rte_mbuf **tx_pkts,
 			      uint16_t nb_pkts);
 
-static void avp_dev_rx_queue_release(void *rxq);
-static void avp_dev_tx_queue_release(void *txq);
+static void avp_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+static void avp_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 static int avp_dev_stats_get(struct rte_eth_dev *dev,
 			     struct rte_eth_stats *stats);

@@ -1926,18 +1926,11 @@ avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 }
 
 static void
-avp_dev_rx_queue_release(void *rx_queue)
+avp_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
-	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
-	struct avp_dev *avp = rxq->avp;
-	struct rte_eth_dev_data *data = avp->dev_data;
-	unsigned int i;
-
-	for (i = 0; i < avp->num_rx_queues; i++) {
-		if (data->rx_queues[i] == rxq) {
-			rte_free(data->rx_queues[i]);
-			data->rx_queues[i] = NULL;
-		}
+	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
+		rte_free(eth_dev->data->rx_queues[rx_queue_id]);
+		eth_dev->data->rx_queues[rx_queue_id] = NULL;
 	}
 }
 
@@ -1957,18 +1950,11 @@ avp_dev_rx_queue_release_all(struct rte_eth_dev *eth_dev)
 }
 
 static void
-avp_dev_tx_queue_release(void *tx_queue)
+avp_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 {
-	struct avp_queue *txq = (struct avp_queue *)tx_queue;
-	struct avp_dev *avp = txq->avp;
-	struct rte_eth_dev_data *data = avp->dev_data;
-	unsigned int i;
-
-	for (i = 0; i < avp->num_tx_queues; i++) {
-		if (data->tx_queues[i] == txq) {
-			rte_free(data->tx_queues[i]);
-			data->tx_queues[i] = NULL;
-		}
+	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
+		rte_free(eth_dev->data->tx_queues[tx_queue_id]);
+		eth_dev->data->tx_queues[tx_queue_id] = NULL;
 	}
 }
@@ -950,7 +950,7 @@ static int wrapper_rx_desc_init(struct axgbe_port *pdata)
 			if (mbuf == NULL) {
 				PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n",
 					    (unsigned int)rxq->queue_id, j);
-				axgbe_dev_rx_queue_release(rxq);
+				axgbe_dev_rx_queue_release(pdata->eth_dev, i);
 				return -ENOMEM;
 			}
 			rxq->sw_ring[j] = mbuf;

@@ -31,9 +31,9 @@ axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
 	}
 }
 
-void axgbe_dev_rx_queue_release(void *rxq)
+void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-	axgbe_rx_queue_release(rxq);
+	axgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
 }
 
 int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,

@@ -517,9 +517,9 @@ static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
 	}
 }
 
-void axgbe_dev_tx_queue_release(void *txq)
+void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-	axgbe_tx_queue_release(txq);
+	axgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
 }
 
 int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,

@@ -153,7 +153,7 @@ struct axgbe_tx_queue {
  */
 
-void axgbe_dev_tx_queue_release(void *txq);
+void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
 int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 			     uint16_t nb_tx_desc, unsigned int socket_id,
 			     const struct rte_eth_txconf *tx_conf);

@@ -171,7 +171,7 @@ uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 			     uint16_t nb_pkts);
 
-void axgbe_dev_rx_queue_release(void *rxq);
+void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
 int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 			     uint16_t nb_rx_desc, unsigned int socket_id,
 			     const struct rte_eth_rxconf *rx_conf,
@@ -37,9 +37,9 @@ bnx2x_rx_queue_release(struct bnx2x_rx_queue *rx_queue)
 }
 
 void
-bnx2x_dev_rx_queue_release(void *rxq)
+bnx2x_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-	bnx2x_rx_queue_release(rxq);
+	bnx2x_rx_queue_release(dev->data->rx_queues[queue_idx]);
 }
 
 int

@@ -182,9 +182,9 @@ bnx2x_tx_queue_release(struct bnx2x_tx_queue *tx_queue)
 }
 
 void
-bnx2x_dev_tx_queue_release(void *txq)
+bnx2x_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-	bnx2x_tx_queue_release(txq);
+	bnx2x_tx_queue_release(dev->data->tx_queues[queue_idx]);
 }
 
 static uint16_t

@@ -72,8 +72,8 @@ int bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 			     uint16_t nb_tx_desc, unsigned int socket_id,
 			     const struct rte_eth_txconf *tx_conf);
 
-void bnx2x_dev_rx_queue_release(void *rxq);
-void bnx2x_dev_tx_queue_release(void *txq);
+void bnx2x_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
+void bnx2x_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
 void bnx2x_dev_rxtx_init(struct rte_eth_dev *dev);
 void bnx2x_dev_rxtx_init_dummy(struct rte_eth_dev *dev);
 void bnx2x_dev_clear_queues(struct rte_eth_dev *dev);
@@ -630,7 +630,7 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	if (eth_dev->data->rx_queues) {
 		rxq = eth_dev->data->rx_queues[queue_idx];
 		if (rxq)
-			bnxt_rx_queue_release_op(rxq);
+			bnxt_rx_queue_release_op(eth_dev, queue_idx);
 	}
 
 	rxq = rte_zmalloc_socket("bnxt_vfr_rx_queue",

@@ -641,6 +641,8 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 		return -ENOMEM;
 	}
 
+	eth_dev->data->rx_queues[queue_idx] = rxq;
+
 	rxq->nb_rx_desc = nb_desc;
 
 	rc = bnxt_init_rep_rx_ring(rxq, socket_id);

@@ -660,20 +662,19 @@ int bnxt_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	rxq->rx_ring->rx_buf_ring = buf_ring;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = eth_dev->data->port_id;
-	eth_dev->data->rx_queues[queue_idx] = rxq;
 
 	return 0;
 
 out:
-	if (rxq)
-		bnxt_rep_rx_queue_release_op(rxq);
+	bnxt_rep_rx_queue_release_op(eth_dev, queue_idx);
 
 	return rc;
 }
 
-void bnxt_rep_rx_queue_release_op(void *rx_queue)
+void bnxt_rep_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+	struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];
 
 	if (!rxq)
 		return;

@@ -728,8 +729,8 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
 	if (eth_dev->data->tx_queues) {
 		vfr_txq = eth_dev->data->tx_queues[queue_idx];
-		bnxt_rep_tx_queue_release_op(vfr_txq);
-		vfr_txq = NULL;
+		if (vfr_txq != NULL)
+			bnxt_rep_tx_queue_release_op(eth_dev, queue_idx);
 	}
 
 	vfr_txq = rte_zmalloc_socket("bnxt_vfr_tx_queue",

@@ -758,15 +759,16 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	return 0;
 }
 
-void bnxt_rep_tx_queue_release_op(void *tx_queue)
+void bnxt_rep_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-	struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
+	struct bnxt_vf_rep_tx_queue *vfr_txq = dev->data->tx_queues[queue_idx];
 
 	if (!vfr_txq)
 		return;
 
 	rte_free(vfr_txq->txq);
 	rte_free(vfr_txq);
+	dev->data->tx_queues[queue_idx] = NULL;
 }
 
 int bnxt_rep_stats_get_op(struct rte_eth_dev *eth_dev,

@@ -42,8 +42,8 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 			       __rte_unused unsigned int socket_id,
 			       __rte_unused const struct rte_eth_txconf *
 			       tx_conf);
-void bnxt_rep_rx_queue_release_op(void *rx_queue);
-void bnxt_rep_tx_queue_release_op(void *tx_queue);
+void bnxt_rep_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);
+void bnxt_rep_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);
 int bnxt_rep_dev_stop_op(struct rte_eth_dev *eth_dev);
 int bnxt_rep_dev_close_op(struct rte_eth_dev *eth_dev);
 int bnxt_rep_stats_get_op(struct rte_eth_dev *eth_dev,
@@ -640,7 +640,7 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
 	if (rxq->rx_started) {
 		if (bnxt_init_one_rx_ring(rxq)) {
 			PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
-			bnxt_rx_queue_release_op(rxq);
+			bnxt_rx_queue_release_op(bp->eth_dev, queue_index);
 			rc = -ENOMEM;
 			goto err_out;
 		}

@@ -240,9 +240,9 @@ void bnxt_free_rx_mbufs(struct bnxt *bp)
 	}
 }
 
-void bnxt_rx_queue_release_op(void *rx_queue)
+void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+	struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];
 
 	if (rxq) {
 		if (is_bnxt_in_error(rxq->bp))

@@ -273,6 +273,7 @@ void bnxt_rx_queue_release_op(void *rx_queue)
 		rxq->mz = NULL;
 
 		rte_free(rxq);
+		dev->data->rx_queues[queue_idx] = NULL;
 	}
 }
 
@@ -307,7 +308,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	if (eth_dev->data->rx_queues) {
 		rxq = eth_dev->data->rx_queues[queue_idx];
 		if (rxq)
-			bnxt_rx_queue_release_op(rxq);
+			bnxt_rx_queue_release_op(eth_dev, queue_idx);
 	}
 	rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
 				 RTE_CACHE_LINE_SIZE, socket_id);

@@ -328,6 +329,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
 	PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);
 
+	eth_dev->data->rx_queues[queue_idx] = rxq;
+
 	rc = bnxt_init_rx_ring_struct(rxq, socket_id);
 	if (rc) {
 		PMD_DRV_LOG(ERR,

@@ -343,7 +346,6 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	else
 		rxq->crc_len = 0;
 
-	eth_dev->data->rx_queues[queue_idx] = rxq;
 	/* Allocate RX ring hardware descriptors */
 	rc = bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,
 			      NULL, "rxr");

@@ -369,7 +371,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
 	return 0;
 err:
-	bnxt_rx_queue_release_op(rxq);
+	bnxt_rx_queue_release_op(eth_dev, queue_idx);
 	return rc;
 }
 
@@ -46,7 +46,7 @@ struct bnxt_rx_queue {
 
 void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq);
 int bnxt_mq_rx_configure(struct bnxt *bp);
-void bnxt_rx_queue_release_op(void *rx_queue);
+void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);
 int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 			   uint16_t queue_idx,
 			   uint16_t nb_desc,

@@ -53,9 +53,9 @@ void bnxt_free_tx_mbufs(struct bnxt *bp)
 	}
 }
 
-void bnxt_tx_queue_release_op(void *tx_queue)
+void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-	struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
+	struct bnxt_tx_queue *txq = dev->data->tx_queues[queue_idx];
 
 	if (txq) {
 		if (is_bnxt_in_error(txq->bp))

@@ -83,6 +83,7 @@ void bnxt_tx_queue_release_op(void *tx_queue)
 
 		rte_free(txq->free);
 		rte_free(txq);
+		dev->data->tx_queues[queue_idx] = NULL;
 	}
 }
 
@@ -114,10 +115,8 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
 	if (eth_dev->data->tx_queues) {
 		txq = eth_dev->data->tx_queues[queue_idx];
-		if (txq) {
-			bnxt_tx_queue_release_op(txq);
-			txq = NULL;
-		}
+		if (txq)
+			bnxt_tx_queue_release_op(eth_dev, queue_idx);
 	}
 	txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
 				 RTE_CACHE_LINE_SIZE, socket_id);

@@ -126,6 +125,9 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 		return -ENOMEM;
 	}
 
+	txq->bp = bp;
+	eth_dev->data->tx_queues[queue_idx] = txq;
+
 	txq->free = rte_zmalloc_socket(NULL,
 				       sizeof(struct rte_mbuf *) * nb_desc,
 				       RTE_CACHE_LINE_SIZE, socket_id);

@@ -134,7 +136,6 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 		rc = -ENOMEM;
 		goto err;
 	}
-	txq->bp = bp;
 	txq->nb_tx_desc = nb_desc;
 	txq->tx_free_thresh =
 		RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_TX_BURST);

@@ -164,10 +165,8 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 		goto err;
 	}
 
-	eth_dev->data->tx_queues[queue_idx] = txq;
-
 	return 0;
 err:
-	bnxt_tx_queue_release_op(txq);
+	bnxt_tx_queue_release_op(eth_dev, queue_idx);
 	return rc;
 }

@@ -37,7 +37,7 @@ struct bnxt_tx_queue {
 
 void bnxt_free_txq_stats(struct bnxt_tx_queue *txq);
 void bnxt_free_tx_mbufs(struct bnxt *bp);
-void bnxt_tx_queue_release_op(void *tx_queue);
+void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);
 int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 			   uint16_t queue_idx,
 			   uint16_t nb_desc,
@@ -2332,8 +2332,10 @@ bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 }
 
 static void
-bond_ethdev_rx_queue_release(void *queue)
+bond_ethdev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
 {
+	void *queue = dev->data->rx_queues[queue_id];
+
 	if (queue == NULL)
 		return;
 
@@ -2341,8 +2343,10 @@ bond_ethdev_rx_queue_release(void *queue)
 }
 
 static void
-bond_ethdev_tx_queue_release(void *queue)
+bond_ethdev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
 {
+	void *queue = dev->data->rx_queues[queue_id];
+
 	if (queue == NULL)
 		return;
 
@@ -346,7 +346,7 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	/* Free memory prior to re-allocation if needed. */
 	if (eth_dev->data->tx_queues[qid] != NULL) {
 		plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
-		dev_ops->tx_queue_release(eth_dev->data->tx_queues[qid]);
+		dev_ops->tx_queue_release(eth_dev, qid);
 		eth_dev->data->tx_queues[qid] = NULL;
 	}
 
@@ -396,20 +396,20 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 }
 
 static void
-cnxk_nix_tx_queue_release(void *txq)
+cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 {
+	void *txq = eth_dev->data->tx_queues[qid];
 	struct cnxk_eth_txq_sp *txq_sp;
 	struct cnxk_eth_dev *dev;
 	struct roc_nix_sq *sq;
-	uint16_t qid;
 	int rc;
 
 	if (!txq)
 		return;
 
 	txq_sp = cnxk_eth_txq_to_sp(txq);
 
 	dev = txq_sp->dev;
-	qid = txq_sp->qid;
 
 	plt_nix_dbg("Releasing txq %u", qid);
 
@@ -464,7 +464,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 		const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
 
 		plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
-		dev_ops->rx_queue_release(eth_dev->data->rx_queues[qid]);
+		dev_ops->rx_queue_release(eth_dev, qid);
 		eth_dev->data->rx_queues[qid] = NULL;
 	}
 
@@ -572,13 +572,13 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 }
 
 static void
-cnxk_nix_rx_queue_release(void *rxq)
+cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 {
+	void *rxq = eth_dev->data->rx_queues[qid];
 	struct cnxk_eth_rxq_sp *rxq_sp;
 	struct cnxk_eth_dev *dev;
 	struct roc_nix_rq *rq;
 	struct roc_nix_cq *cq;
-	uint16_t qid;
 	int rc;
 
 	if (!rxq)

@@ -586,7 +586,6 @@ cnxk_nix_rx_queue_release(void *rxq)
 
 	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
 	dev = rxq_sp->dev;
-	qid = rxq_sp->qid;
 	rq = &dev->rqs[qid];
 
 	plt_nix_dbg("Releasing rxq %u", qid);

@@ -755,7 +754,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
 		txq_sp = cnxk_eth_txq_to_sp(txq[i]);
 		memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
 		tx_qconf[i].valid = true;
-		dev_ops->tx_queue_release(txq[i]);
+		dev_ops->tx_queue_release(eth_dev, i);
 		eth_dev->data->tx_queues[i] = NULL;
 	}
 
@@ -769,7 +768,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
 		rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
 		memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
 		rx_qconf[i].valid = true;
-		dev_ops->rx_queue_release(rxq[i]);
+		dev_ops->rx_queue_release(eth_dev, i);
 		eth_dev->data->rx_queues[i] = NULL;
 	}
 
@@ -791,7 +790,6 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
 	struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
 	struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
 	int rc, i, nb_rxq, nb_txq;
-	void **txq, **rxq;
 
 	nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
 	nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);

@@ -826,9 +824,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
 					     &tx_qconf[i].conf.tx);
 		if (rc) {
 			plt_err("Failed to setup tx queue rc=%d", rc);
-			txq = eth_dev->data->tx_queues;
 			for (i -= 1; i >= 0; i--)
-				dev_ops->tx_queue_release(txq[i]);
+				dev_ops->tx_queue_release(eth_dev, i);
 			goto fail;
 		}
 	}

@@ -844,9 +841,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
 					     rx_qconf[i].mp);
 		if (rc) {
 			plt_err("Failed to setup rx queue rc=%d", rc);
-			rxq = eth_dev->data->rx_queues;
 			for (i -= 1; i >= 0; i--)
-				dev_ops->rx_queue_release(rxq[i]);
+				dev_ops->rx_queue_release(eth_dev, i);
 			goto tx_queue_release;
 		}
 	}

@@ -857,9 +853,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
 	return 0;
 
 tx_queue_release:
-	txq = eth_dev->data->tx_queues;
 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
-		dev_ops->tx_queue_release(txq[i]);
+		dev_ops->tx_queue_release(eth_dev, i);
 fail:
 	if (tx_qconf)
 		free(tx_qconf);

@@ -1664,14 +1659,14 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 
 	/* Free up SQs */
 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
-		dev_ops->tx_queue_release(eth_dev->data->tx_queues[i]);
+		dev_ops->tx_queue_release(eth_dev, i);
 		eth_dev->data->tx_queues[i] = NULL;
 	}
 	eth_dev->data->nb_tx_queues = 0;
 
 	/* Free up RQ's and CQ's */
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
-		dev_ops->rx_queue_release(eth_dev->data->rx_queues[i]);
+		dev_ops->rx_queue_release(eth_dev, i);
 		eth_dev->data->rx_queues[i] = NULL;
 	}
 	eth_dev->data->nb_rx_queues = 0;
@@ -532,7 +532,7 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 
 	/* Free up the existing queue */
 	if (eth_dev->data->tx_queues[queue_idx]) {
-		cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
+		cxgbe_dev_tx_queue_release(eth_dev, queue_idx);
 		eth_dev->data->tx_queues[queue_idx] = NULL;
 	}
 
@@ -565,9 +565,9 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 	return err;
 }
 
-void cxgbe_dev_tx_queue_release(void *q)
+void cxgbe_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 {
-	struct sge_eth_txq *txq = (struct sge_eth_txq *)q;
+	struct sge_eth_txq *txq = eth_dev->data->tx_queues[qid];
 
 	if (txq) {
 		struct port_info *pi = (struct port_info *)

@@ -655,7 +655,7 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 
 	/* Free up the existing queue */
 	if (eth_dev->data->rx_queues[queue_idx]) {
-		cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
+		cxgbe_dev_rx_queue_release(eth_dev, queue_idx);
 		eth_dev->data->rx_queues[queue_idx] = NULL;
 	}
 
@@ -701,9 +701,9 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	return err;
 }
 
-void cxgbe_dev_rx_queue_release(void *q)
+void cxgbe_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
 {
-	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
+	struct sge_eth_rxq *rxq = eth_dev->data->rx_queues[qid];
 
 	if (rxq) {
 		struct port_info *pi = (struct port_info *)

@@ -16,8 +16,8 @@
 			  V_FW_PARAMS_PARAM_Y(0) | \
 			  V_FW_PARAMS_PARAM_Z(0))
 
-void cxgbe_dev_rx_queue_release(void *q);
-void cxgbe_dev_tx_queue_release(void *q);
+void cxgbe_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid);
+void cxgbe_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid);
 int cxgbe_dev_stop(struct rte_eth_dev *eth_dev);
 int cxgbe_dev_close(struct rte_eth_dev *eth_dev);
 int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
@@ -978,9 +978,9 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 }
 
 static void
-dpaa2_dev_rx_queue_release(void *q __rte_unused)
+dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)q;
+	struct dpaa2_queue *dpaa2_q = dev->data->rx_queues[rx_queue_id];
 	struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
 	struct fsl_mc_io *dpni =
 		(struct fsl_mc_io *)priv->eth_dev->process_private;
@@ -386,8 +386,8 @@ extern const struct rte_flow_ops igb_flow_ops;
 /*
  * RX/TX IGB function prototypes
  */
-void eth_igb_tx_queue_release(void *txq);
-void eth_igb_rx_queue_release(void *rxq);
+void eth_igb_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void eth_igb_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 void igb_dev_clear_queues(struct rte_eth_dev *dev);
 void igb_dev_free_queues(struct rte_eth_dev *dev);
 
@@ -462,8 +462,8 @@ uint32_t em_get_max_pktlen(struct rte_eth_dev *dev);
 /*
  * RX/TX EM function prototypes
  */
-void eth_em_tx_queue_release(void *txq);
-void eth_em_rx_queue_release(void *rxq);
+void eth_em_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void eth_em_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 void em_dev_clear_queues(struct rte_eth_dev *dev);
 void em_dev_free_queues(struct rte_eth_dev *dev);
@@ -1121,9 +1121,9 @@ em_tx_queue_release(struct em_tx_queue *txq)
 }
 
 void
-eth_em_tx_queue_release(void *txq)
+eth_em_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	em_tx_queue_release(txq);
+	em_tx_queue_release(dev->data->tx_queues[qid]);
 }
 
 /* (Re)set dynamic em_tx_queue fields to defaults */

@@ -1343,9 +1343,9 @@ em_rx_queue_release(struct em_rx_queue *rxq)
 }
 
 void
-eth_em_rx_queue_release(void *rxq)
+eth_em_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	em_rx_queue_release(rxq);
+	em_rx_queue_release(dev->data->rx_queues[qid]);
 }
 
 /* Reset dynamic em_rx_queue fields back to defaults */

@@ -1609,14 +1609,14 @@ em_dev_free_queues(struct rte_eth_dev *dev)
 	uint16_t i;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		eth_em_rx_queue_release(dev->data->rx_queues[i]);
+		eth_em_rx_queue_release(dev, i);
 		dev->data->rx_queues[i] = NULL;
 		rte_eth_dma_zone_free(dev, "rx_ring", i);
 	}
 	dev->data->nb_rx_queues = 0;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		eth_em_tx_queue_release(dev->data->tx_queues[i]);
+		eth_em_tx_queue_release(dev, i);
 		dev->data->tx_queues[i] = NULL;
 		rte_eth_dma_zone_free(dev, "tx_ring", i);
 	}
@@ -1281,9 +1281,9 @@ igb_tx_queue_release(struct igb_tx_queue *txq)
 }
 
 void
-eth_igb_tx_queue_release(void *txq)
+eth_igb_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	igb_tx_queue_release(txq);
+	igb_tx_queue_release(dev->data->tx_queues[qid]);
 }
 
 static int

@@ -1606,9 +1606,9 @@ igb_rx_queue_release(struct igb_rx_queue *rxq)
 }
 
 void
-eth_igb_rx_queue_release(void *rxq)
+eth_igb_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	igb_rx_queue_release(rxq);
+	igb_rx_queue_release(dev->data->rx_queues[qid]);
 }
 
 static void

@@ -1883,14 +1883,14 @@ igb_dev_free_queues(struct rte_eth_dev *dev)
 	uint16_t i;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		eth_igb_rx_queue_release(dev->data->rx_queues[i]);
+		eth_igb_rx_queue_release(dev, i);
 		dev->data->rx_queues[i] = NULL;
 		rte_eth_dma_zone_free(dev, "rx_ring", i);
 	}
 	dev->data->nb_rx_queues = 0;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		eth_igb_tx_queue_release(dev->data->tx_queues[i]);
+		eth_igb_tx_queue_release(dev, i);
 		dev->data->tx_queues[i] = NULL;
 		rte_eth_dma_zone_free(dev, "tx_ring", i);
 	}
@@ -192,8 +192,8 @@ static int ena_dev_reset(struct rte_eth_dev *dev);
 static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
 static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
 static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
-static void ena_rx_queue_release(void *queue);
-static void ena_tx_queue_release(void *queue);
+static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 static void ena_rx_queue_release_bufs(struct ena_ring *ring);
 static void ena_tx_queue_release_bufs(struct ena_ring *ring);
 static int ena_link_update(struct rte_eth_dev *dev,

@@ -525,27 +525,25 @@ ena_dev_reset(struct rte_eth_dev *dev)
 
 static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
 {
-	struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;
 	int nb_queues = dev->data->nb_rx_queues;
 	int i;
 
 	for (i = 0; i < nb_queues; i++)
-		ena_rx_queue_release(queues[i]);
+		ena_rx_queue_release(dev, i);
 }
 
 static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
 {
-	struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues;
 	int nb_queues = dev->data->nb_tx_queues;
 	int i;
 
 	for (i = 0; i < nb_queues; i++)
-		ena_tx_queue_release(queues[i]);
+		ena_tx_queue_release(dev, i);
 }
 
-static void ena_rx_queue_release(void *queue)
+static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	struct ena_ring *ring = (struct ena_ring *)queue;
+	struct ena_ring *ring = dev->data->rx_queues[qid];
 
 	/* Free ring resources */
 	if (ring->rx_buffer_info)

@@ -566,9 +564,9 @@ static void ena_rx_queue_release(void *queue)
 		ring->port_id, ring->id);
 }
 
-static void ena_tx_queue_release(void *queue)
+static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	struct ena_ring *ring = (struct ena_ring *)queue;
+	struct ena_ring *ring = dev->data->tx_queues[qid];
 
 	/* Free ring resources */
 	if (ring->push_buf_intermediate_buf)
@@ -325,8 +325,10 @@ enetc_tx_queue_setup(struct rte_eth_dev *dev,
 }
 
 static void
-enetc_tx_queue_release(void *txq)
+enetc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
+	void *txq = dev->data->tx_queues[qid];
+
 	if (txq == NULL)
 		return;
 
@@ -473,8 +475,10 @@ enetc_rx_queue_setup(struct rte_eth_dev *dev,
 }
 
 static void
-enetc_rx_queue_release(void *rxq)
+enetc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
+	void *rxq = dev->data->rx_queues[qid];
+
 	if (rxq == NULL)
 		return;
 
@@ -561,13 +565,13 @@ enetc_dev_close(struct rte_eth_dev *dev)
 	ret = enetc_dev_stop(dev);
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		enetc_rx_queue_release(dev->data->rx_queues[i]);
+		enetc_rx_queue_release(dev, i);
 		dev->data->rx_queues[i] = NULL;
 	}
 	dev->data->nb_rx_queues = 0;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		enetc_tx_queue_release(dev->data->tx_queues[i]);
+		enetc_tx_queue_release(dev, i);
 		dev->data->tx_queues[i] = NULL;
 	}
 	dev->data->nb_tx_queues = 0;
@@ -88,8 +88,10 @@ enicpmd_dev_flow_ops_get(struct rte_eth_dev *dev,
 	return 0;
 }
 
-static void enicpmd_dev_tx_queue_release(void *txq)
+static void enicpmd_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
+	void *txq = dev->data->tx_queues[qid];
+
 	ENICPMD_FUNC_TRACE();
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

@@ -223,8 +225,10 @@ static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
 	return ret;
 }
 
-static void enicpmd_dev_rx_queue_release(void *rxq)
+static void enicpmd_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
+	void *rxq = dev->data->rx_queues[qid];
+
 	ENICPMD_FUNC_TRACE();
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

@@ -70,8 +70,10 @@ static int enic_vf_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 	return 0;
 }
 
-static void enic_vf_dev_tx_queue_release(void *txq)
+static void enic_vf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
+	void *txq = dev->data->tx_queues[qid];
+
 	ENICPMD_FUNC_TRACE();
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return;

@@ -108,8 +110,10 @@ static int enic_vf_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	return 0;
 }
 
-static void enic_vf_dev_rx_queue_release(void *rxq)
+static void enic_vf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
+	void *rxq = dev->data->rx_queues[qid];
+
 	ENICPMD_FUNC_TRACE();
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return;
@@ -358,26 +358,21 @@ fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 }
 
 static void
-fs_rx_queue_release(void *queue)
+fs_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	struct rte_eth_dev *dev;
 	struct sub_device *sdev;
 	uint8_t i;
-	struct rxq *rxq;
+	struct rxq *rxq = dev->data->rx_queues[qid];
 
-	if (queue == NULL)
+	if (rxq == NULL)
 		return;
-	rxq = queue;
-	dev = &rte_eth_devices[rxq->priv->data->port_id];
 	fs_lock(dev, 0);
 	if (rxq->event_fd >= 0)
 		close(rxq->event_fd);
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		if (ETH(sdev)->data->rx_queues != NULL &&
-		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
-			SUBOPS(sdev, rx_queue_release)
-				(ETH(sdev)->data->rx_queues[rxq->qid]);
-		}
+		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL)
+			SUBOPS(sdev, rx_queue_release)(ETH(sdev), rxq->qid);
 	}
 	dev->data->rx_queues[rxq->qid] = NULL;
 	rte_free(rxq);

@@ -420,7 +415,7 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
 	}
 	rxq = dev->data->rx_queues[rx_queue_id];
 	if (rxq != NULL) {
-		fs_rx_queue_release(rxq);
+		fs_rx_queue_release(dev, rx_queue_id);
 		dev->data->rx_queues[rx_queue_id] = NULL;
 	}
 	rxq = rte_zmalloc(NULL,

@@ -460,7 +455,7 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
 	fs_unlock(dev, 0);
 	return 0;
 free_rxq:
	fs_rx_queue_release(dev, rx_queue_id);
 	fs_unlock(dev, 0);
 	return ret;
 }

@@ -542,24 +537,19 @@ fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
 }
 
 static void
-fs_tx_queue_release(void *queue)
+fs_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	struct rte_eth_dev *dev;
 	struct sub_device *sdev;
 	uint8_t i;
-	struct txq *txq;
+	struct txq *txq = dev->data->tx_queues[qid];
 
-	if (queue == NULL)
+	if (txq == NULL)
 		return;
-	txq = queue;
-	dev = &rte_eth_devices[txq->priv->data->port_id];
 	fs_lock(dev, 0);
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		if (ETH(sdev)->data->tx_queues != NULL &&
-		    ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
-			SUBOPS(sdev, tx_queue_release)
-				(ETH(sdev)->data->tx_queues[txq->qid]);
-		}
+		    ETH(sdev)->data->tx_queues[txq->qid] != NULL)
+			SUBOPS(sdev, tx_queue_release)(ETH(sdev), txq->qid);
 	}
 	dev->data->tx_queues[txq->qid] = NULL;
 	rte_free(txq);

@@ -591,7 +581,7 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
 	}
 	txq = dev->data->tx_queues[tx_queue_id];
 	if (txq != NULL) {
-		fs_tx_queue_release(txq);
+		fs_tx_queue_release(dev, tx_queue_id);
 		dev->data->tx_queues[tx_queue_id] = NULL;
 	}
 	txq = rte_zmalloc("ethdev TX queue",

@@ -623,7 +613,7 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
 	fs_unlock(dev, 0);
 	return 0;
 free_txq:
-	fs_tx_queue_release(txq);
+	fs_tx_queue_release(dev, tx_queue_id);
 	fs_unlock(dev, 0);
 	return ret;
 }

@@ -634,12 +624,12 @@ fs_dev_free_queues(struct rte_eth_dev *dev)
 	uint16_t i;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		fs_rx_queue_release(dev->data->rx_queues[i]);
+		fs_rx_queue_release(dev, i);
 		dev->data->rx_queues[i] = NULL;
 	}
 	dev->data->nb_rx_queues = 0;
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		fs_tx_queue_release(dev->data->tx_queues[i]);
+		fs_tx_queue_release(dev, i);
 		dev->data->tx_queues[i] = NULL;
 	}
 	dev->data->nb_tx_queues = 0;
@@ -51,8 +51,8 @@ static int
 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
 	const u8 *mac, bool add, uint32_t pool);
-static void fm10k_tx_queue_release(void *queue);
-static void fm10k_rx_queue_release(void *queue);
+static void fm10k_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+static void fm10k_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 static void fm10k_set_rx_function(struct rte_eth_dev *dev);
 static void fm10k_set_tx_function(struct rte_eth_dev *dev);
 static int fm10k_check_ftag(struct rte_devargs *devargs);

@@ -1210,7 +1210,7 @@ fm10k_dev_queue_release(struct rte_eth_dev *dev)
 
 	if (dev->data->rx_queues) {
 		for (i = 0; i < dev->data->nb_rx_queues; i++)
-			fm10k_rx_queue_release(dev->data->rx_queues[i]);
+			fm10k_rx_queue_release(dev, i);
 	}
 }
 
@@ -1891,11 +1891,11 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 }
 
 static void
-fm10k_rx_queue_release(void *queue)
+fm10k_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
 	PMD_INIT_FUNC_TRACE();
 
-	rx_queue_free(queue);
+	rx_queue_free(dev->data->rx_queues[qid]);
 }
 
 static inline int

@@ -2080,9 +2080,9 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 }
 
 static void
-fm10k_tx_queue_release(void *queue)
+fm10k_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	struct fm10k_tx_queue *q = queue;
+	struct fm10k_tx_queue *q = dev->data->tx_queues[qid];
 	PMD_INIT_FUNC_TRACE();
 
 	tx_queue_free(q);
@@ -1075,12 +1075,14 @@ static int hinic_dev_start(struct rte_eth_dev *dev)
 /**
  * DPDK callback to release the receive queue.
  *
- * @param queue
- *   Generic receive queue pointer.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param qid
+ *   Receive queue index.
  */
-static void hinic_rx_queue_release(void *queue)
+static void hinic_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	struct hinic_rxq *rxq = queue;
+	struct hinic_rxq *rxq = dev->data->rx_queues[qid];
 	struct hinic_nic_dev *nic_dev;
 
 	if (!rxq) {

@@ -1107,12 +1109,14 @@ static void hinic_rx_queue_release(void *queue)
 /**
  * DPDK callback to release the transmit queue.
  *
- * @param queue
- *   Generic transmit queue pointer.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param qid
+ *   Transmit queue index.
  */
-static void hinic_tx_queue_release(void *queue)
+static void hinic_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	struct hinic_txq *txq = queue;
+	struct hinic_txq *txq = dev->data->tx_queues[qid];
 	struct hinic_nic_dev *nic_dev;
 
 	if (!txq) {
@@ -108,8 +108,8 @@ hns3_tx_queue_release(void *queue)
 	}
 }
 
-void
-hns3_dev_rx_queue_release(void *queue)
+static void
+hns3_rx_queue_release_lock(void *queue)
 {
 	struct hns3_rx_queue *rxq = queue;
 	struct hns3_adapter *hns;

@@ -124,7 +124,13 @@ hns3_dev_rx_queue_release(void *queue)
 }
 
 void
-hns3_dev_tx_queue_release(void *queue)
+hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	hns3_rx_queue_release_lock(dev->data->rx_queues[queue_id]);
+}
+
+static void
+hns3_tx_queue_release_lock(void *queue)
 {
 	struct hns3_tx_queue *txq = queue;
 	struct hns3_adapter *hns;

@@ -138,6 +144,12 @@ hns3_dev_tx_queue_release(void *queue)
 	rte_spinlock_unlock(&hns->hw.lock);
 }
 
+void
+hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	hns3_tx_queue_release_lock(dev->data->tx_queues[queue_id]);
+}
+
 static void
 hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
 {

@@ -1536,7 +1548,7 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 		/* re-configure */
 		rxq = hw->fkq_data.rx_queues;
 		for (i = nb_queues; i < old_nb_queues; i++)
-			hns3_dev_rx_queue_release(rxq[i]);
+			hns3_rx_queue_release_lock(rxq[i]);
 
 		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
 				  RTE_CACHE_LINE_SIZE);

@@ -1551,7 +1563,7 @@ hns3_fake_rx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 	} else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
 		rxq = hw->fkq_data.rx_queues;
 		for (i = nb_queues; i < old_nb_queues; i++)
-			hns3_dev_rx_queue_release(rxq[i]);
+			hns3_rx_queue_release_lock(rxq[i]);
 
 		rte_free(hw->fkq_data.rx_queues);
 		hw->fkq_data.rx_queues = NULL;

@@ -1583,7 +1595,7 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 		/* re-configure */
 		txq = hw->fkq_data.tx_queues;
 		for (i = nb_queues; i < old_nb_queues; i++)
-			hns3_dev_tx_queue_release(txq[i]);
+			hns3_tx_queue_release_lock(txq[i]);
 		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
 				  RTE_CACHE_LINE_SIZE);
 		if (txq == NULL)

@@ -1597,7 +1609,7 @@ hns3_fake_tx_queue_config(struct hns3_hw *hw, uint16_t nb_queues)
 	} else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
 		txq = hw->fkq_data.tx_queues;
 		for (i = nb_queues; i < old_nb_queues; i++)
-			hns3_dev_tx_queue_release(txq[i]);
+			hns3_tx_queue_release_lock(txq[i]);
 
 		rte_free(hw->fkq_data.tx_queues);
 		hw->fkq_data.tx_queues = NULL;

@@ -677,8 +677,8 @@ hns3_write_txq_tail_reg(struct hns3_tx_queue *txq, uint32_t value)
 	rte_write32_relaxed(rte_cpu_to_le_32(value), txq->io_tail_reg);
 }
 
-void hns3_dev_rx_queue_release(void *queue);
-void hns3_dev_tx_queue_release(void *queue);
+void hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id);
+void hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id);
 void hns3_free_all_queues(struct rte_eth_dev *dev);
 int hns3_reset_all_tqps(struct hns3_adapter *hns);
 void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
@@ -264,10 +264,10 @@ i40e_fdir_setup(struct i40e_pf *pf)
 	return I40E_SUCCESS;
 
 fail_mem:
-	i40e_dev_rx_queue_release(pf->fdir.rxq);
+	i40e_rx_queue_release(pf->fdir.rxq);
 	pf->fdir.rxq = NULL;
 fail_setup_rx:
-	i40e_dev_tx_queue_release(pf->fdir.txq);
+	i40e_tx_queue_release(pf->fdir.txq);
 	pf->fdir.txq = NULL;
 fail_setup_tx:
 	i40e_vsi_release(vsi);

@@ -302,10 +302,10 @@ i40e_fdir_teardown(struct i40e_pf *pf)
 		PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
 
 	rte_eth_dma_zone_free(dev, "fdir_rx_ring", pf->fdir.rxq->queue_id);
-	i40e_dev_rx_queue_release(pf->fdir.rxq);
+	i40e_rx_queue_release(pf->fdir.rxq);
 	pf->fdir.rxq = NULL;
 	rte_eth_dma_zone_free(dev, "fdir_tx_ring", pf->fdir.txq->queue_id);
-	i40e_dev_tx_queue_release(pf->fdir.txq);
+	i40e_tx_queue_release(pf->fdir.txq);
 	pf->fdir.txq = NULL;
 	i40e_vsi_release(vsi);
 	pf->fdir.fdir_vsi = NULL;

@@ -1975,7 +1975,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
 	/* Free memory if needed */
 	if (dev->data->rx_queues[queue_idx]) {
-		i40e_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+		i40e_rx_queue_release(dev->data->rx_queues[queue_idx]);
 		dev->data->rx_queues[queue_idx] = NULL;
 	}
 
@@ -2019,7 +2019,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
 			      ring_size, I40E_RING_BASE_ALIGN, socket_id);
 	if (!rz) {
-		i40e_dev_rx_queue_release(rxq);
+		i40e_rx_queue_release(rxq);
 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
 		return -ENOMEM;
 	}

@@ -2039,7 +2039,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 				   RTE_CACHE_LINE_SIZE,
 				   socket_id);
 	if (!rxq->sw_ring) {
-		i40e_dev_rx_queue_release(rxq);
+		i40e_rx_queue_release(rxq);
 		PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
 		return -ENOMEM;
 	}

@@ -2062,7 +2062,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
 	if (dev->data->dev_started) {
 		if (i40e_dev_rx_queue_setup_runtime(dev, rxq)) {
-			i40e_dev_rx_queue_release(rxq);
+			i40e_rx_queue_release(rxq);
 			return -EINVAL;
 		}
 	} else {

@@ -2092,7 +2092,19 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 }
 
 void
-i40e_dev_rx_queue_release(void *rxq)
+i40e_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+	i40e_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+i40e_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+	i40e_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
+void
+i40e_rx_queue_release(void *rxq)
 {
 	struct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq;
 

@@ -2389,7 +2401,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	/* Free memory if needed. */
 	if (dev->data->tx_queues[queue_idx]) {
-		i40e_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		i40e_tx_queue_release(dev->data->tx_queues[queue_idx]);
 		dev->data->tx_queues[queue_idx] = NULL;
 	}
 
@@ -2410,7 +2422,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
 			      ring_size, I40E_RING_BASE_ALIGN, socket_id);
 	if (!tz) {
-		i40e_dev_tx_queue_release(txq);
+		i40e_tx_queue_release(txq);
 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
 		return -ENOMEM;
 	}

@@ -2438,7 +2450,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 				   RTE_CACHE_LINE_SIZE,
 				   socket_id);
 	if (!txq->sw_ring) {
-		i40e_dev_tx_queue_release(txq);
+		i40e_tx_queue_release(txq);
 		PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
 		return -ENOMEM;
 	}

@@ -2461,7 +2473,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	if (dev->data->dev_started) {
 		if (i40e_dev_tx_queue_setup_runtime(dev, txq)) {
-			i40e_dev_tx_queue_release(txq);
+			i40e_tx_queue_release(txq);
 			return -EINVAL;
 		}
 	} else {

@@ -2477,7 +2489,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 }
 
 void
-i40e_dev_tx_queue_release(void *txq)
+i40e_tx_queue_release(void *txq)
 {
 	struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
 

@@ -3042,7 +3054,7 @@ i40e_dev_free_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		if (!dev->data->rx_queues[i])
 			continue;
-		i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
+		i40e_rx_queue_release(dev->data->rx_queues[i]);
 		dev->data->rx_queues[i] = NULL;
 		rte_eth_dma_zone_free(dev, "rx_ring", i);
 	}

@@ -3050,7 +3062,7 @@ i40e_dev_free_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		if (!dev->data->tx_queues[i])
 			continue;
-		i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
+		i40e_tx_queue_release(dev->data->tx_queues[i]);
 		dev->data->tx_queues[i] = NULL;
 		rte_eth_dma_zone_free(dev, "tx_ring", i);
 	}

@@ -3090,7 +3102,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
 				      I40E_FDIR_QUEUE_ID, ring_size,
 				      I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
 	if (!tz) {
-		i40e_dev_tx_queue_release(txq);
+		i40e_tx_queue_release(txq);
 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
 		return I40E_ERR_NO_MEMORY;
 	}

@@ -3148,7 +3160,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
 				      I40E_FDIR_QUEUE_ID, ring_size,
 				      I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
 	if (!rz) {
-		i40e_dev_rx_queue_release(rxq);
+		i40e_rx_queue_release(rxq);
 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
 		return I40E_ERR_NO_MEMORY;
 	}

@@ -197,8 +197,10 @@ int i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			    uint16_t nb_desc,
 			    unsigned int socket_id,
 			    const struct rte_eth_txconf *tx_conf);
-void i40e_dev_rx_queue_release(void *rxq);
-void i40e_dev_tx_queue_release(void *txq);
+void i40e_rx_queue_release(void *rxq);
+void i40e_tx_queue_release(void *txq);
+void i40e_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void i40e_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 uint16_t i40e_recv_pkts(void *rx_queue,
 			struct rte_mbuf **rx_pkts,
 			uint16_t nb_pkts);
|
||||
|
||||
/* Free memory if needed */
|
||||
if (dev->data->rx_queues[queue_idx]) {
|
||||
iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
|
||||
iavf_dev_rx_queue_release(dev, queue_idx);
|
||||
dev->data->rx_queues[queue_idx] = NULL;
|
||||
}
|
||||
|
||||
@ -721,7 +721,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
|
||||
|
||||
/* Free memory if needed. */
|
||||
if (dev->data->tx_queues[queue_idx]) {
|
||||
iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
|
||||
iavf_dev_tx_queue_release(dev, queue_idx);
|
||||
dev->data->tx_queues[queue_idx] = NULL;
|
||||
}
|
||||
|
||||
@ -962,9 +962,9 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
|
||||
}
|
||||
|
||||
void
|
||||
iavf_dev_rx_queue_release(void *rxq)
|
||||
iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
|
||||
{
|
||||
struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
|
||||
struct iavf_rx_queue *q = dev->data->rx_queues[qid];
|
||||
|
||||
if (!q)
|
||||
return;
|
||||
@ -976,9 +976,9 @@ iavf_dev_rx_queue_release(void *rxq)
|
||||
}
|
||||
|
||||
void
|
||||
iavf_dev_tx_queue_release(void *txq)
|
||||
iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
|
||||
{
|
||||
struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
|
||||
struct iavf_tx_queue *q = dev->data->tx_queues[qid];
|
||||
|
||||
if (!q)
|
||||
return;
|
||||
|
@ -420,7 +420,7 @@ int iavf_dev_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
|
||||
int iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
|
||||
int iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
|
||||
void iavf_dev_rx_queue_release(void *rxq);
|
||||
void iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
|
||||
|
||||
int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
|
||||
uint16_t queue_idx,
|
||||
@ -430,7 +430,7 @@ int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
|
||||
int iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
|
||||
int iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
|
||||
int iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt);
|
||||
void iavf_dev_tx_queue_release(void *txq);
|
||||
void iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
|
||||
void iavf_stop_queues(struct rte_eth_dev *dev);
|
||||
uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
|
||||
uint16_t nb_pkts);
|
||||
|
@ -1060,8 +1060,8 @@ static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
|
||||
.dev_infos_get = ice_dcf_dev_info_get,
|
||||
.rx_queue_setup = ice_rx_queue_setup,
|
||||
.tx_queue_setup = ice_tx_queue_setup,
|
||||
.rx_queue_release = ice_rx_queue_release,
|
||||
.tx_queue_release = ice_tx_queue_release,
|
||||
.rx_queue_release = ice_dev_rx_queue_release,
|
||||
.tx_queue_release = ice_dev_tx_queue_release,
|
||||
.rx_queue_start = ice_dcf_rx_queue_start,
|
||||
.tx_queue_start = ice_dcf_tx_queue_start,
|
||||
.rx_queue_stop = ice_dcf_rx_queue_stop,
|
||||
|
@ -208,9 +208,9 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
|
||||
.tx_queue_start = ice_tx_queue_start,
|
||||
.tx_queue_stop = ice_tx_queue_stop,
|
||||
.rx_queue_setup = ice_rx_queue_setup,
|
||||
.rx_queue_release = ice_rx_queue_release,
|
||||
.rx_queue_release = ice_dev_rx_queue_release,
|
||||
.tx_queue_setup = ice_tx_queue_setup,
|
||||
.tx_queue_release = ice_tx_queue_release,
|
||||
.tx_queue_release = ice_dev_tx_queue_release,
|
||||
.dev_infos_get = ice_dev_info_get,
|
||||
.dev_supported_ptypes_get = ice_dev_supported_ptypes_get,
|
||||
.link_update = ice_link_update,
|
||||
|
@ -1391,6 +1391,18 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
|
||||
{
|
||||
ice_rx_queue_release(dev->data->rx_queues[qid]);
|
||||
}
|
||||
|
||||
void
|
||||
ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
|
||||
{
|
||||
ice_tx_queue_release(dev->data->tx_queues[qid]);
|
||||
}
|
||||
|
||||
void
|
||||
ice_tx_queue_release(void *txq)
|
||||
{
|
||||
|
@ -213,6 +213,8 @@ int ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
|
||||
int ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
|
||||
void ice_rx_queue_release(void *rxq);
|
||||
void ice_tx_queue_release(void *txq);
|
||||
void ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
|
||||
void ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
|
||||
void ice_free_queues(struct rte_eth_dev *dev);
|
||||
int ice_fdir_setup_tx_resources(struct ice_pf *pf);
|
||||
int ice_fdir_setup_rx_resources(struct ice_pf *pf);
|
||||
|
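ice illustrates the lighter-touch conversion: the pointer-based helpers stay exported (presumably because they are still called on queues, such as the FDIR rings in the header hunk, that are not registered in the ethdev arrays), and thin ice_dev_*_queue_release() wrappers adapt them to the new ops signature. Roughly, with hypothetical stand-in names and types:

/* Hypothetical stand-ins for the wrapper pattern. */
#include <stdint.h>
#include <stdlib.h>

struct sketch_dev_data { void *rx_queues[8]; };
struct sketch_eth_dev { struct sketch_dev_data *data; };

/* Unchanged internal helper, still addressed by pointer... */
static void drv_rx_queue_release(void *rxq)
{
	free(rxq);
}

/* ...plus a thin wrapper adapting index to pointer for the ops table. */
static void drv_dev_rx_queue_release(struct sketch_eth_dev *dev, uint16_t qid)
{
	drv_rx_queue_release(dev->data->rx_queues[qid]);
}

int main(void)
{
	struct sketch_dev_data data = { { malloc(16) } };
	struct sketch_eth_dev dev = { &data };

	drv_dev_rx_queue_release(&dev, 0);
	return 0;
}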
@ -1153,13 +1153,13 @@ igc_dev_free_queues(struct rte_eth_dev *dev)
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		eth_igc_rx_queue_release(dev->data->rx_queues[i]);
		eth_igc_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		eth_igc_tx_queue_release(dev->data->tx_queues[i]);
		eth_igc_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;

@ -716,10 +716,10 @@ igc_rx_queue_release(struct igc_rx_queue *rxq)
	rte_free(rxq);
}

void eth_igc_rx_queue_release(void *rxq)
void eth_igc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	if (rxq)
		igc_rx_queue_release(rxq);
	if (dev->data->rx_queues[qid])
		igc_rx_queue_release(dev->data->rx_queues[qid]);
}

uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,

@ -1899,10 +1899,10 @@ igc_tx_queue_release(struct igc_tx_queue *txq)
	rte_free(txq);
}

void eth_igc_tx_queue_release(void *txq)
void eth_igc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	if (txq)
		igc_tx_queue_release(txq);
	if (dev->data->tx_queues[qid])
		igc_tx_queue_release(dev->data->tx_queues[qid]);
}

static void

@ -14,8 +14,8 @@ extern "C" {
/*
 * RX/TX function prototypes
 */
void eth_igc_tx_queue_release(void *txq);
void eth_igc_rx_queue_release(void *rxq);
void eth_igc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void eth_igc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void igc_dev_clear_queues(struct rte_eth_dev *dev);
int eth_igc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
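Note how igc moves its NULL check from the callback argument to the array slot: with an index-based signature, a queue that was never set up (or was already released) is simply a NULL entry in dev->data, so the guard becomes a lookup. A sketch of that guard, again with hypothetical types:

#include <stdint.h>
#include <stdlib.h>

struct sketch_dev_data { void *tx_queues[4]; };
struct sketch_eth_dev { struct sketch_dev_data *data; };

static void drv_tx_queue_free(void *txq)
{
	free(txq);
}

static void drv_dev_tx_queue_release(struct sketch_eth_dev *dev, uint16_t qid)
{
	/* Guard on the slot: unset or already-released queues are no-ops. */
	if (dev->data->tx_queues[qid])
		drv_tx_queue_free(dev->data->tx_queues[qid]);
}

int main(void)
{
	struct sketch_dev_data data = { { malloc(32), NULL, NULL, NULL } };
	struct sketch_eth_dev dev = { &data };

	drv_dev_tx_queue_release(&dev, 0); /* frees queue 0 */
	drv_dev_tx_queue_release(&dev, 1); /* NULL slot, nothing to do */
	return 0;
}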
@ -1056,11 +1056,11 @@ ionic_lif_free_queues(struct ionic_lif *lif)
	uint32_t i;

	for (i = 0; i < lif->ntxqcqs; i++) {
		ionic_dev_tx_queue_release(lif->eth_dev->data->tx_queues[i]);
		ionic_dev_tx_queue_release(lif->eth_dev, i);
		lif->eth_dev->data->tx_queues[i] = NULL;
	}
	for (i = 0; i < lif->nrxqcqs; i++) {
		ionic_dev_rx_queue_release(lif->eth_dev->data->rx_queues[i]);
		ionic_dev_rx_queue_release(lif->eth_dev, i);
		lif->eth_dev->data->rx_queues[i] = NULL;
	}
}

@ -118,9 +118,9 @@ ionic_tx_flush(struct ionic_tx_qcq *txq)
}

void __rte_cold
ionic_dev_tx_queue_release(void *tx_queue)
ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];
	struct ionic_tx_stats *stats = &txq->stats;

	IONIC_PRINT_CALL();

@ -185,8 +185,7 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		void *tx_queue = eth_dev->data->tx_queues[tx_queue_id];
		ionic_dev_tx_queue_release(tx_queue);
		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

@ -664,9 +663,9 @@ ionic_rx_empty(struct ionic_rx_qcq *rxq)
}

void __rte_cold
ionic_dev_rx_queue_release(void *rx_queue)
ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];
	struct ionic_rx_stats *stats;

	if (!rxq)

@ -726,8 +725,7 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];
		ionic_dev_rx_queue_release(rx_queue);
		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

@ -25,14 +25,14 @@ uint16_t ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
int ionic_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
	uint16_t nb_desc, uint32_t socket_id,
	const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp);
void ionic_dev_rx_queue_release(void *rxq);
void ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int ionic_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id);

int ionic_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
	uint16_t nb_desc, uint32_t socket_id,
	const struct rte_eth_txconf *tx_conf);
void ionic_dev_tx_queue_release(void *tx_queue);
void ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id);
int ionic_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
@ -590,9 +590,9 @@ void ixgbe_dev_clear_queues(struct rte_eth_dev *dev);

void ixgbe_dev_free_queues(struct rte_eth_dev *dev);

void ixgbe_dev_rx_queue_release(void *rxq);
void ixgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

void ixgbe_dev_tx_queue_release(void *txq);
void ixgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

int ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,

@ -2487,9 +2487,9 @@ ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
}

void __rte_cold
ixgbe_dev_tx_queue_release(void *txq)
ixgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	ixgbe_tx_queue_release(txq);
	ixgbe_tx_queue_release(dev->data->tx_queues[qid]);
}

/* (Re)set dynamic ixgbe_tx_queue fields to defaults */

@ -2892,9 +2892,9 @@ ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
}

void __rte_cold
ixgbe_dev_rx_queue_release(void *rxq)
ixgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	ixgbe_rx_queue_release(rxq);
	ixgbe_rx_queue_release(dev->data->rx_queues[qid]);
}

/*

@ -3431,14 +3431,14 @@ ixgbe_dev_free_queues(struct rte_eth_dev *dev)
	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
		ixgbe_dev_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
		rte_eth_dma_zone_free(dev, "rx_ring", i);
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
		ixgbe_dev_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
		rte_eth_dma_zone_free(dev, "tx_ring", i);
	}
@ -1182,7 +1182,7 @@ lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,

	/* Free previous allocation if any */
	if (eth_dev->data->rx_queues[q_no] != NULL) {
		lio_dev_rx_queue_release(eth_dev->data->rx_queues[q_no]);
		lio_dev_rx_queue_release(eth_dev, q_no);
		eth_dev->data->rx_queues[q_no] = NULL;
	}

@ -1204,16 +1204,18 @@ lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
 * Release the receive queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param rxq
 *    Opaque pointer to the receive queue to release
 * @param eth_dev
 *    Pointer to Ethernet device structure.
 * @param q_no
 *    Receive queue index.
 *
 * @return
 *    - nothing
 */
void
lio_dev_rx_queue_release(void *rxq)
lio_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
{
	struct lio_droq *droq = rxq;
	struct lio_droq *droq = dev->data->rx_queues[q_no];
	int oq_no;

	if (droq) {

@ -1262,7 +1264,7 @@ lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,

	/* Free previous allocation if any */
	if (eth_dev->data->tx_queues[q_no] != NULL) {
		lio_dev_tx_queue_release(eth_dev->data->tx_queues[q_no]);
		lio_dev_tx_queue_release(eth_dev, q_no);
		eth_dev->data->tx_queues[q_no] = NULL;
	}

@ -1292,16 +1294,18 @@ lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
 * Release the transmit queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param txq
 *    Opaque pointer to the transmit queue to release
 * @param eth_dev
 *    Pointer to Ethernet device structure.
 * @param q_no
 *    Transmit queue index.
 *
 * @return
 *    - nothing
 */
void
lio_dev_tx_queue_release(void *txq)
lio_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
{
	struct lio_instr_queue *tq = txq;
	struct lio_instr_queue *tq = dev->data->tx_queues[q_no];
	uint32_t fw_mapped_iq_no;


@ -172,8 +172,8 @@ struct lio_rss_set {
	uint8_t key[LIO_RSS_MAX_KEY_SZ];
};

void lio_dev_rx_queue_release(void *rxq);
void lio_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no);

void lio_dev_tx_queue_release(void *txq);
void lio_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no);

#endif /* _LIO_ETHDEV_H_ */

@ -1791,7 +1791,7 @@ lio_dev_clear_queues(struct rte_eth_dev *eth_dev)
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		txq = eth_dev->data->tx_queues[i];
		if (txq != NULL) {
			lio_dev_tx_queue_release(txq);
			lio_dev_tx_queue_release(eth_dev, i);
			eth_dev->data->tx_queues[i] = NULL;
		}
	}
@ -1799,7 +1799,7 @@ lio_dev_clear_queues(struct rte_eth_dev *eth_dev)
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxq = eth_dev->data->rx_queues[i];
		if (rxq != NULL) {
			lio_dev_rx_queue_release(rxq);
			lio_dev_rx_queue_release(eth_dev, i);
			eth_dev->data->rx_queues[i] = NULL;
		}
	}
@ -1258,9 +1258,9 @@ memif_dev_close(struct rte_eth_dev *dev)
		memif_disconnect(dev);

		for (i = 0; i < dev->data->nb_rx_queues; i++)
			(*dev->dev_ops->rx_queue_release)(dev->data->rx_queues[i]);
			(*dev->dev_ops->rx_queue_release)(dev, i);
		for (i = 0; i < dev->data->nb_tx_queues; i++)
			(*dev->dev_ops->tx_queue_release)(dev->data->tx_queues[i]);
			(*dev->dev_ops->tx_queue_release)(dev, i);

		memif_socket_remove_device(dev);
	} else {

@ -1352,9 +1352,20 @@ memif_rx_queue_setup(struct rte_eth_dev *dev,
}

static void
memif_queue_release(void *queue)
memif_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct memif_queue *mq = (struct memif_queue *)queue;
	struct memif_queue *mq = dev->data->rx_queues[qid];

	if (!mq)
		return;

	rte_free(mq);
}

static void
memif_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct memif_queue *mq = dev->data->tx_queues[qid];

	if (!mq)
		return;
@ -1471,8 +1482,8 @@ static const struct eth_dev_ops ops = {
	.dev_configure = memif_dev_configure,
	.tx_queue_setup = memif_tx_queue_setup,
	.rx_queue_setup = memif_rx_queue_setup,
	.rx_queue_release = memif_queue_release,
	.tx_queue_release = memif_queue_release,
	.rx_queue_release = memif_rx_queue_release,
	.tx_queue_release = memif_tx_queue_release,
	.rx_queue_intr_enable = memif_rx_queue_intr_enable,
	.rx_queue_intr_disable = memif_rx_queue_intr_disable,
	.link_update = memif_link_update,
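memif previously registered one memif_queue_release() for both directions, which only worked because the callback was handed the queue pointer itself. An index alone cannot say whether it refers to rx_queues[] or tx_queues[], so the shared callback has to split into per-direction variants (the null PMD further down makes the same split). A sketch with hypothetical types:

#include <stdint.h>
#include <stdlib.h>

struct sketch_dev_data { void *rx_queues[4]; void *tx_queues[4]; };
struct sketch_eth_dev { struct sketch_dev_data *data; };

/* One release per direction: qid alone cannot tell Rx from Tx. */
static void drv_rx_queue_release(struct sketch_eth_dev *dev, uint16_t qid)
{
	free(dev->data->rx_queues[qid]);
	dev->data->rx_queues[qid] = NULL;
}

static void drv_tx_queue_release(struct sketch_eth_dev *dev, uint16_t qid)
{
	free(dev->data->tx_queues[qid]);
	dev->data->tx_queues[qid] = NULL;
}

int main(void)
{
	struct sketch_dev_data data = { { malloc(8) }, { malloc(8) } };
	struct sketch_eth_dev dev = { &data };

	drv_rx_queue_release(&dev, 0);
	drv_tx_queue_release(&dev, 0);
	return 0;
}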
@ -391,9 +391,9 @@ mlx4_dev_close(struct rte_eth_dev *dev)
	mlx4_flow_clean(priv);
	mlx4_rss_deinit(priv);
	for (i = 0; i != dev->data->nb_rx_queues; ++i)
		mlx4_rx_queue_release(dev->data->rx_queues[i]);
		mlx4_rx_queue_release(dev, i);
	for (i = 0; i != dev->data->nb_tx_queues; ++i)
		mlx4_tx_queue_release(dev->data->tx_queues[i]);
		mlx4_tx_queue_release(dev, i);
	mlx4_proc_priv_uninit(dev);
	mlx4_mr_release(dev);
	if (priv->pd != NULL) {

@ -826,6 +826,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		},
		.socket = socket,
	};
	dev->data->rx_queues[idx] = rxq;
	/* Enable scattered packets support for this queue if necessary. */
	MLX4_ASSERT(mb_len >= RTE_PKTMBUF_HEADROOM);
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
@ -896,12 +897,10 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		}
	}
	DEBUG("%p: adding Rx queue %p to list", (void *)dev, (void *)rxq);
	dev->data->rx_queues[idx] = rxq;
	return 0;
error:
	dev->data->rx_queues[idx] = NULL;
	ret = rte_errno;
	mlx4_rx_queue_release(rxq);
	mlx4_rx_queue_release(dev, idx);
	rte_errno = ret;
	MLX4_ASSERT(rte_errno > 0);
	return -rte_errno;
@ -910,26 +909,20 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
/**
 * DPDK callback to release a Rx queue.
 *
 * @param dpdk_rxq
 *   Generic Rx queue pointer.
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Receive queue index.
 */
void
mlx4_rx_queue_release(void *dpdk_rxq)
mlx4_rx_queue_release(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq = (struct rxq *)dpdk_rxq;
	struct mlx4_priv *priv;
	unsigned int i;
	struct rxq *rxq = dev->data->rx_queues[idx];

	if (rxq == NULL)
		return;
	priv = rxq->priv;
	for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i)
		if (ETH_DEV(priv)->data->rx_queues[i] == rxq) {
			DEBUG("%p: removing Rx queue %p from list",
			      (void *)ETH_DEV(priv), (void *)rxq);
			ETH_DEV(priv)->data->rx_queues[i] = NULL;
			break;
		}
	dev->data->rx_queues[idx] = NULL;
	DEBUG("%p: removing Rx queue %hu from list", (void *)dev, idx);
	MLX4_ASSERT(!rxq->cq);
	MLX4_ASSERT(!rxq->wq);
	MLX4_ASSERT(!rxq->wqes);

@ -141,7 +141,7 @@ int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			uint16_t desc, unsigned int socket,
			const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
void mlx4_rx_queue_release(void *dpdk_rxq);
void mlx4_rx_queue_release(struct rte_eth_dev *dev, uint16_t idx);

/* mlx4_rxtx.c */

@ -162,7 +162,7 @@ uint64_t mlx4_get_tx_port_offloads(struct mlx4_priv *priv);
int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			uint16_t desc, unsigned int socket,
			const struct rte_eth_txconf *conf);
void mlx4_tx_queue_release(void *dpdk_txq);
void mlx4_tx_queue_release(struct rte_eth_dev *dev, uint16_t idx);

/* mlx4_mr.c */


@ -404,6 +404,7 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		.lb = !!priv->vf,
		.bounce_buf = bounce_buf,
	};
	dev->data->tx_queues[idx] = txq;
	priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_TX_QUEUE;
	priv->verbs_alloc_ctx.obj = txq;
	txq->cq = mlx4_glue->create_cq(priv->ctx, desc, NULL, NULL, 0);
@ -507,13 +508,11 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	/* Save pointer of global generation number to check memory event. */
	txq->mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen;
	DEBUG("%p: adding Tx queue %p to list", (void *)dev, (void *)txq);
	dev->data->tx_queues[idx] = txq;
	priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;
	return 0;
error:
	dev->data->tx_queues[idx] = NULL;
	ret = rte_errno;
	mlx4_tx_queue_release(txq);
	mlx4_tx_queue_release(dev, idx);
	rte_errno = ret;
	MLX4_ASSERT(rte_errno > 0);
	priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;
@ -523,26 +522,20 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
/**
 * DPDK callback to release a Tx queue.
 *
 * @param dpdk_txq
 *   Generic Tx queue pointer.
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Transmit queue index.
 */
void
mlx4_tx_queue_release(void *dpdk_txq)
mlx4_tx_queue_release(struct rte_eth_dev *dev, uint16_t idx)
{
	struct txq *txq = (struct txq *)dpdk_txq;
	struct mlx4_priv *priv;
	unsigned int i;
	struct txq *txq = dev->data->tx_queues[idx];

	if (txq == NULL)
		return;
	priv = txq->priv;
	for (i = 0; i != ETH_DEV(priv)->data->nb_tx_queues; ++i)
		if (ETH_DEV(priv)->data->tx_queues[i] == txq) {
			DEBUG("%p: removing Tx queue %p from list",
			      (void *)ETH_DEV(priv), (void *)txq);
			ETH_DEV(priv)->data->tx_queues[i] = NULL;
			break;
		}
	DEBUG("%p: removing Tx queue %hu from list", (void *)dev, idx);
	dev->data->tx_queues[idx] = NULL;
	mlx4_txq_free_elts(txq);
	if (txq->qp)
		claim_zero(mlx4_glue->destroy_qp(txq->qp));
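The mlx4 rewrite shows the main payoff of the new signature: the old callback only had a queue pointer, so it scanned the device's queue array to recover the index before clearing the slot; with the index passed in, the scan disappears (mlx5 deletes the equivalent loop in its Tx release below). A compilable sketch of the removed pattern next to the new one, with hypothetical types:

#include <stdint.h>
#include <stdlib.h>

#define SKETCH_NB_QUEUES 4

struct sketch_dev_data { void *rx_queues[SKETCH_NB_QUEUES]; };
struct sketch_eth_dev { struct sketch_dev_data *data; };

/* Old pattern: reverse-map the pointer back to its index. */
static void release_by_pointer(struct sketch_eth_dev *dev, void *rxq)
{
	unsigned int i;

	for (i = 0; i != SKETCH_NB_QUEUES; ++i)
		if (dev->data->rx_queues[i] == rxq) {
			dev->data->rx_queues[i] = NULL;
			break;
		}
	free(rxq);
}

/* New pattern: the index is a parameter, so no scan is needed. */
static void release_by_index(struct sketch_eth_dev *dev, uint16_t qid)
{
	free(dev->data->rx_queues[qid]);
	dev->data->rx_queues[qid] = NULL;
}

int main(void)
{
	struct sketch_dev_data data = { { malloc(8), malloc(8) } };
	struct sketch_eth_dev dev = { &data };

	release_by_pointer(&dev, data.rx_queues[1]);
	release_by_index(&dev, 0);
	return 0;
}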
@ -191,7 +191,7 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
int mlx5_rx_hairpin_queue_setup
	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_rx_queue_release(void *dpdk_rxq);
void mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);

@ -794,25 +794,22 @@ mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
/**
 * DPDK callback to release a RX queue.
 *
 * @param dpdk_rxq
 *   Generic RX queue pointer.
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param qid
 *   Receive queue index.
 */
void
mlx5_rx_queue_release(void *dpdk_rxq)
mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_priv *priv;
	struct mlx5_rxq_data *rxq = dev->data->rx_queues[qid];

	if (rxq == NULL)
		return;
	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	priv = rxq_ctrl->priv;
	if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
	if (!mlx5_rxq_releasable(dev, qid))
		rte_panic("port %u Rx queue %u is still used by a flow and"
			  " cannot be removed\n",
			  PORT_ID(priv), rxq->idx);
	mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
			  " cannot be removed\n", dev->data->port_id, qid);
	mlx5_rxq_release(dev, qid);
}

/**

@ -204,7 +204,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
int mlx5_tx_hairpin_queue_setup
	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	 const struct rte_eth_hairpin_conf *hairpin_conf);
void mlx5_tx_queue_release(void *dpdk_txq);
void mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl);
int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);

@ -470,28 +470,21 @@ mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
/**
 * DPDK callback to release a TX queue.
 *
 * @param dpdk_txq
 *   Generic TX queue pointer.
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param qid
 *   Transmit queue index.
 */
void
mlx5_tx_queue_release(void *dpdk_txq)
mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	struct mlx5_priv *priv;
	unsigned int i;
	struct mlx5_txq_data *txq = dev->data->tx_queues[qid];

	if (txq == NULL)
		return;
	txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
	priv = txq_ctrl->priv;
	for (i = 0; (i != priv->txqs_n); ++i)
		if ((*priv->txqs)[i] == txq) {
			DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
				PORT_ID(priv), txq->idx);
			mlx5_txq_release(ETH_DEV(priv), i);
			break;
		}
	DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
		dev->data->port_id, qid);
	mlx5_txq_release(dev, qid);
}

/**
@ -446,12 +446,12 @@ mvneta_dev_close(struct rte_eth_dev *dev)
		ret = mvneta_dev_stop(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		mvneta_rx_queue_release(dev->data->rx_queues[i]);
		mvneta_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		mvneta_tx_queue_release(dev->data->tx_queues[i]);
		mvneta_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}

@ -796,13 +796,15 @@ mvneta_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
/**
 * DPDK callback to release the transmit queue.
 *
 * @param txq
 *   Generic transmit queue pointer.
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param qid
 *   Transmit queue index.
 */
void
mvneta_tx_queue_release(void *txq)
mvneta_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct mvneta_txq *q = txq;
	struct mvneta_txq *q = dev->data->tx_queues[qid];

	if (!q)
		return;
@ -959,13 +961,15 @@ mvneta_flush_queues(struct rte_eth_dev *dev)
/**
 * DPDK callback to release the receive queue.
 *
 * @param rxq
 *   Generic receive queue pointer.
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param qid
 *   Receive queue index.
 */
void
mvneta_rx_queue_release(void *rxq)
mvneta_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct mvneta_rxq *q = rxq;
	struct mvneta_rxq *q = dev->data->rx_queues[qid];

	if (!q)
		return;
@ -978,7 +982,7 @@ mvneta_rx_queue_release(void *rxq)
	if (q->priv->ppio)
		mvneta_rx_queue_flush(q);

	rte_free(rxq);
	rte_free(q);
}

/**

@ -32,7 +32,7 @@ int
mvneta_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		      unsigned int socket, const struct rte_eth_txconf *conf);

void mvneta_rx_queue_release(void *rxq);
void mvneta_tx_queue_release(void *txq);
void mvneta_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void mvneta_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

#endif /* _MVNETA_RXTX_H_ */
@ -2059,13 +2059,15 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
/**
 * DPDK callback to release the receive queue.
 *
 * @param rxq
 *   Generic receive queue pointer.
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param qid
 *   Receive queue index.
 */
static void
mrvl_rx_queue_release(void *rxq)
mrvl_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct mrvl_rxq *q = rxq;
	struct mrvl_rxq *q = dev->data->rx_queues[qid];
	struct pp2_ppio_tc_params *tc_params;
	int i, num, tc, inq;
	struct pp2_hif *hif;
@ -2146,13 +2148,15 @@ mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
/**
 * DPDK callback to release the transmit queue.
 *
 * @param txq
 *   Generic transmit queue pointer.
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param qid
 *   Transmit queue index.
 */
static void
mrvl_tx_queue_release(void *txq)
mrvl_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct mrvl_txq *q = txq;
	struct mrvl_txq *q = dev->data->tx_queues[qid];

	if (!q)
		return;
@ -356,9 +356,9 @@ static void hn_txd_put(struct hn_tx_queue *txq, struct hn_txdesc *txd)
}

void
hn_dev_tx_queue_release(void *arg)
hn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct hn_tx_queue *txq = arg;
	struct hn_tx_queue *txq = dev->data->tx_queues[qid];

	PMD_INIT_FUNC_TRACE();

@ -1004,9 +1004,9 @@ hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary)
}

void
hn_dev_rx_queue_release(void *arg)
hn_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct hn_rx_queue *rxq = arg;
	struct hn_rx_queue *rxq = dev->data->rx_queues[qid];

	PMD_INIT_FUNC_TRACE();

@ -1648,7 +1648,7 @@ hn_dev_free_queues(struct rte_eth_dev *dev)
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		hn_dev_tx_queue_release(dev->data->tx_queues[i]);
		hn_dev_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;

@ -198,7 +198,7 @@ int hn_dev_link_update(struct rte_eth_dev *dev, int wait);
int hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			  uint16_t nb_desc, unsigned int socket_id,
			  const struct rte_eth_txconf *tx_conf);
void hn_dev_tx_queue_release(void *arg);
void hn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
			  struct rte_eth_txq_info *qinfo);
int hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);
@ -214,7 +214,7 @@ int hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
			  struct rte_mempool *mp);
void hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
			  struct rte_eth_rxq_info *qinfo);
void hn_dev_rx_queue_release(void *arg);
void hn_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
uint32_t hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id);
int hn_dev_rx_queue_status(void *rxq, uint16_t offset);
void hn_dev_free_queues(struct rte_eth_dev *dev);

@ -624,11 +624,8 @@ void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
		void *subq = vf_dev->data->tx_queues[queue_id];

		(*vf_dev->dev_ops->tx_queue_release)(subq);
	}
	if (vf_dev && vf_dev->dev_ops->tx_queue_release)
		(*vf_dev->dev_ops->tx_queue_release)(vf_dev, queue_id);

	rte_rwlock_read_unlock(&hv->vf_lock);
}
@ -659,11 +656,8 @@ void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
		void *subq = vf_dev->data->rx_queues[queue_id];

		(*vf_dev->dev_ops->rx_queue_release)(subq);
	}
	if (vf_dev && vf_dev->dev_ops->rx_queue_release)
		(*vf_dev->dev_ops->rx_queue_release)(vf_dev, queue_id);
	rte_rwlock_read_unlock(&hv->vf_lock);
}
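netvsc forwards queue release to the dev_ops of its VF sub-device. Under the old signature it had to fish the sub-queue pointer out of the VF's own array first; now it can pass (vf_dev, queue_id) straight through, which is why the hunks above shrink. A sketch of that forwarding, with a hypothetical ops struct:

#include <stdint.h>
#include <stdio.h>

struct sketch_eth_dev;

struct sketch_dev_ops {
	void (*tx_queue_release)(struct sketch_eth_dev *dev, uint16_t qid);
};

struct sketch_eth_dev { const struct sketch_dev_ops *dev_ops; };

static void vf_tx_queue_release(struct sketch_eth_dev *dev, uint16_t qid)
{
	(void)dev;
	printf("VF releasing tx queue %u\n", (unsigned int)qid);
}

static const struct sketch_dev_ops vf_ops = { vf_tx_queue_release };

/* The parent hands (device, index) straight to the sub-device's op. */
static void parent_tx_queue_release(struct sketch_eth_dev *vf_dev,
				    uint16_t queue_id)
{
	if (vf_dev && vf_dev->dev_ops->tx_queue_release)
		(*vf_dev->dev_ops->tx_queue_release)(vf_dev, queue_id);
}

int main(void)
{
	struct sketch_eth_dev vf = { &vf_ops };

	parent_tx_queue_release(&vf, 3);
	return 0;
}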
@ -231,12 +231,12 @@ nfb_eth_dev_close(struct rte_eth_dev *dev)
	nfb_nc_txmac_deinit(internals->txmac, internals->max_txmac);

	for (i = 0; i < nb_rx; i++) {
		nfb_eth_rx_queue_release(dev->data->rx_queues[i]);
		nfb_eth_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < nb_tx; i++) {
		nfb_eth_tx_queue_release(dev->data->tx_queues[i]);
		nfb_eth_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;

@ -176,9 +176,10 @@ nfb_eth_rx_queue_init(struct nfb_device *nfb,
}

void
nfb_eth_rx_queue_release(void *q)
nfb_eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ndp_rx_queue *rxq = (struct ndp_rx_queue *)q;
	struct ndp_rx_queue *rxq = dev->data->rx_queues[qid];

	if (rxq->queue != NULL) {
		ndp_close_rx_queue(rxq->queue);
		rte_free(rxq);

@ -94,11 +94,13 @@ nfb_eth_rx_queue_setup(struct rte_eth_dev *dev,
/**
 * DPDK callback to release a RX queue.
 *
 * @param dpdk_rxq
 *   Generic RX queue pointer.
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param qid
 *   Receive queue index.
 */
void
nfb_eth_rx_queue_release(void *q);
nfb_eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

/**
 * Start traffic on Rx queue.

@ -102,9 +102,10 @@ nfb_eth_tx_queue_init(struct nfb_device *nfb,
}

void
nfb_eth_tx_queue_release(void *q)
nfb_eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ndp_tx_queue *txq = (struct ndp_tx_queue *)q;
	struct ndp_tx_queue *txq = dev->data->tx_queues[qid];

	if (txq->queue != NULL) {
		ndp_close_tx_queue(txq->queue);
		rte_free(txq);

@ -70,11 +70,13 @@ nfb_eth_tx_queue_init(struct nfb_device *nfb,
/**
 * DPDK callback to release a RX queue.
 *
 * @param dpdk_rxq
 *   Generic RX queue pointer.
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param qid
 *   Receive queue index.
 */
void
nfb_eth_tx_queue_release(void *q);
nfb_eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

/**
 * Start traffic on Tx queue.
@ -464,9 +464,9 @@ nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
}

void
nfp_net_rx_queue_release(void *rx_queue)
nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct nfp_net_rxq *rxq = rx_queue;
	struct nfp_net_rxq *rxq = dev->data->rx_queues[queue_idx];

	if (rxq) {
		nfp_net_rx_queue_release_mbufs(rxq);
@ -513,7 +513,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
	 * calling nfp_net_stop
	 */
	if (dev->data->rx_queues[queue_idx]) {
		nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
		nfp_net_rx_queue_release(dev, queue_idx);
		dev->data->rx_queues[queue_idx] = NULL;
	}

@ -523,6 +523,8 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
	if (rxq == NULL)
		return -ENOMEM;

	dev->data->rx_queues[queue_idx] = rxq;

	/* Hw queues mapping based on firmware configuration */
	rxq->qidx = queue_idx;
	rxq->fl_qcidx = queue_idx * hw->stride_rx;
@ -556,7 +558,8 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,

	if (tz == NULL) {
		PMD_DRV_LOG(ERR, "Error allocating rx dma");
		nfp_net_rx_queue_release(rxq);
		nfp_net_rx_queue_release(dev, queue_idx);
		dev->data->rx_queues[queue_idx] = NULL;
		return -ENOMEM;
	}

@ -569,7 +572,8 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
				   sizeof(*rxq->rxbufs) * nb_desc,
				   RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->rxbufs == NULL) {
		nfp_net_rx_queue_release(rxq);
		nfp_net_rx_queue_release(dev, queue_idx);
		dev->data->rx_queues[queue_idx] = NULL;
		return -ENOMEM;
	}

@ -578,7 +582,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,

	nfp_net_reset_rx_queue(rxq);

	dev->data->rx_queues[queue_idx] = rxq;
	rxq->hw = hw;

	/*
@ -651,9 +654,9 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
}

void
nfp_net_tx_queue_release(void *tx_queue)
nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct nfp_net_txq *txq = tx_queue;
	struct nfp_net_txq *txq = dev->data->tx_queues[queue_idx];

	if (txq) {
		nfp_net_tx_queue_release_mbufs(txq);
@ -714,7 +717,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
	if (dev->data->tx_queues[queue_idx]) {
		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   queue_idx);
		nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
		nfp_net_tx_queue_release(dev, queue_idx);
		dev->data->tx_queues[queue_idx] = NULL;
	}

@ -726,6 +729,8 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		return -ENOMEM;
	}

	dev->data->tx_queues[queue_idx] = txq;

	/*
	 * Allocate TX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
@ -737,7 +742,8 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
				   socket_id);
	if (tz == NULL) {
		PMD_DRV_LOG(ERR, "Error allocating tx dma");
		nfp_net_tx_queue_release(txq);
		nfp_net_tx_queue_release(dev, queue_idx);
		dev->data->tx_queues[queue_idx] = NULL;
		return -ENOMEM;
	}

@ -763,7 +769,8 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
				   sizeof(*txq->txbufs) * nb_desc,
				   RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->txbufs == NULL) {
		nfp_net_tx_queue_release(txq);
		nfp_net_tx_queue_release(dev, queue_idx);
		dev->data->tx_queues[queue_idx] = NULL;
		return -ENOMEM;
	}
	PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
@ -771,7 +778,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,

	nfp_net_reset_tx_queue(txq);

	dev->data->tx_queues[queue_idx] = txq;
	txq->hw = hw;

	/*

@ -279,13 +279,13 @@ uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
				uint16_t queue_idx);
uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
void nfp_net_rx_queue_release(void *rxq);
void nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
void nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq);
int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
				  uint16_t nb_desc, unsigned int socket_id,
				  const struct rte_eth_rxconf *rx_conf,
				  struct rte_mempool *mp);
void nfp_net_tx_queue_release(void *txq);
void nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
void nfp_net_reset_tx_queue(struct nfp_net_txq *txq);
int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
				  uint16_t nb_desc, unsigned int socket_id,
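nfp highlights the one subtlety of the conversion: a release that looks the queue up in dev->data only works if the pointer is there. Its setup paths therefore publish the queue in dev->data right after allocation, before the fallible ring/buffer allocations, and the error branches release by index and then clear the slot (szedata2 below restructures its setup the same way). A sketch of that ordering, with hypothetical types:

#include <stdint.h>
#include <stdlib.h>

struct sketch_dev_data { void *rx_queues[4]; };
struct sketch_eth_dev { struct sketch_dev_data *data; };

static void rx_queue_release(struct sketch_eth_dev *dev, uint16_t qid)
{
	free(dev->data->rx_queues[qid]);
}

static int rx_queue_setup(struct sketch_eth_dev *dev, uint16_t qid,
			  size_t ring_size)
{
	void *rxq = calloc(1, 64);

	if (rxq == NULL)
		return -1;

	/* Publish early: the error path below releases via the index. */
	dev->data->rx_queues[qid] = rxq;

	if (ring_size == 0) { /* stand-in for a failed ring allocation */
		rx_queue_release(dev, qid);
		dev->data->rx_queues[qid] = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct sketch_dev_data data = { { NULL } };
	struct sketch_eth_dev dev = { &data };

	if (rx_queue_setup(&dev, 0, 512) == 0)
		rx_queue_release(&dev, 0);
	return 0;
}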
@ -69,9 +69,9 @@ void ngbe_dev_clear_queues(struct rte_eth_dev *dev);

void ngbe_dev_free_queues(struct rte_eth_dev *dev);

void ngbe_dev_rx_queue_release(void *rxq);
void ngbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

void ngbe_dev_tx_queue_release(void *txq);
void ngbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

int ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,

@ -453,9 +453,9 @@ ngbe_tx_queue_release(struct ngbe_tx_queue *txq)
}

void
ngbe_dev_tx_queue_release(void *txq)
ngbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	ngbe_tx_queue_release(txq);
	ngbe_tx_queue_release(dev->data->tx_queues[qid]);
}

/* (Re)set dynamic ngbe_tx_queue fields to defaults */
@ -673,9 +673,9 @@ ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)
}

void
ngbe_dev_rx_queue_release(void *rxq)
ngbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	ngbe_rx_queue_release(rxq);
	ngbe_rx_queue_release(dev->data->rx_queues[qid]);
}

/*
@ -916,13 +916,13 @@ ngbe_dev_free_queues(struct rte_eth_dev *dev)
	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		ngbe_dev_rx_queue_release(dev->data->rx_queues[i]);
		ngbe_dev_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		ngbe_dev_tx_queue_release(dev->data->tx_queues[i]);
		ngbe_dev_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
@ -353,14 +353,24 @@ eth_stats_reset(struct rte_eth_dev *dev)
}

static void
eth_queue_release(void *q)
eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct null_queue *nq;
	struct null_queue *nq = dev->data->rx_queues[qid];

	if (q == NULL)
	if (nq == NULL)
		return;

	rte_free(nq->dummy_packet);
}

static void
eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct null_queue *nq = dev->data->tx_queues[qid];

	if (nq == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

@ -483,8 +493,8 @@ static const struct eth_dev_ops ops = {
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_release = eth_tx_queue_release,
	.mtu_set = eth_mtu_set,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
@ -971,20 +971,18 @@ octeontx_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
}

static void
octeontx_dev_tx_queue_release(void *tx_queue)
octeontx_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct octeontx_txq *txq = tx_queue;
	int res;

	PMD_INIT_FUNC_TRACE();

	if (txq) {
		res = octeontx_dev_tx_queue_stop(txq->eth_dev, txq->queue_id);
	if (dev->data->tx_queues[qid]) {
		res = octeontx_dev_tx_queue_stop(dev, qid);
		if (res < 0)
			octeontx_log_err("failed stop tx_queue(%d)\n",
					 txq->queue_id);
			octeontx_log_err("failed stop tx_queue(%d)\n", qid);

		rte_free(txq);
		rte_free(dev->data->tx_queues[qid]);
	}
}

@ -1013,7 +1011,7 @@ octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
	if (dev->data->tx_queues[qidx] != NULL) {
		PMD_TX_LOG(DEBUG, "freeing memory prior to re-allocation %d",
			   qidx);
		octeontx_dev_tx_queue_release(dev->data->tx_queues[qidx]);
		octeontx_dev_tx_queue_release(dev, qidx);
		dev->data->tx_queues[qidx] = NULL;
	}

@ -1221,9 +1219,9 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
}

static void
octeontx_dev_rx_queue_release(void *rxq)
octeontx_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	rte_free(rxq);
	rte_free(dev->data->rx_queues[qid]);
}

static const uint32_t *
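octeontx's Tx release used to recover the device through a back-pointer stored in the queue (txq->eth_dev, txq->queue_id); with both the device and the index now parameters, the stop-then-free sequence is addressed purely by (dev, qid). A sketch (the slot reset at the end is this sketch's choice, not octeontx's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct sketch_dev_data { void *tx_queues[4]; };
struct sketch_eth_dev { struct sketch_dev_data *data; };

static int tx_queue_stop(struct sketch_eth_dev *dev, uint16_t qid)
{
	(void)dev;
	(void)qid;
	return 0; /* pretend the hardware queue stopped cleanly */
}

/* Stop, then free, without any back-pointer stored in the queue. */
static void tx_queue_release(struct sketch_eth_dev *dev, uint16_t qid)
{
	if (dev->data->tx_queues[qid] == NULL)
		return;
	if (tx_queue_stop(dev, qid) < 0)
		fprintf(stderr, "failed stop tx_queue(%d)\n", (int)qid);
	free(dev->data->tx_queues[qid]);
	dev->data->tx_queues[qid] = NULL;
}

int main(void)
{
	struct sketch_dev_data data = { { malloc(16) } };
	struct sketch_eth_dev dev = { &data };

	tx_queue_release(&dev, 0);
	return 0;
}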
@ -555,16 +555,17 @@ otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id)
}

static void
otx2_nix_rx_queue_release(void *rx_queue)
otx2_nix_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct otx2_eth_rxq *rxq = rx_queue;
	struct otx2_eth_rxq *rxq = dev->data->rx_queues[qid];

	if (!rxq)
		return;

	otx2_nix_dbg("Releasing rxq %u", rxq->rq);
	nix_cq_rq_uninit(rxq->eth_dev, rxq);
	rte_free(rx_queue);
	rte_free(rxq);
	dev->data->rx_queues[qid] = NULL;
}

static int
@ -608,9 +609,8 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
	/* Free memory prior to re-allocation if needed */
	if (eth_dev->data->rx_queues[rq] != NULL) {
		otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
		otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
		otx2_nix_rx_queue_release(eth_dev, rq);
		rte_eth_dma_zone_free(eth_dev, "cq", rq);
		eth_dev->data->rx_queues[rq] = NULL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
@ -641,6 +641,8 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
	rxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get();
	rxq->tstamp = &dev->tstamp;

	eth_dev->data->rx_queues[rq] = rxq;

	/* Alloc completion queue */
	rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
	if (rc) {
@ -657,7 +659,6 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
	otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d",
		     rq, mp->name, qsize, nb_desc, rxq->qlen);

	eth_dev->data->rx_queues[rq] = rxq;
	eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;

	/* Calculating delta and freq mult between PTP HI clock and tsc.
@ -679,7 +680,7 @@ otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
	return 0;

free_rxq:
	otx2_nix_rx_queue_release(rxq);
	otx2_nix_rx_queue_release(eth_dev, rq);
fail:
	return rc;
}
@ -1217,16 +1218,13 @@ otx2_nix_form_default_desc(struct otx2_eth_txq *txq)
}

static void
otx2_nix_tx_queue_release(void *_txq)
otx2_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct otx2_eth_txq *txq = _txq;
	struct rte_eth_dev *eth_dev;
	struct otx2_eth_txq *txq = eth_dev->data->tx_queues[qid];

	if (!txq)
		return;

	eth_dev = txq->dev->eth_dev;

	otx2_nix_dbg("Releasing txq %u", txq->sq);

	/* Flush and disable tm */
@ -1241,6 +1239,7 @@ otx2_nix_tx_queue_release(void *_txq)
	}
	otx2_nix_sq_flush_post(txq);
	rte_free(txq);
	eth_dev->data->tx_queues[qid] = NULL;
}


@ -1268,8 +1267,7 @@ otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
	/* Free memory prior to re-allocation if needed. */
	if (eth_dev->data->tx_queues[sq] != NULL) {
		otx2_nix_dbg("Freeing memory prior to re-allocation %d", sq);
		otx2_nix_tx_queue_release(eth_dev->data->tx_queues[sq]);
		eth_dev->data->tx_queues[sq] = NULL;
		otx2_nix_tx_queue_release(eth_dev, sq);
	}

	/* Find the expected offloads for this queue */
@ -1288,6 +1286,7 @@ otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
	txq->sqb_pool = NULL;
	txq->offloads = offloads;
	dev->tx_offloads |= offloads;
	eth_dev->data->tx_queues[sq] = txq;

	/*
	 * Allocate memory for flow control updates from HW.
@ -1334,12 +1333,11 @@ otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
		     " lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d", sq,
		     fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr,
		     txq->nb_sqb_bufs, txq->sqes_per_sqb_log2);
	eth_dev->data->tx_queues[sq] = txq;
	eth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;

free_txq:
	otx2_nix_tx_queue_release(txq);
	otx2_nix_tx_queue_release(eth_dev, sq);
fail:
	return rc;
}
@ -1378,8 +1376,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
		}
		memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
		tx_qconf[i].valid = true;
		otx2_nix_tx_queue_release(txq[i]);
		eth_dev->data->tx_queues[i] = NULL;
		otx2_nix_tx_queue_release(eth_dev, i);
	}

	rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
@ -1391,8 +1388,7 @@ nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
		}
		memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
		rx_qconf[i].valid = true;
		otx2_nix_rx_queue_release(rxq[i]);
		eth_dev->data->rx_queues[i] = NULL;
		otx2_nix_rx_queue_release(eth_dev, i);
	}

	dev->tx_qconf = tx_qconf;
@ -1412,8 +1408,6 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_eth_qconf *tx_qconf = dev->tx_qconf;
	struct otx2_eth_qconf *rx_qconf = dev->rx_qconf;
	struct otx2_eth_txq **txq;
	struct otx2_eth_rxq **rxq;
	int rc, i, nb_rxq, nb_txq;

	nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
@ -1450,9 +1444,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
					     &tx_qconf[i].conf.tx);
		if (rc) {
			otx2_err("Failed to setup tx queue rc=%d", rc);
			txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
			for (i -= 1; i >= 0; i--)
				otx2_nix_tx_queue_release(txq[i]);
				otx2_nix_tx_queue_release(eth_dev, i);
			goto fail;
		}
	}
@ -1468,9 +1461,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
					     rx_qconf[i].mempool);
		if (rc) {
			otx2_err("Failed to setup rx queue rc=%d", rc);
			rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
			for (i -= 1; i >= 0; i--)
				otx2_nix_rx_queue_release(rxq[i]);
				otx2_nix_rx_queue_release(eth_dev, i);
			goto release_tx_queues;
		}
	}
@ -1480,9 +1472,8 @@ nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
	return 0;

release_tx_queues:
	txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		otx2_nix_tx_queue_release(txq[i]);
		otx2_nix_tx_queue_release(eth_dev, i);
fail:
	if (tx_qconf)
		free(tx_qconf);
@ -2647,17 +2638,13 @@ otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
	dev->ops = NULL;

	/* Free up SQs */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]);
		eth_dev->data->tx_queues[i] = NULL;
	}
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		otx2_nix_tx_queue_release(eth_dev, i);
	eth_dev->data->nb_tx_queues = 0;

	/* Free up RQ's and CQ's */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		otx2_nix_rx_queue_release(eth_dev->data->rx_queues[i]);
		eth_dev->data->rx_queues[i] = NULL;
	}
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
		otx2_nix_rx_queue_release(eth_dev, i);
	eth_dev->data->nb_rx_queues = 0;

	/* Free tm resources */
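otx2 goes one step further than most drivers here: the release callback itself resets the dev->data slot to NULL, so every caller (uninit, reconfiguration, error unwind) collapses to a plain loop over indices, as the hunks above show. A sketch of that ownership choice, with hypothetical types:

#include <stdint.h>
#include <stdlib.h>

struct sketch_dev_data {
	void *rx_queues[4];
	uint16_t nb_rx_queues;
};
struct sketch_eth_dev { struct sketch_dev_data *data; };

static void rx_queue_release(struct sketch_eth_dev *dev, uint16_t qid)
{
	if (dev->data->rx_queues[qid] == NULL)
		return;
	free(dev->data->rx_queues[qid]);
	dev->data->rx_queues[qid] = NULL; /* release owns the slot reset */
}

/* Teardown no longer touches the array itself. */
static void free_all_rx_queues(struct sketch_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		rx_queue_release(dev, i);
	dev->data->nb_rx_queues = 0;
}

int main(void)
{
	struct sketch_dev_data data = { { malloc(8), malloc(8) }, 2 };
	struct sketch_eth_dev dev = { &data };

	free_all_rx_queues(&dev);
	return 0;
}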
@ -248,16 +248,18 @@ otx_ep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
 * Release the receive queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param rxq
 *    Opaque pointer to the receive queue to release
 * @param dev
 *    Pointer to Ethernet device structure.
 * @param q_no
 *    Receive queue index.
 *
 * @return
 *    - nothing
 */
static void
otx_ep_rx_queue_release(void *rxq)
otx_ep_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
{
	struct otx_ep_droq *rq = (struct otx_ep_droq *)rxq;
	struct otx_ep_droq *rq = dev->data->rx_queues[q_no];
	struct otx_ep_device *otx_epvf = rq->otx_ep_dev;
	int q_id = rq->q_no;

@ -321,16 +323,18 @@ otx_ep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
 * Release the transmit queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param txq
 *    Opaque pointer to the transmit queue to release
 * @param dev
 *    Pointer to Ethernet device structure.
 * @param q_no
 *    Transmit queue index.
 *
 * @return
 *    - nothing
 */
static void
otx_ep_tx_queue_release(void *txq)
otx_ep_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
{
	struct otx_ep_instr_queue *tq = (struct otx_ep_instr_queue *)txq;
	struct otx_ep_instr_queue *tq = dev->data->tx_queues[q_no];

	otx_ep_delete_iqs(tq->otx_ep_dev, tq->q_no);
}
@ -2396,13 +2396,25 @@ qede_dev_reset(struct rte_eth_dev *dev)
	return qede_eth_dev_init(dev);
}

static void
qede_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	qede_rx_queue_release(dev->data->rx_queues[qid]);
}

static void
qede_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	qede_tx_queue_release(dev->data->tx_queues[qid]);
}

static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_queue_release = qede_dev_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.tx_queue_release = qede_dev_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
@ -2444,9 +2456,9 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_queue_release = qede_dev_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.tx_queue_release = qede_dev_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
@ -504,9 +504,9 @@ sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,
}

static void
sfc_rx_queue_release(void *queue)
sfc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct sfc_dp_rxq *dp_rxq = queue;
	struct sfc_dp_rxq *dp_rxq = dev->data->rx_queues[qid];
	struct sfc_rxq *rxq;
	struct sfc_adapter *sa;
	sfc_sw_index_t sw_index;
@ -561,9 +561,9 @@ sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,
}

static void
sfc_tx_queue_release(void *queue)
sfc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct sfc_dp_txq *dp_txq = queue;
	struct sfc_dp_txq *dp_txq = dev->data->tx_queues[qid];
	struct sfc_txq *txq;
	sfc_sw_index_t sw_index;
	struct sfc_adapter *sa;
@ -1143,26 +1143,28 @@ eth_stats_reset(struct rte_eth_dev *dev)
}

static void
eth_rx_queue_release(void *q)
eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        struct szedata2_rx_queue *rxq = (struct szedata2_rx_queue *)q;
        struct szedata2_rx_queue *rxq = dev->data->rx_queues[qid];

        if (rxq != NULL) {
                if (rxq->sze != NULL)
                        szedata_close(rxq->sze);
                rte_free(rxq);
                dev->data->rx_queues[qid] = NULL;
        }
}

static void
eth_tx_queue_release(void *q)
eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        struct szedata2_tx_queue *txq = (struct szedata2_tx_queue *)q;
        struct szedata2_tx_queue *txq = dev->data->tx_queues[qid];

        if (txq != NULL) {
                if (txq->sze != NULL)
                        szedata_close(txq->sze);
                rte_free(txq);
                dev->data->tx_queues[qid] = NULL;
        }
}

@ -1182,15 +1184,11 @@ eth_dev_close(struct rte_eth_dev *dev)

        free(internals->sze_dev_path);

        for (i = 0; i < nb_rx; i++) {
                eth_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
        }
        for (i = 0; i < nb_rx; i++)
                eth_rx_queue_release(dev, i);
        dev->data->nb_rx_queues = 0;
        for (i = 0; i < nb_tx; i++) {
                eth_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
        }
        for (i = 0; i < nb_tx; i++)
                eth_tx_queue_release(dev, i);
        dev->data->nb_tx_queues = 0;

        return ret;

@ -1244,10 +1242,8 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,

        PMD_INIT_FUNC_TRACE();

        if (dev->data->rx_queues[rx_queue_id] != NULL) {
                eth_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
                dev->data->rx_queues[rx_queue_id] = NULL;
        }
        if (dev->data->rx_queues[rx_queue_id] != NULL)
                eth_rx_queue_release(dev, rx_queue_id);

        rxq = rte_zmalloc_socket("szedata2 rx queue",
                        sizeof(struct szedata2_rx_queue),

@ -1259,18 +1255,20 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
        }

        rxq->priv = internals;
        dev->data->rx_queues[rx_queue_id] = rxq;

        rxq->sze = szedata_open(internals->sze_dev_path);
        if (rxq->sze == NULL) {
                PMD_INIT_LOG(ERR, "szedata_open() failed for rx queue id "
                                "%" PRIu16 "!", rx_queue_id);
                eth_rx_queue_release(rxq);
                eth_rx_queue_release(dev, rx_queue_id);
                return -EINVAL;
        }
        ret = szedata_subscribe3(rxq->sze, &rx, &tx);
        if (ret != 0 || rx == 0) {
                PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for rx queue id "
                                "%" PRIu16 "!", rx_queue_id);
                eth_rx_queue_release(rxq);
                eth_rx_queue_release(dev, rx_queue_id);
                return -EINVAL;
        }
        rxq->rx_channel = rx_channel;

@ -1281,8 +1279,6 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->rx_bytes = 0;
        rxq->err_pkts = 0;

        dev->data->rx_queues[rx_queue_id] = rxq;

        PMD_INIT_LOG(DEBUG, "Configured rx queue id %" PRIu16 " on socket "
                        "%u (channel id %u).", rxq->qid, socket_id,
                        rxq->rx_channel);

@ -1306,10 +1302,8 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,

        PMD_INIT_FUNC_TRACE();

        if (dev->data->tx_queues[tx_queue_id] != NULL) {
                eth_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
                dev->data->tx_queues[tx_queue_id] = NULL;
        }
        if (dev->data->tx_queues[tx_queue_id] != NULL)
                eth_tx_queue_release(dev, tx_queue_id);

        txq = rte_zmalloc_socket("szedata2 tx queue",
                        sizeof(struct szedata2_tx_queue),

@ -1321,18 +1315,20 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,
        }

        txq->priv = internals;
        dev->data->tx_queues[tx_queue_id] = txq;

        txq->sze = szedata_open(internals->sze_dev_path);
        if (txq->sze == NULL) {
                PMD_INIT_LOG(ERR, "szedata_open() failed for tx queue id "
                                "%" PRIu16 "!", tx_queue_id);
                eth_tx_queue_release(txq);
                eth_tx_queue_release(dev, tx_queue_id);
                return -EINVAL;
        }
        ret = szedata_subscribe3(txq->sze, &rx, &tx);
        if (ret != 0 || tx == 0) {
                PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for tx queue id "
                                "%" PRIu16 "!", tx_queue_id);
                eth_tx_queue_release(txq);
                eth_tx_queue_release(dev, tx_queue_id);
                return -EINVAL;
        }
        txq->tx_channel = tx_channel;

@ -1341,8 +1337,6 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,
        txq->tx_bytes = 0;
        txq->err_pkts = 0;

        dev->data->tx_queues[tx_queue_id] = txq;

        PMD_INIT_LOG(DEBUG, "Configured tx queue id %" PRIu16 " on socket "
                        "%u (channel id %u).", txq->qid, socket_id,
                        txq->tx_channel);
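The szedata2 hunks above show the shape this patch repeats in every driver: the release callback now receives the device and a queue ID, looks the queue object up in dev->data itself, and clears the slot when it is done, so callers stop passing raw queue pointers around. A minimal sketch of that shape for a hypothetical driver (my_rxq and my_rx_queue_release are illustrative names, not part of this patch; the include assumes a DPDK tree where struct rte_eth_dev lives in the internal ethdev_driver.h):

#include <stdint.h>
#include <ethdev_driver.h>  /* struct rte_eth_dev (internal PMD header) */
#include <rte_malloc.h>     /* rte_free() */

/* Hypothetical per-queue state, standing in for szedata2_rx_queue. */
struct my_rxq {
        void *sw_ring;
};

/* New-style release callback: resolve the queue by ID, free it, and
 * clear the slot so a later re-setup starts from a clean state. */
static void
my_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        struct my_rxq *rxq = dev->data->rx_queues[qid];

        if (rxq == NULL)
                return;
        rte_free(rxq->sw_ring);
        rte_free(rxq);
        dev->data->rx_queues[qid] = NULL;
}

With this shape the re-setup path shrinks to a guard plus one call, as the eth_rx_queue_setup hunk above shows.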
@ -1151,9 +1151,9 @@ tap_dev_close(struct rte_eth_dev *dev)
}

static void
tap_rx_queue_release(void *queue)
tap_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        struct rx_queue *rxq = queue;
        struct rx_queue *rxq = dev->data->rx_queues[qid];
        struct pmd_process_private *process_private;

        if (!rxq)

@ -1170,9 +1170,9 @@ tap_rx_queue_release(void *queue)
}

static void
tap_tx_queue_release(void *queue)
tap_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        struct tx_queue *txq = queue;
        struct tx_queue *txq = dev->data->tx_queues[qid];
        struct pmd_process_private *process_private;

        if (!txq)
@ -858,13 +858,12 @@ nicvf_configure_rss_reta(struct rte_eth_dev *dev)
}

static void
nicvf_dev_tx_queue_release(void *sq)
nicvf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        struct nicvf_txq *txq;
        struct nicvf_txq *txq = dev->data->tx_queues[qid];

        PMD_INIT_FUNC_TRACE();

        txq = (struct nicvf_txq *)sq;
        if (txq) {
                if (txq->txbuffs != NULL) {
                        nicvf_tx_queue_release_mbufs(txq);

@ -872,6 +871,7 @@ nicvf_dev_tx_queue_release(void *sq)
                        txq->txbuffs = NULL;
                }
                rte_free(txq);
                dev->data->tx_queues[qid] = NULL;
        }
}

@ -985,8 +985,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
        if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
                PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
                                nicvf_netdev_qidx(nic, qidx));
                nicvf_dev_tx_queue_release(
                        dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
                nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
                dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
        }

@ -1020,19 +1019,21 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
                txq->pool_free = nicvf_single_pool_free_xmited_buffers;
        }

        dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;

        /* Allocate software ring */
        txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
                        nb_desc * sizeof(struct rte_mbuf *),
                        RTE_CACHE_LINE_SIZE, nic->node);

        if (txq->txbuffs == NULL) {
                nicvf_dev_tx_queue_release(txq);
                nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
                return -ENOMEM;
        }

        if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
                PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
                nicvf_dev_tx_queue_release(txq);
                nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
                return -ENOMEM;
        }

@ -1043,7 +1044,6 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
                nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
                txq->phys, txq->offloads);

        dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
        dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
                RTE_ETH_QUEUE_STATE_STOPPED;
        return 0;

@ -1161,11 +1161,11 @@ nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
}

static void
nicvf_dev_rx_queue_release(void *rx_queue)
nicvf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        PMD_INIT_FUNC_TRACE();

        rte_free(rx_queue);
        rte_free(dev->data->rx_queues[qid]);
}

static int

@ -1336,8 +1336,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
        if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
                PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
                                nicvf_netdev_qidx(nic, qidx));
                nicvf_dev_rx_queue_release(
                        dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
                nicvf_dev_rx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
                dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
        }

@ -1365,12 +1364,14 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
        else
                rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;

        dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;

        nicvf_rxq_mbuf_setup(rxq);

        /* Alloc completion queue */
        if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
                PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
                nicvf_dev_rx_queue_release(rxq);
                nicvf_dev_rx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
                return -ENOMEM;
        }

@ -1382,7 +1383,6 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
                nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
                rte_mempool_avail_count(mp), rxq->phys, offloads);

        dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
        dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
                RTE_ETH_QUEUE_STATE_STOPPED;
        return 0;
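The nicvf hunks add a second wrinkle: error paths inside queue setup now unwind through the ID-based release callback, so the queue pointer must be published in dev->data before the follow-up allocations that can fail; that is why the assignment moves up and the duplicate near the end of the function is dropped. A sketch of the publish-first pattern, under the same illustrative my_* naming (the struct layout and parameters are assumptions, not driver code):

#include <errno.h>
#include <stdint.h>
#include <ethdev_driver.h>
#include <rte_common.h>     /* RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_mbuf.h>

struct my_txq {
        struct rte_mbuf **sw_ring;  /* hypothetical software ring */
};

static void
my_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        struct my_txq *txq = dev->data->tx_queues[qid];

        if (txq == NULL)
                return;
        rte_free(txq->sw_ring);
        rte_free(txq);
        dev->data->tx_queues[qid] = NULL;
}

static int
my_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
                  uint16_t nb_desc, unsigned int socket_id)
{
        struct my_txq *txq;

        txq = rte_zmalloc_socket("my txq", sizeof(*txq),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (txq == NULL)
                return -ENOMEM;
        /* Publish first: failures below can unwind by queue ID. */
        dev->data->tx_queues[qid] = txq;

        txq->sw_ring = rte_zmalloc_socket("my txq sw_ring",
                        nb_desc * sizeof(struct rte_mbuf *),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (txq->sw_ring == NULL) {
                my_tx_queue_release(dev, qid);
                return -ENOMEM;
        }
        return 0;
}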
@ -433,9 +433,9 @@ void txgbe_dev_clear_queues(struct rte_eth_dev *dev);

void txgbe_dev_free_queues(struct rte_eth_dev *dev);

void txgbe_dev_rx_queue_release(void *rxq);
void txgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

void txgbe_dev_tx_queue_release(void *txq);
void txgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

int txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc, unsigned int socket_id,

@ -2109,9 +2109,9 @@ txgbe_tx_queue_release(struct txgbe_tx_queue *txq)
}

void __rte_cold
txgbe_dev_tx_queue_release(void *txq)
txgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        txgbe_tx_queue_release(txq);
        txgbe_tx_queue_release(dev->data->tx_queues[qid]);
}

/* (Re)set dynamic txgbe_tx_queue fields to defaults */

@ -2437,9 +2437,9 @@ txgbe_rx_queue_release(struct txgbe_rx_queue *rxq)
}

void __rte_cold
txgbe_dev_rx_queue_release(void *rxq)
txgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        txgbe_rx_queue_release(rxq);
        txgbe_rx_queue_release(dev->data->rx_queues[qid]);
}

/*

@ -2795,13 +2795,13 @@ txgbe_dev_free_queues(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                txgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
                txgbe_dev_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
                txgbe_dev_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
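txgbe takes the lightest-touch route: its internal helpers txgbe_rx_queue_release() and txgbe_tx_queue_release() stay pointer-based, and only the exported dev_ops entry points change, resolving the queue ID at the boundary. A sketch of that wrapper split (illustrative my_* names; the internal helper body is an assumption):

#include <stdint.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>

struct my_txq {
        struct rte_mbuf **sw_ring;  /* hypothetical */
};

/* Internal helper stays pointer-based, so other internal callers
 * can keep handing it a queue pointer directly. */
static void
my_txq_release_internal(struct my_txq *txq)
{
        if (txq == NULL)
                return;
        rte_free(txq->sw_ring);
        rte_free(txq);
}

/* Only the dev_ops-facing wrapper adopts the (dev, qid) signature. */
static void
my_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        my_txq_release_internal(dev->data->tx_queues[qid]);
}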
@ -1346,9 +1346,15 @@ eth_stats_reset(struct rte_eth_dev *dev)
}

static void
eth_queue_release(void *q)
eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        rte_free(q);
        rte_free(dev->data->rx_queues[qid]);
}

static void
eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        rte_free(dev->data->tx_queues[qid]);
}

static int

@ -1388,8 +1394,8 @@ static const struct eth_dev_ops ops = {
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .rx_queue_release = eth_rx_queue_release,
        .tx_queue_release = eth_tx_queue_release,
        .tx_done_cleanup = eth_tx_done_cleanup,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
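These hunks show the one place the new signature forces real restructuring: a PMD could previously register a single eth_queue_release() for both ops, because the void * it received was already the right object. With only (dev, queue_id) available, the callback itself must choose the Rx or Tx array, so the shared helper splits in two. The resulting dev_ops wiring, sketched with illustrative my_* names:

#include <stdint.h>
#include <ethdev_driver.h>  /* struct rte_eth_dev, struct eth_dev_ops */
#include <rte_malloc.h>

static void
my_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        rte_free(dev->data->rx_queues[qid]);  /* Rx array */
}

static void
my_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        rte_free(dev->data->tx_queues[qid]);  /* Tx array */
}

static const struct eth_dev_ops my_ops = {
        .rx_queue_release = my_rx_queue_release,
        .tx_queue_release = my_tx_queue_release,
};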
@ -1058,18 +1058,12 @@ vmxnet3_free_queues(struct rte_eth_dev *dev)

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                void *rxq = dev->data->rx_queues[i];

                vmxnet3_dev_rx_queue_release(rxq);
        }
        for (i = 0; i < dev->data->nb_rx_queues; i++)
                vmxnet3_dev_rx_queue_release(dev, i);
        dev->data->nb_rx_queues = 0;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                void *txq = dev->data->tx_queues[i];

                vmxnet3_dev_tx_queue_release(txq);
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++)
                vmxnet3_dev_tx_queue_release(dev, i);
        dev->data->nb_tx_queues = 0;
}

@ -182,8 +182,8 @@ vmxnet3_rx_data_ring(struct vmxnet3_hw *hw, uint32 rqID)

void vmxnet3_dev_clear_queues(struct rte_eth_dev *dev);

void vmxnet3_dev_rx_queue_release(void *rxq);
void vmxnet3_dev_tx_queue_release(void *txq);
void vmxnet3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void vmxnet3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

int vmxnet3_v4_rss_configure(struct rte_eth_dev *dev);

@ -165,9 +165,9 @@ vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
}

void
vmxnet3_dev_tx_queue_release(void *txq)
vmxnet3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        vmxnet3_tx_queue_t *tq = txq;
        vmxnet3_tx_queue_t *tq = dev->data->tx_queues[qid];

        if (tq != NULL) {
                /* Release mbufs */

@ -182,10 +182,10 @@ vmxnet3_dev_tx_queue_release(void *txq)
}

void
vmxnet3_dev_rx_queue_release(void *rxq)
vmxnet3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        int i;
        vmxnet3_rx_queue_t *rq = rxq;
        vmxnet3_rx_queue_t *rq = dev->data->rx_queues[qid];

        if (rq != NULL) {
                /* Release mbufs */
@ -312,7 +312,8 @@ typedef int (*eth_rx_disable_intr_t)(struct rte_eth_dev *dev,
                uint16_t rx_queue_id);
/**< @internal Disable interrupt of a receive queue of an Ethernet device. */

typedef void (*eth_queue_release_t)(void *queue);
typedef void (*eth_queue_release_t)(struct rte_eth_dev *dev,
                uint16_t queue_id);
/**< @internal Release memory resources allocated by given RX/TX queue. */

typedef int (*eth_fw_version_get_t)(struct rte_eth_dev *dev,

@ -898,7 +898,7 @@ eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
                return;

        if (dev->dev_ops->rx_queue_release != NULL)
                (*dev->dev_ops->rx_queue_release)(rxq[qid]);
                (*dev->dev_ops->rx_queue_release)(dev, qid);
        rxq[qid] = NULL;
}

@ -911,7 +911,7 @@ eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
                return;

        if (dev->dev_ops->tx_queue_release != NULL)
                (*dev->dev_ops->tx_queue_release)(txq[qid]);
                (*dev->dev_ops->tx_queue_release)(dev, qid);
        txq[qid] = NULL;
}
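On the ethdev side, the internal helpers already took (dev, qid); after this patch they simply forward both to the driver and then clear the slot themselves. A condensed view of the Rx helper after the change (assembled from the hunk above plus its elided guard; not a verbatim copy of the source):

static void
eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
{
        void **rxq = dev->data->rx_queues;

        if (rxq[qid] == NULL)  /* nothing to release */
                return;

        if (dev->dev_ops->rx_queue_release != NULL)
                (*dev->dev_ops->rx_queue_release)(dev, qid);
        rxq[qid] = NULL;       /* the core clears the slot either way */
}

Because the core now clears rxq[qid]/txq[qid] after the callback returns, drivers that also NULL the slot inside their own release (szedata2 and nicvf above) are merely being defensive; both orders are safe.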