net/idpf: support queue release
Add support for queue operations:
 - rx_queue_release
 - tx_queue_release

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
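Note: applications do not call these release functions directly; the ethdev layer reaches them through the new rx_queue_release/tx_queue_release ops, typically when a queue is set up again or the port is closed. A minimal application-side sketch that ends up exercising the release path (port id, descriptor count and mempool are illustrative):

	#include <rte_ethdev.h>
	#include <rte_lcore.h>

	/* Illustrative only: re-running queue setup on an already configured
	 * queue index releases the old queue before allocating a new one. */
	static int
	reconfigure_rxq(uint16_t port_id, uint16_t qid, struct rte_mempool *mp)
	{
		int ret;

		ret = rte_eth_dev_stop(port_id);
		if (ret != 0)
			return ret;

		ret = rte_eth_rx_queue_setup(port_id, qid, 1024, rte_socket_id(),
					     NULL, mp);
		if (ret != 0)
			return ret;

		return rte_eth_dev_start(port_id);
	}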
@@ -758,6 +758,8 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
 	.tx_queue_start = idpf_tx_queue_start,
 	.rx_queue_stop = idpf_rx_queue_stop,
 	.tx_queue_stop = idpf_tx_queue_stop,
+	.rx_queue_release = idpf_dev_rx_queue_release,
+	.tx_queue_release = idpf_dev_tx_queue_release,
 };
 
 static uint16_t
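For reference, the two ops registered above use the (dev, qid) callback signature that the ethdev layer expects for queue release. A simplified sketch of that dispatch, with a hypothetical helper name (not the actual rte_ethdev code):

	/* Hypothetical helper: how the ethdev layer is expected to invoke the
	 * per-queue release callback registered above. */
	static void
	eth_rx_queue_release_sketch(struct rte_eth_dev *dev, uint16_t qid)
	{
		if (dev->dev_ops->rx_queue_release != NULL)
			dev->dev_ops->rx_queue_release(dev, qid);
		dev->data->rx_queues[qid] = NULL;
	}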
@@ -171,6 +171,51 @@ reset_split_rx_bufq(struct idpf_rx_queue *rxq)
 	rxq->bufq2 = NULL;
 }
 
+static void
+idpf_rx_queue_release(void *rxq)
+{
+	struct idpf_rx_queue *q = rxq;
+
+	if (q == NULL)
+		return;
+
+	/* Split queue */
+	if (q->bufq1 != NULL && q->bufq2 != NULL) {
+		q->bufq1->ops->release_mbufs(q->bufq1);
+		rte_free(q->bufq1->sw_ring);
+		rte_memzone_free(q->bufq1->mz);
+		rte_free(q->bufq1);
+		q->bufq2->ops->release_mbufs(q->bufq2);
+		rte_free(q->bufq2->sw_ring);
+		rte_memzone_free(q->bufq2->mz);
+		rte_free(q->bufq2);
+		rte_memzone_free(q->mz);
+		rte_free(q);
+		return;
+	}
+
+	/* Single queue */
+	q->ops->release_mbufs(q);
+	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
+	rte_free(q);
+}
+
+static void
+idpf_tx_queue_release(void *txq)
+{
+	struct idpf_tx_queue *q = txq;
+
+	if (q == NULL)
+		return;
+
+	rte_free(q->complq);
+	q->ops->release_mbufs(q);
+	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
+	rte_free(q);
+}
+
 static inline void
 reset_split_rx_queue(struct idpf_rx_queue *rxq)
 {
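The ops->release_mbufs hooks used above free the mbufs still held in a queue's software ring before the ring itself is freed. A minimal sketch of such a hook for a single Rx ring, assuming the queue keeps its mbufs in sw_ring with nb_rx_desc entries (hypothetical example, not the driver's actual implementation):

	static void
	example_release_rxq_mbufs(struct idpf_rx_queue *rxq)
	{
		uint16_t i;

		if (rxq == NULL || rxq->sw_ring == NULL)
			return;

		/* Free any mbuf still posted to the ring. */
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i] != NULL) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i]);
				rxq->sw_ring[i] = NULL;
			}
		}
	}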
@@ -392,6 +437,12 @@ idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
 		return -EINVAL;
 
+	/* Free memory if needed */
+	if (dev->data->rx_queues[queue_idx] != NULL) {
+		idpf_rx_queue_release(dev->data->rx_queues[queue_idx]);
+		dev->data->rx_queues[queue_idx] = NULL;
+	}
+
 	/* Setup Rx description queue */
 	rxq = rte_zmalloc_socket("idpf rxq",
 				 sizeof(struct idpf_rx_queue),
@@ -524,6 +575,12 @@ idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
 		return -EINVAL;
 
+	/* Free memory if needed */
+	if (dev->data->rx_queues[queue_idx] != NULL) {
+		idpf_rx_queue_release(dev->data->rx_queues[queue_idx]);
+		dev->data->rx_queues[queue_idx] = NULL;
+	}
+
 	/* Setup Rx description queue */
 	rxq = rte_zmalloc_socket("idpf rxq",
 				 sizeof(struct idpf_rx_queue),
@@ -630,6 +687,12 @@ idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
 		return -EINVAL;
 
+	/* Free memory if needed. */
+	if (dev->data->tx_queues[queue_idx] != NULL) {
+		idpf_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		dev->data->tx_queues[queue_idx] = NULL;
+	}
+
 	/* Allocate the TX queue data structure. */
 	txq = rte_zmalloc_socket("idpf split txq",
 				 sizeof(struct idpf_tx_queue),
@@ -754,6 +817,12 @@ idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
 		return -EINVAL;
 
+	/* Free memory if needed. */
+	if (dev->data->tx_queues[queue_idx] != NULL) {
+		idpf_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		dev->data->tx_queues[queue_idx] = NULL;
+	}
+
 	/* Allocate the TX queue data structure. */
 	txq = rte_zmalloc_socket("idpf txq",
 				 sizeof(struct idpf_tx_queue),
@@ -1102,6 +1171,18 @@ idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	return 0;
 }
 
+void
+idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+	idpf_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+	idpf_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
 void
 idpf_stop_queues(struct rte_eth_dev *dev)
 {
@@ -124,12 +124,15 @@ int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 int idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+void idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			uint16_t nb_desc, unsigned int socket_id,
 			const struct rte_eth_txconf *tx_conf);
 int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+
 void idpf_stop_queues(struct rte_eth_dev *dev);
 #endif /* _IDPF_RXTX_H_ */