ethdev: make queue release callback optional
Some drivers do not need Rx and Tx queue release callbacks, so make them optional in eth_dev_ops: ethdev now skips the callback when it is NULL instead of requiring every driver to supply an empty stub. Clean up the empty queue release callbacks in the affected drivers.

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
commit 49ed322469
parent d74d3744da
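For driver authors, the effect is that an eth_dev_ops table no longer needs stub release callbacks. A minimal sketch of what this enables, assuming DPDK's internal ethdev_driver.h header and hypothetical example_* callbacks (none of these names are from the patch):

#include <ethdev_driver.h>	/* internal header that defines struct eth_dev_ops */

/* Hypothetical setup callbacks of a driver with no per-queue state to free. */
static int example_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
		uint16_t nb_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *conf, struct rte_mempool *mp);
static int example_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
		uint16_t nb_desc, unsigned int socket_id,
		const struct rte_eth_txconf *conf);

static const struct eth_dev_ops example_dev_ops = {
	.rx_queue_setup = example_rx_queue_setup,
	.tx_queue_setup = example_tx_queue_setup,
	/* .rx_queue_release and .tx_queue_release are left NULL on purpose:
	 * with this patch, ethdev skips a NULL callback instead of
	 * rejecting the operation with -ENOTSUP. */
};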
@@ -163,16 +163,6 @@ virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
 	return -1;
 }
 
-static void
-virtual_ethdev_rx_queue_release(void *q __rte_unused)
-{
-}
-
-static void
-virtual_ethdev_tx_queue_release(void *q __rte_unused)
-{
-}
-
 static int
 virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
 		int wait_to_complete __rte_unused)
@@ -243,8 +233,6 @@ static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
 	.dev_infos_get = virtual_ethdev_info_get,
 	.rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
 	.tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
-	.rx_queue_release = virtual_ethdev_rx_queue_release,
-	.tx_queue_release = virtual_ethdev_tx_queue_release,
 	.link_update = virtual_ethdev_link_update_success,
 	.mac_addr_set = virtual_ethdev_mac_address_set,
 	.stats_get = virtual_ethdev_stats_get,
@@ -438,11 +438,6 @@ eth_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
-static void
-eth_queue_release(void *q __rte_unused)
-{
-}
-
 static int
 eth_link_update(struct rte_eth_dev *dev __rte_unused,
 		int wait_to_complete __rte_unused)
@@ -606,8 +601,6 @@ static const struct eth_dev_ops ops = {
 	.promiscuous_disable = eth_dev_promiscuous_disable,
 	.rx_queue_setup = eth_rx_queue_setup,
 	.tx_queue_setup = eth_tx_queue_setup,
-	.rx_queue_release = eth_queue_release,
-	.tx_queue_release = eth_queue_release,
 	.link_update = eth_link_update,
 	.stats_get = eth_stats_get,
 	.stats_reset = eth_stats_reset,
@@ -989,11 +989,6 @@ eth_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
-static void
-eth_queue_release(void *q __rte_unused)
-{
-}
-
 static int
 eth_link_update(struct rte_eth_dev *dev __rte_unused,
 		int wait_to_complete __rte_unused)
@@ -1474,8 +1469,6 @@ static const struct eth_dev_ops ops = {
 	.promiscuous_disable = eth_dev_promiscuous_disable,
 	.rx_queue_setup = eth_rx_queue_setup,
 	.tx_queue_setup = eth_tx_queue_setup,
-	.rx_queue_release = eth_queue_release,
-	.tx_queue_release = eth_queue_release,
 	.link_update = eth_link_update,
 	.stats_get = eth_stats_get,
 	.stats_reset = eth_stats_reset,
@@ -1233,12 +1233,6 @@ dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
 	return 0;
 }
 
-static
-void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
-{
-	PMD_INIT_FUNC_TRACE();
-}
-
 static
 int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		uint16_t nb_desc __rte_unused,
@@ -1272,11 +1266,6 @@ int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	return 0;
 }
 
-static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
-{
-	PMD_INIT_FUNC_TRACE();
-}
-
 static uint32_t
 dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
@@ -1571,8 +1560,6 @@ static struct eth_dev_ops dpaa_devops = {
 
 	.rx_queue_setup = dpaa_eth_rx_queue_setup,
 	.tx_queue_setup = dpaa_eth_tx_queue_setup,
-	.rx_queue_release = dpaa_eth_rx_queue_release,
-	.tx_queue_release = dpaa_eth_tx_queue_release,
 	.rx_burst_mode_get = dpaa_dev_rx_burst_mode_get,
 	.tx_burst_mode_get = dpaa_dev_tx_burst_mode_get,
 	.rxq_info_get = dpaa_rxq_info_get,
@@ -1006,12 +1006,6 @@ dpaa2_dev_rx_queue_release(void *q __rte_unused)
 	}
 }
 
-static void
-dpaa2_dev_tx_queue_release(void *q __rte_unused)
-{
-	PMD_INIT_FUNC_TRACE();
-}
-
 static uint32_t
 dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
@@ -2429,7 +2423,6 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
 	.rx_queue_setup = dpaa2_dev_rx_queue_setup,
 	.rx_queue_release = dpaa2_dev_rx_queue_release,
 	.tx_queue_setup = dpaa2_dev_tx_queue_setup,
-	.tx_queue_release = dpaa2_dev_tx_queue_release,
 	.rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
 	.tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
 	.flow_ctrl_get = dpaa2_flow_ctrl_get,
@@ -288,11 +288,6 @@ ipn3ke_rpst_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
 	return 0;
 }
 
-static void
-ipn3ke_rpst_rx_queue_release(__rte_unused void *rxq)
-{
-}
-
 static int
 ipn3ke_rpst_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
 	__rte_unused uint16_t queue_idx, __rte_unused uint16_t nb_desc,
@@ -302,11 +297,6 @@ ipn3ke_rpst_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
 	return 0;
 }
 
-static void
-ipn3ke_rpst_tx_queue_release(__rte_unused void *txq)
-{
-}
-
 /* Statistics collected by each port, VSI, VEB, and S-channel */
 struct ipn3ke_rpst_eth_stats {
 	uint64_t tx_bytes; /* gotc */
@@ -2865,9 +2855,7 @@ static const struct eth_dev_ops ipn3ke_rpst_dev_ops = {
 	.tx_queue_start = ipn3ke_rpst_tx_queue_start,
 	.tx_queue_stop = ipn3ke_rpst_tx_queue_stop,
 	.rx_queue_setup = ipn3ke_rpst_rx_queue_setup,
-	.rx_queue_release = ipn3ke_rpst_rx_queue_release,
 	.tx_queue_setup = ipn3ke_rpst_tx_queue_setup,
-	.tx_queue_release = ipn3ke_rpst_tx_queue_release,
 
 	.dev_set_link_up = ipn3ke_rpst_dev_set_link_up,
 	.dev_set_link_down = ipn3ke_rpst_dev_set_link_down,
@@ -284,11 +284,6 @@ eth_kni_tx_queue_setup(struct rte_eth_dev *dev,
 	return 0;
 }
 
-static void
-eth_kni_queue_release(void *q __rte_unused)
-{
-}
-
 static int
 eth_kni_link_update(struct rte_eth_dev *dev __rte_unused,
 		int wait_to_complete __rte_unused)
@@ -362,8 +357,6 @@ static const struct eth_dev_ops eth_kni_ops = {
 	.dev_infos_get = eth_kni_dev_info,
 	.rx_queue_setup = eth_kni_rx_queue_setup,
 	.tx_queue_setup = eth_kni_tx_queue_setup,
-	.rx_queue_release = eth_kni_queue_release,
-	.tx_queue_release = eth_kni_queue_release,
 	.link_update = eth_kni_link_update,
 	.stats_get = eth_kni_stats_get,
 	.stats_reset = eth_kni_stats_reset,
@@ -857,11 +857,6 @@ eth_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
-static void
-eth_queue_release(void *q __rte_unused)
-{
-}
-
 static int
 eth_link_update(struct rte_eth_dev *dev __rte_unused,
 		int wait_to_complete __rte_unused)
@@ -1006,8 +1001,6 @@ static const struct eth_dev_ops ops = {
 	.tx_queue_start = eth_tx_queue_start,
 	.rx_queue_stop = eth_rx_queue_stop,
 	.tx_queue_stop = eth_tx_queue_stop,
-	.rx_queue_release = eth_queue_release,
-	.tx_queue_release = eth_queue_release,
 	.link_update = eth_link_update,
 	.stats_get = eth_stats_get,
 	.stats_reset = eth_stats_reset,
@@ -494,18 +494,6 @@ pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	return 0;
 }
 
-static void
-pfe_rx_queue_release(void *q __rte_unused)
-{
-	PMD_INIT_FUNC_TRACE();
-}
-
-static void
-pfe_tx_queue_release(void *q __rte_unused)
-{
-	PMD_INIT_FUNC_TRACE();
-}
-
 static int
 pfe_tx_queue_setup(struct rte_eth_dev *dev,
 		   uint16_t queue_idx,
@@ -759,9 +747,7 @@ static const struct eth_dev_ops ops = {
 	.dev_configure = pfe_eth_configure,
 	.dev_infos_get = pfe_eth_info,
 	.rx_queue_setup = pfe_rx_queue_setup,
-	.rx_queue_release = pfe_rx_queue_release,
 	.tx_queue_setup = pfe_tx_queue_setup,
-	.tx_queue_release = pfe_tx_queue_release,
 	.dev_supported_ptypes_get = pfe_supported_ptypes_get,
 	.link_update = pfe_eth_link_update,
 	.promiscuous_enable = pfe_promiscuous_enable,
@@ -225,8 +225,6 @@ eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
-static void
-eth_queue_release(void *q __rte_unused) { ; }
 static int
 eth_link_update(struct rte_eth_dev *dev __rte_unused,
 		int wait_to_complete __rte_unused) { return 0; }
@@ -272,8 +270,6 @@ static const struct eth_dev_ops ops = {
 	.dev_infos_get = eth_dev_info,
 	.rx_queue_setup = eth_rx_queue_setup,
 	.tx_queue_setup = eth_tx_queue_setup,
-	.rx_queue_release = eth_queue_release,
-	.tx_queue_release = eth_queue_release,
 	.link_update = eth_link_update,
 	.stats_get = eth_stats_get,
 	.stats_reset = eth_stats_reset,
@@ -370,12 +370,6 @@ virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
 	return 0;
 }
 
-static void
-virtio_dev_queue_release(void *queue __rte_unused)
-{
-	/* do nothing */
-}
-
 static uint16_t
 virtio_get_nr_vq(struct virtio_hw *hw)
 {
@@ -981,9 +975,7 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
 	.rx_queue_setup = virtio_dev_rx_queue_setup,
 	.rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
 	.rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
-	.rx_queue_release = virtio_dev_queue_release,
 	.tx_queue_setup = virtio_dev_tx_queue_setup,
-	.tx_queue_release = virtio_dev_queue_release,
 	/* collect stats per queue */
 	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
 	.vlan_filter_set = virtio_vlan_filter_set,
@@ -889,6 +889,32 @@ eth_err(uint16_t port_id, int ret)
 	return ret;
 }
 
+static void
+eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+	void **rxq = dev->data->rx_queues;
+
+	if (rxq[qid] == NULL)
+		return;
+
+	if (dev->dev_ops->rx_queue_release != NULL)
+		(*dev->dev_ops->rx_queue_release)(rxq[qid]);
+	rxq[qid] = NULL;
+}
+
+static void
+eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+	void **txq = dev->data->tx_queues;
+
+	if (txq[qid] == NULL)
+		return;
+
+	if (dev->dev_ops->tx_queue_release != NULL)
+		(*dev->dev_ops->tx_queue_release)(txq[qid]);
+	txq[qid] = NULL;
+}
+
 static int
 eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 {
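The two helpers above carry the whole behavioral change: the NULL test on the op replaces the old RTE_FUNC_PTR_OR_ERR_RET(..., -ENOTSUP) guard, and clearing the cached queue pointer now happens in exactly one place. Reduced to plain C, the idiom looks like this (a sketch with hypothetical names, not part of the patch):

#include <stddef.h>

typedef void (*queue_release_t)(void *queue);

/* Release slot 'qid' of a queue-pointer array: invoke the driver hook
 * only if one was provided, and forget the cached pointer either way. */
static void
queue_slot_release(void **queues, unsigned int qid, queue_release_t release)
{
	if (queues[qid] == NULL)
		return;			/* nothing was ever set up here */
	if (release != NULL)
		release(queues[qid]);	/* optional driver cleanup */
	queues[qid] = NULL;		/* the slot is now empty */
}

Because the helpers also tolerate an empty slot, the callers below can drop their own if (rxq[qid] != NULL) checks.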
@@ -905,12 +931,10 @@ eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 			return -(ENOMEM);
 		}
 	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
-		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
+		for (i = nb_queues; i < old_nb_queues; i++)
+			eth_dev_rxq_release(dev, i);
 
 		rxq = dev->data->rx_queues;
-
-		for (i = nb_queues; i < old_nb_queues; i++)
-			(*dev->dev_ops->rx_queue_release)(rxq[i]);
 		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
 				RTE_CACHE_LINE_SIZE);
 		if (rxq == NULL)
@@ -925,12 +949,8 @@ eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 		dev->data->rx_queues = rxq;
 
 	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
-		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
-
-		rxq = dev->data->rx_queues;
-
 		for (i = nb_queues; i < old_nb_queues; i++)
-			(*dev->dev_ops->rx_queue_release)(rxq[i]);
+			eth_dev_rxq_release(dev, i);
 
 		rte_free(dev->data->rx_queues);
 		dev->data->rx_queues = NULL;
@@ -1145,12 +1165,10 @@ eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 			return -(ENOMEM);
 		}
 	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
-		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
+		for (i = nb_queues; i < old_nb_queues; i++)
+			eth_dev_txq_release(dev, i);
 
 		txq = dev->data->tx_queues;
-
-		for (i = nb_queues; i < old_nb_queues; i++)
-			(*dev->dev_ops->tx_queue_release)(txq[i]);
 		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
 				RTE_CACHE_LINE_SIZE);
 		if (txq == NULL)
@@ -1165,12 +1183,8 @@ eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 		dev->data->tx_queues = txq;
 
 	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
-		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
-
-		txq = dev->data->tx_queues;
-
 		for (i = nb_queues; i < old_nb_queues; i++)
-			(*dev->dev_ops->tx_queue_release)(txq[i]);
+			eth_dev_txq_release(dev, i);
 
 		rte_free(dev->data->tx_queues);
 		dev->data->tx_queues = NULL;
@@ -2006,7 +2020,6 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	struct rte_eth_dev *dev;
 	struct rte_eth_dev_info dev_info;
 	struct rte_eth_rxconf local_conf;
-	void **rxq;
 
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 	dev = &rte_eth_devices[port_id];
@@ -2110,13 +2123,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 			RTE_ETH_QUEUE_STATE_STOPPED))
 		return -EBUSY;
 
-	rxq = dev->data->rx_queues;
-	if (rxq[rx_queue_id]) {
-		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
-					-ENOTSUP);
-		(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
-		rxq[rx_queue_id] = NULL;
-	}
+	eth_dev_rxq_release(dev, rx_queue_id);
 
 	if (rx_conf == NULL)
 		rx_conf = &dev_info.default_rxconf;
@@ -2189,7 +2196,6 @@ rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	int ret;
 	struct rte_eth_dev *dev;
 	struct rte_eth_hairpin_cap cap;
-	void **rxq;
 	int i;
 	int count;
 
@@ -2246,13 +2252,7 @@ rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	}
 	if (dev->data->dev_started)
 		return -EBUSY;
-	rxq = dev->data->rx_queues;
-	if (rxq[rx_queue_id] != NULL) {
-		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
-					-ENOTSUP);
-		(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
-		rxq[rx_queue_id] = NULL;
-	}
+	eth_dev_rxq_release(dev, rx_queue_id);
 	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
 						      nb_rx_desc, conf);
 	if (ret == 0)
@@ -2269,7 +2269,6 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 	struct rte_eth_dev *dev;
 	struct rte_eth_dev_info dev_info;
 	struct rte_eth_txconf local_conf;
-	void **txq;
 	int ret;
 
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
@@ -2314,13 +2313,7 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 			RTE_ETH_QUEUE_STATE_STOPPED))
 		return -EBUSY;
 
-	txq = dev->data->tx_queues;
-	if (txq[tx_queue_id]) {
-		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
-					-ENOTSUP);
-		(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
-		txq[tx_queue_id] = NULL;
-	}
+	eth_dev_txq_release(dev, tx_queue_id);
 
 	if (tx_conf == NULL)
 		tx_conf = &dev_info.default_txconf;
@@ -2368,7 +2361,6 @@ rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 {
 	struct rte_eth_dev *dev;
 	struct rte_eth_hairpin_cap cap;
-	void **txq;
 	int i;
 	int count;
 	int ret;
@@ -2426,13 +2418,7 @@ rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 	}
 	if (dev->data->dev_started)
 		return -EBUSY;
-	txq = dev->data->tx_queues;
-	if (txq[tx_queue_id] != NULL) {
-		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
-					-ENOTSUP);
-		(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
-		txq[tx_queue_id] = NULL;
-	}
+	eth_dev_txq_release(dev, tx_queue_id);
 	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
 		(dev, tx_queue_id, nb_tx_desc, conf);
 	if (ret == 0)
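For applications the API is unchanged, but one failure mode disappears: re-running queue setup on a queue that already exists used to require the driver to implement the release callback, or the call failed with -ENOTSUP. A hypothetical application-side sketch (port_id and mb_pool come from the application's own init code):

#include <rte_ethdev.h>

/* Re-create Rx queue 0 of an already-configured port with 512 descriptors
 * and the driver's default Rx configuration. */
static int
reconfigure_rxq0(uint16_t port_id, struct rte_mempool *mb_pool)
{
	/* With this patch, a driver that leaves rx_queue_release NULL no
	 * longer makes this call fail with -ENOTSUP once queue 0 exists:
	 * ethdev drops the stale queue pointer itself via eth_dev_rxq_release(). */
	return rte_eth_rx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), NULL, mb_pool);
}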