eal: deprecate RTE_FUNC_PTR_* macros

Those macros have no real value and are easily replaced with a simple
if() block.

Existing users have been converted using a new cocci script.
Deprecate them.

Signed-off-by: David Marchand <david.marchand@redhat.com>
This commit is contained in:
David Marchand 2022-08-23 16:16:43 +02:00
parent 99948194a8
commit 8f1d23ece0
31 changed files with 517 additions and 285 deletions

View File

@ -0,0 +1,12 @@
// Coccinelle script converting callers of the deprecated RTE_FUNC_PTR_OR_*
// macros into plain if () blocks with the equivalent NULL check.
// Rule 1: RTE_FUNC_PTR_OR_ERR_RET(f, ret) — replace with a NULL check
// that returns ret.
@@
expression cond, ret;
@@
-RTE_FUNC_PTR_OR_ERR_RET(cond, ret);
+if (cond == NULL)
+ return ret;
// Rule 2: RTE_FUNC_PTR_OR_RET(f) — void variant; replace with a NULL
// check and a bare return.
@@
expression cond;
@@
-RTE_FUNC_PTR_OR_RET(cond);
+if (cond == NULL)
+ return;

View File

@ -14,6 +14,10 @@ Deprecation Notices
* kvargs: The function ``rte_kvargs_process`` will get a new parameter
for returning key match count. It will ease handling of the no-match case.
* eal: RTE_FUNC_PTR_OR_* macros have been marked deprecated and will be removed
in the future. Applications can use ``devtools/cocci/func_or_ret.cocci``
to update their code.
* eal: The function ``rte_eal_remote_launch`` will return new error codes
after read or write error on the pipe, instead of calling ``rte_panic``.

View File

@ -84,6 +84,10 @@ API Changes
Also, make sure to start the actual text at the margin.
=======================================================
* eal: RTE_FUNC_PTR_OR_* macros have been marked deprecated and will be removed
in the future. Applications can use ``devtools/cocci/func_or_ret.cocci``
to update their code.
* raw/ifpga: The function ``rte_pmd_ifpga_get_pci_bus`` has been removed.

View File

@ -58,8 +58,8 @@ qat_pci_get_extra_size(enum qat_device_gen qat_dev_gen)
{
struct qat_dev_hw_spec_funcs *ops_hw =
qat_dev_hw_spec[qat_dev_gen];
RTE_FUNC_PTR_OR_ERR_RET(ops_hw->qat_dev_get_extra_size,
-ENOTSUP);
if (ops_hw->qat_dev_get_extra_size == NULL)
return -ENOTSUP;
return ops_hw->qat_dev_get_extra_size();
}
@ -381,8 +381,8 @@ static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
return -ENODEV;
ops_hw = qat_dev_hw_spec[qat_pci_dev->qat_dev_gen];
RTE_FUNC_PTR_OR_ERR_RET(ops_hw->qat_dev_reset_ring_pairs,
-ENOTSUP);
if (ops_hw->qat_dev_reset_ring_pairs == NULL)
return -ENOTSUP;
if (ops_hw->qat_dev_reset_ring_pairs(qat_pci_dev)) {
QAT_LOG(ERR,
"Cannot reset ring pairs, does pf driver supports pf2vf comms?"

View File

@ -370,8 +370,8 @@ adf_queue_arb_enable(struct qat_pci_device *qat_dev, struct qat_queue *txq,
struct qat_qp_hw_spec_funcs *ops =
qat_qp_hw_spec[qat_dev->qat_dev_gen];
RTE_FUNC_PTR_OR_ERR_RET(ops->qat_qp_adf_arb_enable,
-ENOTSUP);
if (ops->qat_qp_adf_arb_enable == NULL)
return -ENOTSUP;
ops->qat_qp_adf_arb_enable(txq, base_addr, lock);
return 0;
}
@ -383,8 +383,8 @@ adf_queue_arb_disable(enum qat_device_gen qat_dev_gen, struct qat_queue *txq,
struct qat_qp_hw_spec_funcs *ops =
qat_qp_hw_spec[qat_dev_gen];
RTE_FUNC_PTR_OR_ERR_RET(ops->qat_qp_adf_arb_disable,
-ENOTSUP);
if (ops->qat_qp_adf_arb_disable == NULL)
return -ENOTSUP;
ops->qat_qp_adf_arb_disable(txq, base_addr, lock);
return 0;
}
@ -396,8 +396,8 @@ qat_qp_build_ring_base(struct qat_pci_device *qat_dev, void *io_addr,
struct qat_qp_hw_spec_funcs *ops =
qat_qp_hw_spec[qat_dev->qat_dev_gen];
RTE_FUNC_PTR_OR_ERR_RET(ops->qat_qp_build_ring_base,
-ENOTSUP);
if (ops->qat_qp_build_ring_base == NULL)
return -ENOTSUP;
ops->qat_qp_build_ring_base(io_addr, queue);
return 0;
}
@ -409,8 +409,8 @@ qat_qps_per_service(struct qat_pci_device *qat_dev,
struct qat_qp_hw_spec_funcs *ops =
qat_qp_hw_spec[qat_dev->qat_dev_gen];
RTE_FUNC_PTR_OR_ERR_RET(ops->qat_qp_rings_per_service,
-ENOTSUP);
if (ops->qat_qp_rings_per_service == NULL)
return -ENOTSUP;
return ops->qat_qp_rings_per_service(qat_dev, service);
}
@ -421,7 +421,8 @@ qat_qp_get_hw_data(struct qat_pci_device *qat_dev,
struct qat_qp_hw_spec_funcs *ops =
qat_qp_hw_spec[qat_dev->qat_dev_gen];
RTE_FUNC_PTR_OR_ERR_RET(ops->qat_qp_get_hw_data, NULL);
if (ops->qat_qp_get_hw_data == NULL)
return NULL;
return ops->qat_qp_get_hw_data(qat_dev, service, qp_id);
}
@ -431,8 +432,8 @@ qat_read_qp_config(struct qat_pci_device *qat_dev)
struct qat_dev_hw_spec_funcs *ops_hw =
qat_dev_hw_spec[qat_dev->qat_dev_gen];
RTE_FUNC_PTR_OR_ERR_RET(ops_hw->qat_dev_read_config,
-ENOTSUP);
if (ops_hw->qat_dev_read_config == NULL)
return -ENOTSUP;
return ops_hw->qat_dev_read_config(qat_dev);
}
@ -442,8 +443,8 @@ adf_configure_queues(struct qat_qp *qp, enum qat_device_gen qat_dev_gen)
struct qat_qp_hw_spec_funcs *ops =
qat_qp_hw_spec[qat_dev_gen];
RTE_FUNC_PTR_OR_ERR_RET(ops->qat_qp_adf_configure_queues,
-ENOTSUP);
if (ops->qat_qp_adf_configure_queues == NULL)
return -ENOTSUP;
ops->qat_qp_adf_configure_queues(qp);
return 0;
}
@ -483,8 +484,8 @@ qat_qp_csr_setup(struct qat_pci_device *qat_dev,
struct qat_qp_hw_spec_funcs *ops =
qat_qp_hw_spec[qat_dev->qat_dev_gen];
RTE_FUNC_PTR_OR_ERR_RET(ops->qat_qp_csr_setup,
-ENOTSUP);
if (ops->qat_qp_csr_setup == NULL)
return -ENOTSUP;
ops->qat_qp_csr_setup(qat_dev, io_addr, qp);
return 0;
}

View File

@ -25,8 +25,8 @@ qat_comp_capabilities_info qat_comp_get_capa_info(
if (qat_dev_gen >= QAT_N_GENS)
return ret;
RTE_FUNC_PTR_OR_ERR_RET(qat_comp_gen_dev_ops[qat_dev_gen]
.qat_comp_get_capabilities, ret);
if (qat_comp_gen_dev_ops[qat_dev_gen].qat_comp_get_capabilities == NULL)
return ret;
return qat_comp_gen_dev_ops[qat_dev_gen]
.qat_comp_get_capabilities(qat_dev);
}

View File

@ -541,7 +541,8 @@ rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
sched_ctx = dev->data->dev_private;
RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_set, -ENOTSUP);
if (*sched_ctx->ops.option_set == NULL)
return -ENOTSUP;
return (*sched_ctx->ops.option_set)(dev, option_type, option);
}
@ -571,7 +572,8 @@ rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
sched_ctx = dev->data->dev_private;
RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_get, -ENOTSUP);
if (*sched_ctx->ops.option_get == NULL)
return -ENOTSUP;
return (*sched_ctx->ops.option_get)(dev, option_type, option);
}

View File

@ -160,7 +160,8 @@ scheduler_pmd_start(struct rte_cryptodev *dev)
return -1;
}
RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.worker_attach, -ENOTSUP);
if (*sched_ctx->ops.worker_attach == NULL)
return -ENOTSUP;
for (i = 0; i < sched_ctx->nb_workers; i++) {
uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
@ -171,7 +172,8 @@ scheduler_pmd_start(struct rte_cryptodev *dev)
}
}
RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);
if (*sched_ctx->ops.scheduler_start == NULL)
return -ENOTSUP;
if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
CR_SCHED_LOG(ERR, "Scheduler start failed");

View File

@ -291,7 +291,8 @@ rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
if (on > 1)
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
if (*dev->dev_ops->vlan_strip_queue_set == NULL)
return -ENOTSUP;
/* The PF has 128 queue pairs and in SRIOV configuration
* those queues will be assigned to VF's, so RXDCTL

View File

@ -281,7 +281,8 @@ lio_dev_xstats_reset(struct rte_eth_dev *eth_dev)
}
/* clear stored per queue stats */
RTE_FUNC_PTR_OR_ERR_RET(*eth_dev->dev_ops->stats_reset, 0);
if (*eth_dev->dev_ops->stats_reset == NULL)
return 0;
return (*eth_dev->dev_ops->stats_reset)(eth_dev);
}

View File

@ -713,7 +713,8 @@ ifpga_rawdev_configure(const struct rte_rawdev *dev,
{
IFPGA_RAWDEV_PMD_FUNC_TRACE();
RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
if (dev == NULL)
return -EINVAL;
return config ? 0 : 1;
}
@ -726,7 +727,8 @@ ifpga_rawdev_start(struct rte_rawdev *dev)
IFPGA_RAWDEV_PMD_FUNC_TRACE();
RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
if (dev == NULL)
return -EINVAL;
adapter = ifpga_rawdev_get_priv(dev);
if (!adapter)

View File

@ -77,7 +77,8 @@ static int skeleton_rawdev_configure(const struct rte_rawdev *dev,
SKELETON_PMD_FUNC_TRACE();
RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
if (dev == NULL)
return -EINVAL;
if (config == NULL || config_size != sizeof(*skeldev_conf)) {
SKELETON_PMD_ERR("Invalid configuration");
@ -107,7 +108,8 @@ static int skeleton_rawdev_start(struct rte_rawdev *dev)
SKELETON_PMD_FUNC_TRACE();
RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
if (dev == NULL)
return -EINVAL;
skeldev = skeleton_rawdev_get_priv(dev);
@ -170,7 +172,8 @@ static int skeleton_rawdev_close(struct rte_rawdev *dev)
SKELETON_PMD_FUNC_TRACE();
RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
if (dev == NULL)
return -EINVAL;
skeldev = skeleton_rawdev_get_priv(dev);
@ -213,7 +216,8 @@ static int skeleton_rawdev_reset(struct rte_rawdev *dev)
SKELETON_PMD_FUNC_TRACE();
RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
if (dev == NULL)
return -EINVAL;
skeldev = skeleton_rawdev_get_priv(dev);
@ -296,7 +300,8 @@ static int skeleton_rawdev_queue_release(struct rte_rawdev *dev,
SKELETON_PMD_FUNC_TRACE();
RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
if (dev == NULL)
return -EINVAL;
skeldev = skeleton_rawdev_get_priv(dev);
@ -318,7 +323,8 @@ static uint16_t skeleton_rawdev_queue_count(struct rte_rawdev *dev)
SKELETON_PMD_FUNC_TRACE();
RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
if (dev == NULL)
return -EINVAL;
skeldev = skeleton_rawdev_get_priv(dev);
return skeldev->num_queues;
@ -469,7 +475,8 @@ static int skeleton_rawdev_firmware_status_get(struct rte_rawdev *dev,
skeldev = skeleton_rawdev_get_priv(dev);
RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
if (dev == NULL)
return -EINVAL;
if (status_info)
memcpy(status_info, &skeldev->fw.firmware_state,

View File

@ -315,7 +315,8 @@ rte_compressdev_queue_pairs_config(struct rte_compressdev *dev,
memset(&dev_info, 0, sizeof(struct rte_compressdev_info));
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
if (*dev->dev_ops->dev_infos_get == NULL)
return -ENOTSUP;
(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
if ((dev_info.max_nb_queue_pairs != 0) &&
@ -344,8 +345,8 @@ rte_compressdev_queue_pairs_config(struct rte_compressdev *dev,
qp = dev->data->queue_pairs;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
-ENOTSUP);
if (*dev->dev_ops->queue_pair_release == NULL)
return -ENOTSUP;
for (i = nb_qpairs; i < old_nb_queues; i++) {
ret = (*dev->dev_ops->queue_pair_release)(dev, i);
@ -395,8 +396,8 @@ rte_compressdev_queue_pairs_release(struct rte_compressdev *dev)
COMPRESSDEV_LOG(DEBUG, "Free %d queues pairs on device %u",
dev->data->nb_queue_pairs, dev->data->dev_id);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
-ENOTSUP);
if (*dev->dev_ops->queue_pair_release == NULL)
return -ENOTSUP;
for (i = 0; i < num_qps; i++) {
ret = (*dev->dev_ops->queue_pair_release)(dev, i);
@ -430,7 +431,8 @@ rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
return -EBUSY;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
if (*dev->dev_ops->dev_configure == NULL)
return -ENOTSUP;
/* Setup new number of queue pairs and reconfigure device. */
diag = rte_compressdev_queue_pairs_config(dev, config->nb_queue_pairs,
@ -460,7 +462,8 @@ rte_compressdev_start(uint8_t dev_id)
dev = &rte_comp_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
if (*dev->dev_ops->dev_start == NULL)
return -ENOTSUP;
if (dev->data->dev_started != 0) {
COMPRESSDEV_LOG(ERR,
@ -489,7 +492,8 @@ rte_compressdev_stop(uint8_t dev_id)
dev = &rte_comp_devices[dev_id];
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
if (*dev->dev_ops->dev_stop == NULL)
return;
if (dev->data->dev_started == 0) {
COMPRESSDEV_LOG(ERR,
@ -527,7 +531,8 @@ rte_compressdev_close(uint8_t dev_id)
if (retval < 0)
return retval;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
if (*dev->dev_ops->dev_close == NULL)
return -ENOTSUP;
retval = (*dev->dev_ops->dev_close)(dev);
if (retval < 0)
@ -565,7 +570,8 @@ rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
if (*dev->dev_ops->queue_pair_setup == NULL)
return -ENOTSUP;
return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id,
max_inflight_ops, socket_id);
@ -611,7 +617,8 @@ rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats)
dev = &rte_comp_devices[dev_id];
memset(stats, 0, sizeof(*stats));
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
if (*dev->dev_ops->stats_get == NULL)
return -ENOTSUP;
(*dev->dev_ops->stats_get)(dev, stats);
return 0;
}
@ -628,7 +635,8 @@ rte_compressdev_stats_reset(uint8_t dev_id)
dev = &rte_comp_devices[dev_id];
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
if (*dev->dev_ops->stats_reset == NULL)
return;
(*dev->dev_ops->stats_reset)(dev);
}
@ -647,7 +655,8 @@ rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
memset(dev_info, 0, sizeof(struct rte_compressdev_info));
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
if (*dev->dev_ops->dev_infos_get == NULL)
return;
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
dev_info->driver_name = dev->device->driver->name;
@ -666,7 +675,8 @@ rte_compressdev_private_xform_create(uint8_t dev_id,
if (xform == NULL || priv_xform == NULL || dev == NULL)
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->private_xform_create, -ENOTSUP);
if (*dev->dev_ops->private_xform_create == NULL)
return -ENOTSUP;
ret = (*dev->dev_ops->private_xform_create)(dev, xform, priv_xform);
if (ret < 0) {
COMPRESSDEV_LOG(ERR,
@ -689,7 +699,8 @@ rte_compressdev_private_xform_free(uint8_t dev_id, void *priv_xform)
if (dev == NULL || priv_xform == NULL)
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->private_xform_free, -ENOTSUP);
if (*dev->dev_ops->private_xform_free == NULL)
return -ENOTSUP;
ret = dev->dev_ops->private_xform_free(dev, priv_xform);
if (ret < 0) {
COMPRESSDEV_LOG(ERR,
@ -714,7 +725,8 @@ rte_compressdev_stream_create(uint8_t dev_id,
if (xform == NULL || dev == NULL || stream == NULL)
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stream_create, -ENOTSUP);
if (*dev->dev_ops->stream_create == NULL)
return -ENOTSUP;
ret = (*dev->dev_ops->stream_create)(dev, xform, stream);
if (ret < 0) {
COMPRESSDEV_LOG(ERR,
@ -738,7 +750,8 @@ rte_compressdev_stream_free(uint8_t dev_id, void *stream)
if (dev == NULL || stream == NULL)
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stream_free, -ENOTSUP);
if (*dev->dev_ops->stream_free == NULL)
return -ENOTSUP;
ret = dev->dev_ops->stream_free(dev, stream);
if (ret < 0) {
COMPRESSDEV_LOG(ERR,

View File

@ -978,7 +978,8 @@ rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
if (*dev->dev_ops->dev_infos_get == NULL)
return -ENOTSUP;
(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
@ -1007,8 +1008,8 @@ rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
qp = dev->data->queue_pairs;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
-ENOTSUP);
if (*dev->dev_ops->queue_pair_release == NULL)
return -ENOTSUP;
for (i = nb_qpairs; i < old_nb_queues; i++) {
ret = (*dev->dev_ops->queue_pair_release)(dev, i);
@ -1041,7 +1042,8 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
return -EBUSY;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
if (*dev->dev_ops->dev_configure == NULL)
return -ENOTSUP;
rte_spinlock_lock(&rte_cryptodev_callback_lock);
cryptodev_cb_cleanup(dev);
@ -1083,7 +1085,8 @@ rte_cryptodev_start(uint8_t dev_id)
dev = &rte_crypto_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
if (*dev->dev_ops->dev_start == NULL)
return -ENOTSUP;
if (dev->data->dev_started != 0) {
CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
@ -1116,7 +1119,8 @@ rte_cryptodev_stop(uint8_t dev_id)
dev = &rte_crypto_devices[dev_id];
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
if (*dev->dev_ops->dev_stop == NULL)
return;
if (dev->data->dev_started == 0) {
CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
@ -1163,7 +1167,8 @@ rte_cryptodev_close(uint8_t dev_id)
}
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
if (*dev->dev_ops->dev_close == NULL)
return -ENOTSUP;
retval = (*dev->dev_ops->dev_close)(dev);
rte_cryptodev_trace_close(dev_id, retval);
@ -1262,7 +1267,8 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
return -EBUSY;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
if (*dev->dev_ops->queue_pair_setup == NULL)
return -ENOTSUP;
rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
@ -1557,7 +1563,8 @@ rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
dev = &rte_crypto_devices[dev_id];
memset(stats, 0, sizeof(*stats));
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
if (*dev->dev_ops->stats_get == NULL)
return -ENOTSUP;
(*dev->dev_ops->stats_get)(dev, stats);
return 0;
}
@ -1574,7 +1581,8 @@ rte_cryptodev_stats_reset(uint8_t dev_id)
dev = &rte_crypto_devices[dev_id];
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
if (*dev->dev_ops->stats_reset == NULL)
return;
(*dev->dev_ops->stats_reset)(dev);
}
@ -1592,7 +1600,8 @@ rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
if (*dev->dev_ops->dev_infos_get == NULL)
return;
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
dev_info->driver_name = dev->device->driver->name;
@ -1739,7 +1748,8 @@ rte_cryptodev_sym_session_init(uint8_t dev_id,
if (index >= sess->nb_drivers)
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);
if (*dev->dev_ops->sym_session_configure == NULL)
return -ENOTSUP;
if (sess->sess_data[index].refcnt == 0) {
ret = dev->dev_ops->sym_session_configure(dev, xforms,
@ -1968,7 +1978,8 @@ rte_cryptodev_asym_session_create(uint8_t dev_id,
/* Clear device session pointer.*/
memset(sess->sess_private_data, 0, session_priv_data_sz + sess->user_data_sz);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure, -ENOTSUP);
if (*dev->dev_ops->asym_session_configure == NULL)
return -ENOTSUP;
if (sess->sess_private_data[0] == 0) {
ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
@ -2007,7 +2018,8 @@ rte_cryptodev_sym_session_clear(uint8_t dev_id,
if (--sess->sess_data[driver_id].refcnt != 0)
return -EBUSY;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
if (*dev->dev_ops->sym_session_clear == NULL)
return -ENOTSUP;
dev->dev_ops->sym_session_clear(dev, sess);
@ -2054,7 +2066,8 @@ rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
if (dev == NULL || sess == NULL)
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
if (*dev->dev_ops->asym_session_clear == NULL)
return -ENOTSUP;
dev->dev_ops->asym_session_clear(dev, sess);

View File

@ -422,7 +422,8 @@ rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
if (*dev->dev_ops->dev_info_get == NULL)
return -ENOTSUP;
memset(dev_info, 0, sizeof(struct rte_dma_info));
ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
sizeof(struct rte_dma_info));
@ -474,7 +475,8 @@ rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
if (*dev->dev_ops->dev_configure == NULL)
return -ENOTSUP;
ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
sizeof(struct rte_dma_conf));
if (ret == 0)
@ -557,7 +559,8 @@ rte_dma_close(int16_t dev_id)
return -EBUSY;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
if (*dev->dev_ops->dev_close == NULL)
return -ENOTSUP;
ret = (*dev->dev_ops->dev_close)(dev);
if (ret == 0)
dma_release(dev);
@ -650,7 +653,8 @@ rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
if (*dev->dev_ops->vchan_setup == NULL)
return -ENOTSUP;
return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
sizeof(struct rte_dma_vchan_conf));
}
@ -670,7 +674,8 @@ rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
if (*dev->dev_ops->stats_get == NULL)
return -ENOTSUP;
memset(stats, 0, sizeof(struct rte_dma_stats));
return (*dev->dev_ops->stats_get)(dev, vchan, stats,
sizeof(struct rte_dma_stats));
@ -691,7 +696,8 @@ rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
if (*dev->dev_ops->stats_reset == NULL)
return -ENOTSUP;
return (*dev->dev_ops->stats_reset)(dev, vchan);
}
@ -708,7 +714,8 @@ rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_status, -ENOTSUP);
if (*dev->dev_ops->vchan_status == NULL)
return -ENOTSUP;
return (*dev->dev_ops->vchan_status)(dev, vchan, status);
}

View File

@ -860,7 +860,8 @@ rte_dma_copy(int16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
#ifdef RTE_DMADEV_DEBUG
if (!rte_dma_is_valid(dev_id) || length == 0)
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(*obj->copy, -ENOTSUP);
if (*obj->copy == NULL)
return -ENOTSUP;
#endif
return (*obj->copy)(obj->dev_private, vchan, src, dst, length, flags);
@ -911,7 +912,8 @@ rte_dma_copy_sg(int16_t dev_id, uint16_t vchan, struct rte_dma_sge *src,
if (!rte_dma_is_valid(dev_id) || src == NULL || dst == NULL ||
nb_src == 0 || nb_dst == 0)
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(*obj->copy_sg, -ENOTSUP);
if (*obj->copy_sg == NULL)
return -ENOTSUP;
#endif
return (*obj->copy_sg)(obj->dev_private, vchan, src, dst, nb_src,
@ -957,7 +959,8 @@ rte_dma_fill(int16_t dev_id, uint16_t vchan, uint64_t pattern,
#ifdef RTE_DMADEV_DEBUG
if (!rte_dma_is_valid(dev_id) || length == 0)
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(*obj->fill, -ENOTSUP);
if (*obj->fill == NULL)
return -ENOTSUP;
#endif
return (*obj->fill)(obj->dev_private, vchan, pattern, dst, length,
@ -990,7 +993,8 @@ rte_dma_submit(int16_t dev_id, uint16_t vchan)
#ifdef RTE_DMADEV_DEBUG
if (!rte_dma_is_valid(dev_id))
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(*obj->submit, -ENOTSUP);
if (*obj->submit == NULL)
return -ENOTSUP;
#endif
return (*obj->submit)(obj->dev_private, vchan);
@ -1033,7 +1037,8 @@ rte_dma_completed(int16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
#ifdef RTE_DMADEV_DEBUG
if (!rte_dma_is_valid(dev_id) || nb_cpls == 0)
return 0;
RTE_FUNC_PTR_OR_ERR_RET(*obj->completed, 0);
if (*obj->completed == NULL)
return 0;
#endif
/* Ensure the pointer values are non-null to simplify drivers.
@ -1095,7 +1100,8 @@ rte_dma_completed_status(int16_t dev_id, uint16_t vchan,
#ifdef RTE_DMADEV_DEBUG
if (!rte_dma_is_valid(dev_id) || nb_cpls == 0 || status == NULL)
return 0;
RTE_FUNC_PTR_OR_ERR_RET(*obj->completed_status, 0);
if (*obj->completed_status == NULL)
return 0;
#endif
if (last_idx == NULL)
@ -1129,7 +1135,8 @@ rte_dma_burst_capacity(int16_t dev_id, uint16_t vchan)
#ifdef RTE_DMADEV_DEBUG
if (!rte_dma_is_valid(dev_id))
return 0;
RTE_FUNC_PTR_OR_ERR_RET(*obj->burst_capacity, 0);
if (*obj->burst_capacity == NULL)
return 0;
#endif
return (*obj->burst_capacity)(obj->dev_private, vchan);
}

View File

@ -20,6 +20,7 @@ extern "C" {
#include <stdio.h>
#include <rte_config.h>
#include <rte_common.h>
#include <rte_compat.h>
#include <rte_log.h>
@ -37,12 +38,14 @@ typedef void (*rte_dev_event_cb_fn)(const char *device_name,
void *cb_arg);
/* Macros to check for invalid function pointers */
#define RTE_FUNC_PTR_OR_ERR_RET(func, retval) do { \
#define RTE_FUNC_PTR_OR_ERR_RET(func, retval) RTE_DEPRECATED(RTE_FUNC_PTR_OR_ERR_RET) \
do { \
if ((func) == NULL) \
return retval; \
} while (0)
#define RTE_FUNC_PTR_OR_RET(func) do { \
#define RTE_FUNC_PTR_OR_RET(func) RTE_DEPRECATED(RTE_FUNC_PTR_OR_RET) \
do { \
if ((func) == NULL) \
return; \
} while (0)

View File

@ -266,7 +266,8 @@ rte_eth_dev_create(struct rte_device *device, const char *name,
struct rte_eth_dev *ethdev;
int retval;
RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
if (*ethdev_init == NULL)
return -EINVAL;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
ethdev = rte_eth_dev_allocate(name);
@ -330,7 +331,8 @@ rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
if (!ethdev)
return -ENODEV;
RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
if (*ethdev_uninit == NULL)
return -EINVAL;
ret = ethdev_uninit(ethdev);
if (ret)
@ -560,8 +562,8 @@ rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
/* No need to check the validity again. */
dev = &rte_eth_devices[cur_port];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
-ENOTSUP);
if (*dev->dev_ops->hairpin_queue_peer_bind == NULL)
return -ENOTSUP;
return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
peer_info, direction);
@ -575,8 +577,8 @@ rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
/* No need to check the validity again. */
dev = &rte_eth_devices[cur_port];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
-ENOTSUP);
if (*dev->dev_ops->hairpin_queue_peer_unbind == NULL)
return -ENOTSUP;
return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
direction);
@ -596,8 +598,8 @@ rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
/* No need to check the validity again. */
dev = &rte_eth_devices[peer_port];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
-ENOTSUP);
if (*dev->dev_ops->hairpin_queue_peer_update == NULL)
return -ENOTSUP;
return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
cur_info, peer_info, direction);

View File

@ -130,7 +130,8 @@ rte_eth_dev_pci_generic_probe(struct rte_pci_device *pci_dev,
if (!eth_dev)
return -ENOMEM;
RTE_FUNC_PTR_OR_ERR_RET(*dev_init, -EINVAL);
if (*dev_init == NULL)
return -EINVAL;
ret = dev_init(eth_dev);
if (ret)
rte_eth_dev_release_port(eth_dev);

View File

@ -719,7 +719,8 @@ rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
if (ret != 0)
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
if (*dev->dev_ops->rx_queue_start == NULL)
return -ENOTSUP;
if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
RTE_ETHDEV_LOG(INFO,
@ -751,7 +752,8 @@ rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
if (ret != 0)
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
if (*dev->dev_ops->rx_queue_stop == NULL)
return -ENOTSUP;
if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
RTE_ETHDEV_LOG(INFO,
@ -790,7 +792,8 @@ rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
if (ret != 0)
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
if (*dev->dev_ops->tx_queue_start == NULL)
return -ENOTSUP;
if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
RTE_ETHDEV_LOG(INFO,
@ -822,7 +825,8 @@ rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
if (ret != 0)
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
if (*dev->dev_ops->tx_queue_stop == NULL)
return -ENOTSUP;
if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
RTE_ETHDEV_LOG(INFO,
@ -1077,7 +1081,8 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
if (*dev->dev_ops->dev_configure == NULL)
return -ENOTSUP;
if (dev->data->dev_started) {
RTE_ETHDEV_LOG(ERR,
@ -1447,7 +1452,8 @@ rte_eth_dev_start(uint16_t port_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
if (*dev->dev_ops->dev_start == NULL)
return -ENOTSUP;
if (dev->data->dev_configured == 0) {
RTE_ETHDEV_LOG(INFO,
@ -1493,7 +1499,8 @@ rte_eth_dev_start(uint16_t port_id)
}
if (dev->data->dev_conf.intr_conf.lsc == 0) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
if (*dev->dev_ops->link_update == NULL)
return -ENOTSUP;
(*dev->dev_ops->link_update)(dev, 0);
}
@ -1513,7 +1520,8 @@ rte_eth_dev_stop(uint16_t port_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
if (*dev->dev_ops->dev_stop == NULL)
return -ENOTSUP;
if (dev->data->dev_started == 0) {
RTE_ETHDEV_LOG(INFO,
@ -1541,7 +1549,8 @@ rte_eth_dev_set_link_up(uint16_t port_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
if (*dev->dev_ops->dev_set_link_up == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}
@ -1553,7 +1562,8 @@ rte_eth_dev_set_link_down(uint16_t port_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
if (*dev->dev_ops->dev_set_link_down == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}
@ -1579,7 +1589,8 @@ rte_eth_dev_close(uint16_t port_id)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
if (*dev->dev_ops->dev_close == NULL)
return -ENOTSUP;
*lasterr = (*dev->dev_ops->dev_close)(dev);
if (*lasterr != 0)
lasterr = &binerr;
@ -1599,7 +1610,8 @@ rte_eth_dev_reset(uint16_t port_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
if (*dev->dev_ops->dev_reset == NULL)
return -ENOTSUP;
ret = rte_eth_dev_stop(port_id);
if (ret != 0) {
@ -1624,7 +1636,8 @@ rte_eth_dev_is_removed(uint16_t port_id)
if (dev->state == RTE_ETH_DEV_REMOVED)
return 1;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
if (*dev->dev_ops->is_removed == NULL)
return 0;
ret = dev->dev_ops->is_removed(dev);
if (ret != 0)
@ -1725,7 +1738,8 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
if (*dev->dev_ops->rx_queue_setup == NULL)
return -ENOTSUP;
ret = rte_eth_dev_info_get(port_id, &dev_info);
if (ret != 0)
@ -1928,8 +1942,8 @@ rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
if (ret != 0)
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
-ENOTSUP);
if (*dev->dev_ops->rx_hairpin_queue_setup == NULL)
return -ENOTSUP;
/* if nb_rx_desc is zero use max number of desc from the driver. */
if (nb_rx_desc == 0)
nb_rx_desc = cap.max_nb_desc;
@ -1990,7 +2004,8 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
if (*dev->dev_ops->tx_queue_setup == NULL)
return -ENOTSUP;
ret = rte_eth_dev_info_get(port_id, &dev_info);
if (ret != 0)
@ -2094,8 +2109,8 @@ rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
if (ret != 0)
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
-ENOTSUP);
if (*dev->dev_ops->tx_hairpin_queue_setup == NULL)
return -ENOTSUP;
/* if nb_rx_desc is zero use max number of desc from the driver. */
if (nb_tx_desc == 0)
nb_tx_desc = cap.max_nb_desc;
@ -2152,7 +2167,8 @@ rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
return -EBUSY;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
if (*dev->dev_ops->hairpin_bind == NULL)
return -ENOTSUP;
ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
if (ret != 0)
RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
@ -2176,7 +2192,8 @@ rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
return -EBUSY;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
if (*dev->dev_ops->hairpin_unbind == NULL)
return -ENOTSUP;
ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
if (ret != 0)
RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
@ -2210,8 +2227,8 @@ rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
-ENOTSUP);
if (*dev->dev_ops->hairpin_get_peer_ports == NULL)
return -ENOTSUP;
ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
len, direction);
@ -2282,7 +2299,8 @@ rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
if (*dev->dev_ops->tx_done_cleanup == NULL)
return -ENOTSUP;
/* Call driver to free pending mbufs. */
ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
@ -2302,7 +2320,8 @@ rte_eth_promiscuous_enable(uint16_t port_id)
if (dev->data->promiscuous == 1)
return 0;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
if (*dev->dev_ops->promiscuous_enable == NULL)
return -ENOTSUP;
diag = (*dev->dev_ops->promiscuous_enable)(dev);
dev->data->promiscuous = (diag == 0) ? 1 : 0;
@ -2322,7 +2341,8 @@ rte_eth_promiscuous_disable(uint16_t port_id)
if (dev->data->promiscuous == 0)
return 0;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
if (*dev->dev_ops->promiscuous_disable == NULL)
return -ENOTSUP;
dev->data->promiscuous = 0;
diag = (*dev->dev_ops->promiscuous_disable)(dev);
@ -2355,7 +2375,8 @@ rte_eth_allmulticast_enable(uint16_t port_id)
if (dev->data->all_multicast == 1)
return 0;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
if (*dev->dev_ops->allmulticast_enable == NULL)
return -ENOTSUP;
diag = (*dev->dev_ops->allmulticast_enable)(dev);
dev->data->all_multicast = (diag == 0) ? 1 : 0;
@ -2374,7 +2395,8 @@ rte_eth_allmulticast_disable(uint16_t port_id)
if (dev->data->all_multicast == 0)
return 0;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
if (*dev->dev_ops->allmulticast_disable == NULL)
return -ENOTSUP;
dev->data->all_multicast = 0;
diag = (*dev->dev_ops->allmulticast_disable)(dev);
if (diag != 0)
@ -2411,7 +2433,8 @@ rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
rte_eth_linkstatus_get(dev, eth_link);
else {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
if (*dev->dev_ops->link_update == NULL)
return -ENOTSUP;
(*dev->dev_ops->link_update)(dev, 1);
*eth_link = dev->data->dev_link;
}
@ -2436,7 +2459,8 @@ rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
rte_eth_linkstatus_get(dev, eth_link);
else {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
if (*dev->dev_ops->link_update == NULL)
return -ENOTSUP;
(*dev->dev_ops->link_update)(dev, 0);
*eth_link = dev->data->dev_link;
}
@ -2513,7 +2537,8 @@ rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
memset(stats, 0, sizeof(*stats));
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
if (*dev->dev_ops->stats_get == NULL)
return -ENOTSUP;
stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
}
@ -2527,7 +2552,8 @@ rte_eth_stats_reset(uint16_t port_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
if (*dev->dev_ops->stats_reset == NULL)
return -ENOTSUP;
ret = (*dev->dev_ops->stats_reset)(dev);
if (ret != 0)
return eth_err(port_id, ret);
@ -3043,7 +3069,8 @@ eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
if (*dev->dev_ops->queue_stats_mapping_set == NULL)
return -ENOTSUP;
return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx);
}
@ -3080,7 +3107,8 @@ rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
if (*dev->dev_ops->fw_version_get == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
fw_version, fw_size));
}
@ -3121,7 +3149,8 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
RTE_ETHER_CRC_LEN;
dev_info->max_mtu = UINT16_MAX;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
if (*dev->dev_ops->dev_infos_get == NULL)
return -ENOTSUP;
diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
if (diag != 0) {
/* Cleanup already filled in device information */
@ -3182,7 +3211,8 @@ rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
if (*dev->dev_ops->dev_supported_ptypes_get == NULL)
return 0;
all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
if (!all_ptypes)
@ -3360,7 +3390,8 @@ rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
if (*dev->dev_ops->mtu_set == NULL)
return -ENOTSUP;
/*
* Check if the device supports dev_infos_get, if it does not
@ -3413,7 +3444,8 @@ rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
port_id, vlan_id);
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
if (*dev->dev_ops->vlan_filter_set == NULL)
return -ENOTSUP;
ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
if (ret == 0) {
@ -3448,7 +3480,8 @@ rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
if (*dev->dev_ops->vlan_strip_queue_set == NULL)
return -ENOTSUP;
(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
return 0;
@ -3464,7 +3497,8 @@ rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
if (*dev->dev_ops->vlan_tpid_set == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
tpid));
}
@ -3549,7 +3583,8 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
if (*dev->dev_ops->vlan_offload_set == NULL)
return -ENOTSUP;
dev->data->dev_conf.rxmode.offloads = dev_offloads;
ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
if (ret) {
@ -3594,7 +3629,8 @@ rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
if (*dev->dev_ops->vlan_pvid_set == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
}
@ -3613,7 +3649,8 @@ rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
if (*dev->dev_ops->flow_ctrl_get == NULL)
return -ENOTSUP;
memset(fc_conf, 0, sizeof(*fc_conf));
return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
}
@ -3638,7 +3675,8 @@ rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
if (*dev->dev_ops->flow_ctrl_set == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
}
@ -3899,7 +3937,8 @@ rte_eth_dev_rss_reta_update(uint16_t port_id,
return -ENOTSUP;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
if (*dev->dev_ops->reta_update == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
reta_size));
}
@ -3927,7 +3966,8 @@ rte_eth_dev_rss_reta_query(uint16_t port_id,
if (ret < 0)
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
if (*dev->dev_ops->reta_query == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
reta_size));
}
@ -3971,7 +4011,8 @@ rte_eth_dev_rss_hash_update(uint16_t port_id,
return -ENOTSUP;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
if (*dev->dev_ops->rss_hash_update == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
rss_conf));
}
@ -3992,7 +4033,8 @@ rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
if (*dev->dev_ops->rss_hash_conf_get == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
rss_conf));
}
@ -4018,7 +4060,8 @@ rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
if (*dev->dev_ops->udp_tunnel_port_add == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
udp_tunnel));
}
@ -4044,7 +4087,8 @@ rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
if (*dev->dev_ops->udp_tunnel_port_del == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
udp_tunnel));
}
@ -4057,7 +4101,8 @@ rte_eth_led_on(uint16_t port_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
if (*dev->dev_ops->dev_led_on == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
}
@ -4069,7 +4114,8 @@ rte_eth_led_off(uint16_t port_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
if (*dev->dev_ops->dev_led_off == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
}
@ -4091,7 +4137,8 @@ rte_eth_fec_get_capability(uint16_t port_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
if (*dev->dev_ops->fec_get_capability == NULL)
return -ENOTSUP;
ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
return ret;
@ -4112,7 +4159,8 @@ rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
if (*dev->dev_ops->fec_get == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
}
@ -4124,7 +4172,8 @@ rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
if (*dev->dev_ops->fec_set == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
}
@ -4173,7 +4222,8 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
if (*dev->dev_ops->mac_addr_add == NULL)
return -ENOTSUP;
if (rte_is_zero_ether_addr(addr)) {
RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
@ -4231,7 +4281,8 @@ rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
if (*dev->dev_ops->mac_addr_remove == NULL)
return -ENOTSUP;
index = eth_dev_get_mac_addr_index(port_id, addr);
if (index == 0) {
@ -4273,7 +4324,8 @@ rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
if (!rte_is_valid_assigned_ether_addr(addr))
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
if (*dev->dev_ops->mac_addr_set == NULL)
return -ENOTSUP;
ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
if (ret < 0)
@ -4359,7 +4411,8 @@ rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
}
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
if (*dev->dev_ops->uc_hash_table_set == NULL)
return -ENOTSUP;
ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
if (ret == 0) {
/* Update address in NIC data structure */
@ -4382,7 +4435,8 @@ rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
if (*dev->dev_ops->uc_all_hash_table_set == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
on));
}
@ -4418,7 +4472,8 @@ int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
if (*dev->dev_ops->set_queue_rate_limit == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
queue_idx, tx_rate));
}
@ -4444,7 +4499,8 @@ int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
port_id);
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_avail_thresh_set, -ENOTSUP);
if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev,
queue_id, avail_thresh));
}
@ -4462,7 +4518,8 @@ int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
if (*queue_id >= dev->data->nb_rx_queues)
*queue_id = 0;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_avail_thresh_query, -ENOTSUP);
if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev,
queue_id, avail_thresh));
}
@ -4736,7 +4793,8 @@ rte_eth_dev_rx_intr_enable(uint16_t port_id,
if (ret != 0)
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
if (*dev->dev_ops->rx_queue_intr_enable == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
}
@ -4754,7 +4812,8 @@ rte_eth_dev_rx_intr_disable(uint16_t port_id,
if (ret != 0)
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
if (*dev->dev_ops->rx_queue_intr_disable == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
}
@ -5018,7 +5077,8 @@ rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
if (*dev->dev_ops->rxq_info_get == NULL)
return -ENOTSUP;
memset(qinfo, 0, sizeof(*qinfo));
dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
@ -5063,7 +5123,8 @@ rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
if (*dev->dev_ops->txq_info_get == NULL)
return -ENOTSUP;
memset(qinfo, 0, sizeof(*qinfo));
dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
@ -5093,7 +5154,8 @@ rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
if (*dev->dev_ops->rx_burst_mode_get == NULL)
return -ENOTSUP;
memset(mode, 0, sizeof(*mode));
return eth_err(port_id,
dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
@ -5120,7 +5182,8 @@ rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
if (*dev->dev_ops->tx_burst_mode_get == NULL)
return -ENOTSUP;
memset(mode, 0, sizeof(*mode));
return eth_err(port_id,
dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
@ -5147,7 +5210,8 @@ rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP);
if (*dev->dev_ops->get_monitor_addr == NULL)
return -ENOTSUP;
return eth_err(port_id,
dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
}
@ -5162,7 +5226,8 @@ rte_eth_dev_set_mc_addr_list(uint16_t port_id,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
if (*dev->dev_ops->set_mc_addr_list == NULL)
return -ENOTSUP;
return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
mc_addr_set, nb_mc_addr));
}
@ -5175,7 +5240,8 @@ rte_eth_timesync_enable(uint16_t port_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
if (*dev->dev_ops->timesync_enable == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
}
@ -5187,7 +5253,8 @@ rte_eth_timesync_disable(uint16_t port_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
if (*dev->dev_ops->timesync_disable == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
}
@ -5207,7 +5274,8 @@ rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
if (*dev->dev_ops->timesync_read_rx_timestamp == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
(dev, timestamp, flags));
}
@ -5228,7 +5296,8 @@ rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
if (*dev->dev_ops->timesync_read_tx_timestamp == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
(dev, timestamp));
}
@ -5241,7 +5310,8 @@ rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
if (*dev->dev_ops->timesync_adjust_time == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
}
@ -5260,7 +5330,8 @@ rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
if (*dev->dev_ops->timesync_read_time == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
timestamp));
}
@ -5280,7 +5351,8 @@ rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
if (*dev->dev_ops->timesync_write_time == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
timestamp));
}
@ -5299,7 +5371,8 @@ rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
if (*dev->dev_ops->read_clock == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
}
@ -5318,7 +5391,8 @@ rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
if (*dev->dev_ops->get_reg == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
}
@ -5330,7 +5404,8 @@ rte_eth_dev_get_eeprom_length(uint16_t port_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
if (*dev->dev_ops->get_eeprom_length == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
}
@ -5349,7 +5424,8 @@ rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
if (*dev->dev_ops->get_eeprom == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
}
@ -5368,7 +5444,8 @@ rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
if (*dev->dev_ops->set_eeprom == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
}
@ -5388,7 +5465,8 @@ rte_eth_dev_get_module_info(uint16_t port_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
if (*dev->dev_ops->get_module_info == NULL)
return -ENOTSUP;
return (*dev->dev_ops->get_module_info)(dev, modinfo);
}
@ -5422,7 +5500,8 @@ rte_eth_dev_get_module_eeprom(uint16_t port_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
if (*dev->dev_ops->get_module_eeprom == NULL)
return -ENOTSUP;
return (*dev->dev_ops->get_module_eeprom)(dev, info);
}
@ -5444,7 +5523,8 @@ rte_eth_dev_get_dcb_info(uint16_t port_id,
memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
if (*dev->dev_ops->get_dcb_info == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
}
@ -5500,7 +5580,8 @@ rte_eth_dev_hairpin_capability_get(uint16_t port_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
if (*dev->dev_ops->hairpin_cap_get == NULL)
return -ENOTSUP;
memset(cap, 0, sizeof(*cap));
return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
}
@ -5775,7 +5856,8 @@ rte_eth_representor_info_get(uint16_t port_id,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
if (*dev->dev_ops->representor_info_get == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}
@ -5799,7 +5881,8 @@ rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
if (*dev->dev_ops->rx_metadata_negotiate == NULL)
return -ENOTSUP;
return eth_err(port_id,
(*dev->dev_ops->rx_metadata_negotiate)(dev, features));
}
@ -5826,8 +5909,8 @@ rte_eth_ip_reassembly_capability_get(uint16_t port_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_capability_get,
-ENOTSUP);
if (*dev->dev_ops->ip_reassembly_capability_get == NULL)
return -ENOTSUP;
memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));
return eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
@ -5856,8 +5939,8 @@ rte_eth_ip_reassembly_conf_get(uint16_t port_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_get,
-ENOTSUP);
if (*dev->dev_ops->ip_reassembly_conf_get == NULL)
return -ENOTSUP;
memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
return eth_err(port_id,
(*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));
@ -5894,8 +5977,8 @@ rte_eth_ip_reassembly_conf_set(uint16_t port_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_set,
-ENOTSUP);
if (*dev->dev_ops->ip_reassembly_conf_set == NULL)
return -ENOTSUP;
return eth_err(port_id,
(*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));
}
@ -5913,7 +5996,8 @@ rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_dev_priv_dump, -ENOTSUP);
if (*dev->dev_ops->eth_dev_priv_dump == NULL)
return -ENOTSUP;
return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
}

View File

@ -5710,7 +5710,8 @@ rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
qd = p->rxq.data[queue_id];
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
RTE_FUNC_PTR_OR_ERR_RET(*p->rx_queue_count, -ENOTSUP);
if (*p->rx_queue_count == NULL)
return -ENOTSUP;
if (qd == NULL)
return -EINVAL;
@ -5784,7 +5785,8 @@ rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
if (qd == NULL)
return -ENODEV;
#endif
RTE_FUNC_PTR_OR_ERR_RET(*p->rx_descriptor_status, -ENOTSUP);
if (*p->rx_descriptor_status == NULL)
return -ENOTSUP;
return (*p->rx_descriptor_status)(qd, offset);
}
@ -5854,7 +5856,8 @@ static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
if (qd == NULL)
return -ENODEV;
#endif
RTE_FUNC_PTR_OR_ERR_RET(*p->tx_descriptor_status, -ENOTSUP);
if (*p->tx_descriptor_status == NULL)
return -ENOTSUP;
return (*p->tx_descriptor_status)(qd, offset);
}

View File

@ -974,9 +974,8 @@ rte_event_crypto_adapter_queue_pair_add(uint8_t id,
adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
RTE_FUNC_PTR_OR_ERR_RET(
*dev->dev_ops->crypto_adapter_queue_pair_add,
-ENOTSUP);
if (*dev->dev_ops->crypto_adapter_queue_pair_add == NULL)
return -ENOTSUP;
if (dev_info->qpairs == NULL) {
dev_info->qpairs =
rte_zmalloc_socket(adapter->mem_name,
@ -1076,9 +1075,8 @@ rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
RTE_FUNC_PTR_OR_ERR_RET(
*dev->dev_ops->crypto_adapter_queue_pair_del,
-ENOTSUP);
if (*dev->dev_ops->crypto_adapter_queue_pair_del == NULL)
return -ENOTSUP;
ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
dev_info->dev,
queue_pair_id);

View File

@ -2657,8 +2657,8 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
dev_info = &rx_adapter->eth_devices[eth_dev_id];
if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_add,
-ENOTSUP);
if (*dev->dev_ops->eth_rx_adapter_queue_add == NULL)
return -ENOTSUP;
if (dev_info->rx_queue == NULL) {
dev_info->rx_queue =
rte_zmalloc_socket(rx_adapter->mem_name,
@ -2752,8 +2752,8 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
dev_info = &rx_adapter->eth_devices[eth_dev_id];
if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_del,
-ENOTSUP);
if (*dev->dev_ops->eth_rx_adapter_queue_del == NULL)
return -ENOTSUP;
ret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev,
&rte_eth_devices[eth_dev_id],
rx_queue_id);
@ -2863,9 +2863,8 @@ rte_event_eth_rx_adapter_vector_limits_get(
}
if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
RTE_FUNC_PTR_OR_ERR_RET(
*dev->dev_ops->eth_rx_adapter_vector_limits_get,
-ENOTSUP);
if (*dev->dev_ops->eth_rx_adapter_vector_limits_get == NULL)
return -ENOTSUP;
ret = dev->dev_ops->eth_rx_adapter_vector_limits_get(
dev, &rte_eth_devices[eth_port_id], limits);
} else {

View File

@ -92,7 +92,8 @@ rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
memset(dev_info, 0, sizeof(struct rte_event_dev_info));
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
if (*dev->dev_ops->dev_infos_get == NULL)
return -ENOTSUP;
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
@ -216,7 +217,8 @@ event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
if (nb_queues != 0) {
queues_cfg = dev->data->queues_cfg;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
if (*dev->dev_ops->queue_release == NULL)
return -ENOTSUP;
for (i = nb_queues; i < old_nb_queues; i++)
(*dev->dev_ops->queue_release)(dev, i);
@ -229,7 +231,8 @@ event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
sizeof(queues_cfg[0]) * new_qs);
}
} else {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
if (*dev->dev_ops->queue_release == NULL)
return -ENOTSUP;
for (i = nb_queues; i < old_nb_queues; i++)
(*dev->dev_ops->queue_release)(dev, i);
@ -254,7 +257,8 @@ event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
dev->data->dev_id);
if (nb_ports != 0) { /* re-config */
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
if (*dev->dev_ops->port_release == NULL)
return -ENOTSUP;
ports = dev->data->ports;
ports_cfg = dev->data->ports_cfg;
@ -279,7 +283,8 @@ event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
}
} else {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
if (*dev->dev_ops->port_release == NULL)
return -ENOTSUP;
ports = dev->data->ports;
for (i = nb_ports; i < old_nb_ports; i++) {
@ -303,8 +308,10 @@ rte_event_dev_configure(uint8_t dev_id,
RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_eventdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
if (*dev->dev_ops->dev_infos_get == NULL)
return -ENOTSUP;
if (*dev->dev_ops->dev_configure == NULL)
return -ENOTSUP;
if (dev->data->dev_started) {
RTE_EDEV_LOG_ERR(
@ -509,7 +516,8 @@ rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
if (*dev->dev_ops->queue_def_conf == NULL)
return -ENOTSUP;
memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
return 0;
@ -595,11 +603,12 @@ rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
return -EBUSY;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
if (*dev->dev_ops->queue_setup == NULL)
return -ENOTSUP;
if (queue_conf == NULL) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
-ENOTSUP);
if (*dev->dev_ops->queue_def_conf == NULL)
return -ENOTSUP;
(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
queue_conf = &def_conf;
}
@ -635,7 +644,8 @@ rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
if (*dev->dev_ops->port_def_conf == NULL)
return -ENOTSUP;
memset(port_conf, 0, sizeof(struct rte_event_port_conf));
(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
return 0;
@ -706,11 +716,12 @@ rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
return -EBUSY;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
if (*dev->dev_ops->port_setup == NULL)
return -ENOTSUP;
if (port_conf == NULL) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
-ENOTSUP);
if (*dev->dev_ops->port_def_conf == NULL)
return -ENOTSUP;
(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
port_conf = &def_conf;
}
@ -896,7 +907,8 @@ rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
return -ENOTSUP;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_attr_set, -ENOTSUP);
if (*dev->dev_ops->queue_attr_set == NULL)
return -ENOTSUP;
return (*dev->dev_ops->queue_attr_set)(dev, queue_id, attr_id,
attr_value);
}
@ -1045,7 +1057,8 @@ rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
* This allows PMDs which handle unlink synchronously to not implement
* this function at all.
*/
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);
if (*dev->dev_ops->port_unlinks_in_progress == NULL)
return 0;
return (*dev->dev_ops->port_unlinks_in_progress)(dev,
dev->data->ports[port_id]);
@ -1087,7 +1100,8 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_eventdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
if (*dev->dev_ops->timeout_ticks == NULL)
return -ENOTSUP;
if (timeout_ticks == NULL)
return -EINVAL;
@ -1119,7 +1133,8 @@ rte_event_dev_dump(uint8_t dev_id, FILE *f)
RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_eventdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
if (*dev->dev_ops->dump == NULL)
return -ENOTSUP;
if (f == NULL)
return -EINVAL;
@ -1285,7 +1300,8 @@ rte_event_dev_start(uint8_t dev_id)
RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_eventdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
if (*dev->dev_ops->dev_start == NULL)
return -ENOTSUP;
if (dev->data->dev_started != 0) {
RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already started",
@ -1331,7 +1347,8 @@ rte_event_dev_stop(uint8_t dev_id)
RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
dev = &rte_eventdevs[dev_id];
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
if (*dev->dev_ops->dev_stop == NULL)
return;
if (dev->data->dev_started == 0) {
RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already stopped",
@ -1352,7 +1369,8 @@ rte_event_dev_close(uint8_t dev_id)
RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_eventdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
if (*dev->dev_ops->dev_close == NULL)
return -ENOTSUP;
/* Device must be stopped before it can be closed */
if (dev->data->dev_started == 1) {

View File

@ -154,7 +154,8 @@ rte_mempool_ops_get_info(const struct rte_mempool *mp,
ops = rte_mempool_get_ops(mp->ops_index);
RTE_FUNC_PTR_OR_ERR_RET(ops->get_info, -ENOTSUP);
if (ops->get_info == NULL)
return -ENOTSUP;
return ops->get_info(mp, info);
}

View File

@ -71,12 +71,14 @@ rte_rawdev_info_get(uint16_t dev_id, struct rte_rawdev_info *dev_info,
int ret = 0;
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
RTE_FUNC_PTR_OR_ERR_RET(dev_info, -EINVAL);
if (dev_info == NULL)
return -EINVAL;
rawdev = &rte_rawdevs[dev_id];
if (dev_info->dev_private != NULL) {
RTE_FUNC_PTR_OR_ERR_RET(*rawdev->dev_ops->dev_info_get, -ENOTSUP);
if (*rawdev->dev_ops->dev_info_get == NULL)
return -ENOTSUP;
ret = (*rawdev->dev_ops->dev_info_get)(rawdev,
dev_info->dev_private,
dev_private_size);
@ -97,11 +99,13 @@ rte_rawdev_configure(uint16_t dev_id, struct rte_rawdev_info *dev_conf,
int diag;
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
RTE_FUNC_PTR_OR_ERR_RET(dev_conf, -EINVAL);
if (dev_conf == NULL)
return -EINVAL;
dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
if (*dev->dev_ops->dev_configure == NULL)
return -ENOTSUP;
if (dev->started) {
RTE_RDEV_ERR(
@ -131,7 +135,8 @@ rte_rawdev_queue_conf_get(uint16_t dev_id,
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
if (*dev->dev_ops->queue_def_conf == NULL)
return -ENOTSUP;
return (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf,
queue_conf_size);
}
@ -147,7 +152,8 @@ rte_rawdev_queue_setup(uint16_t dev_id,
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
if (*dev->dev_ops->queue_setup == NULL)
return -ENOTSUP;
return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf,
queue_conf_size);
}
@ -160,7 +166,8 @@ rte_rawdev_queue_release(uint16_t dev_id, uint16_t queue_id)
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
if (*dev->dev_ops->queue_release == NULL)
return -ENOTSUP;
return (*dev->dev_ops->queue_release)(dev, queue_id);
}
@ -172,7 +179,8 @@ rte_rawdev_queue_count(uint16_t dev_id)
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_count, -ENOTSUP);
if (*dev->dev_ops->queue_count == NULL)
return -ENOTSUP;
return (*dev->dev_ops->queue_count)(dev);
}
@ -186,7 +194,8 @@ rte_rawdev_get_attr(uint16_t dev_id,
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->attr_get, -ENOTSUP);
if (*dev->dev_ops->attr_get == NULL)
return -ENOTSUP;
return (*dev->dev_ops->attr_get)(dev, attr_name, attr_value);
}
@ -200,7 +209,8 @@ rte_rawdev_set_attr(uint16_t dev_id,
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->attr_set, -ENOTSUP);
if (*dev->dev_ops->attr_set == NULL)
return -ENOTSUP;
return (*dev->dev_ops->attr_set)(dev, attr_name, attr_value);
}
@ -215,7 +225,8 @@ rte_rawdev_enqueue_buffers(uint16_t dev_id,
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->enqueue_bufs, -ENOTSUP);
if (*dev->dev_ops->enqueue_bufs == NULL)
return -ENOTSUP;
return (*dev->dev_ops->enqueue_bufs)(dev, buffers, count, context);
}
@ -230,7 +241,8 @@ rte_rawdev_dequeue_buffers(uint16_t dev_id,
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dequeue_bufs, -ENOTSUP);
if (*dev->dev_ops->dequeue_bufs == NULL)
return -ENOTSUP;
return (*dev->dev_ops->dequeue_bufs)(dev, buffers, count, context);
}
@ -242,7 +254,8 @@ rte_rawdev_dump(uint16_t dev_id, FILE *f)
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
if (*dev->dev_ops->dump == NULL)
return -ENOTSUP;
return (*dev->dev_ops->dump)(dev, f);
}
@ -251,7 +264,8 @@ xstats_get_count(uint16_t dev_id)
{
struct rte_rawdev *dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_names, -ENOTSUP);
if (*dev->dev_ops->xstats_get_names == NULL)
return -ENOTSUP;
return (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
}
@ -273,7 +287,8 @@ rte_rawdev_xstats_names_get(uint16_t dev_id,
dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_names, -ENOTSUP);
if (*dev->dev_ops->xstats_get_names == NULL)
return -ENOTSUP;
return (*dev->dev_ops->xstats_get_names)(dev, xstats_names, size);
}
@ -287,7 +302,8 @@ rte_rawdev_xstats_get(uint16_t dev_id,
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
const struct rte_rawdev *dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get, -ENOTSUP);
if (*dev->dev_ops->xstats_get == NULL)
return -ENOTSUP;
return (*dev->dev_ops->xstats_get)(dev, ids, values, n);
}
@ -306,7 +322,8 @@ rte_rawdev_xstats_by_name_get(uint16_t dev_id,
id = &temp; /* driver never gets a NULL value */
/* implemented by driver */
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_by_name, -ENOTSUP);
if (*dev->dev_ops->xstats_get_by_name == NULL)
return -ENOTSUP;
return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
}
@ -317,7 +334,8 @@ rte_rawdev_xstats_reset(uint16_t dev_id,
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
struct rte_rawdev *dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_reset, -ENOTSUP);
if (*dev->dev_ops->xstats_reset == NULL)
return -ENOTSUP;
return (*dev->dev_ops->xstats_reset)(dev, ids, nb_ids);
}
@ -327,7 +345,8 @@ rte_rawdev_firmware_status_get(uint16_t dev_id, rte_rawdev_obj_t status_info)
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
struct rte_rawdev *dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->firmware_status_get, -ENOTSUP);
if (*dev->dev_ops->firmware_status_get == NULL)
return -ENOTSUP;
return (*dev->dev_ops->firmware_status_get)(dev, status_info);
}
@ -337,7 +356,8 @@ rte_rawdev_firmware_version_get(uint16_t dev_id, rte_rawdev_obj_t version_info)
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
struct rte_rawdev *dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->firmware_version_get, -ENOTSUP);
if (*dev->dev_ops->firmware_version_get == NULL)
return -ENOTSUP;
return (*dev->dev_ops->firmware_version_get)(dev, version_info);
}
@ -350,7 +370,8 @@ rte_rawdev_firmware_load(uint16_t dev_id, rte_rawdev_obj_t firmware_image)
if (!firmware_image)
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->firmware_load, -ENOTSUP);
if (*dev->dev_ops->firmware_load == NULL)
return -ENOTSUP;
return (*dev->dev_ops->firmware_load)(dev, firmware_image);
}
@ -360,7 +381,8 @@ rte_rawdev_firmware_unload(uint16_t dev_id)
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
struct rte_rawdev *dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->firmware_load, -ENOTSUP);
if (*dev->dev_ops->firmware_load == NULL)
return -ENOTSUP;
return (*dev->dev_ops->firmware_unload)(dev);
}
@ -370,7 +392,8 @@ rte_rawdev_selftest(uint16_t dev_id)
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
struct rte_rawdev *dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
if (*dev->dev_ops->dev_selftest == NULL)
return -ENOTSUP;
return (*dev->dev_ops->dev_selftest)(dev_id);
}
@ -435,7 +458,8 @@ rte_rawdev_close(uint16_t dev_id)
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
if (*dev->dev_ops->dev_close == NULL)
return -ENOTSUP;
/* Device must be stopped before it can be closed */
if (dev->started == 1) {
RTE_RDEV_ERR("Device %u must be stopped before closing",
@ -454,7 +478,8 @@ rte_rawdev_reset(uint16_t dev_id)
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
if (*dev->dev_ops->dev_reset == NULL)
return -ENOTSUP;
/* Reset is not dependent on state of the device */
return (*dev->dev_ops->dev_reset)(dev);
}

View File

@ -189,7 +189,8 @@ regexdev_info_get(uint8_t dev_id, struct rte_regexdev_info *dev_info)
if (dev_info == NULL)
return -EINVAL;
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
if (*dev->dev_ops->dev_info_get == NULL)
return -ENOTSUP;
return (*dev->dev_ops->dev_info_get)(dev, dev_info);
}
@ -211,7 +212,8 @@ rte_regexdev_configure(uint8_t dev_id, const struct rte_regexdev_config *cfg)
if (cfg == NULL)
return -EINVAL;
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
if (*dev->dev_ops->dev_configure == NULL)
return -ENOTSUP;
if (dev->data->dev_started) {
RTE_REGEXDEV_LOG
(ERR, "Dev %u must be stopped to allow configuration\n",
@ -301,7 +303,8 @@ rte_regexdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_qp_setup, -ENOTSUP);
if (*dev->dev_ops->dev_qp_setup == NULL)
return -ENOTSUP;
if (dev->data->dev_started) {
RTE_REGEXDEV_LOG
(ERR, "Dev %u must be stopped to allow configuration\n",
@ -332,7 +335,8 @@ rte_regexdev_start(uint8_t dev_id)
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
if (*dev->dev_ops->dev_start == NULL)
return -ENOTSUP;
ret = (*dev->dev_ops->dev_start)(dev);
if (ret == 0)
dev->data->dev_started = 1;
@ -346,7 +350,8 @@ rte_regexdev_stop(uint8_t dev_id)
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);
if (*dev->dev_ops->dev_stop == NULL)
return -ENOTSUP;
(*dev->dev_ops->dev_stop)(dev);
dev->data->dev_started = 0;
return 0;
@ -359,7 +364,8 @@ rte_regexdev_close(uint8_t dev_id)
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
if (*dev->dev_ops->dev_close == NULL)
return -ENOTSUP;
(*dev->dev_ops->dev_close)(dev);
dev->data->dev_started = 0;
dev->state = RTE_REGEXDEV_UNUSED;
@ -374,7 +380,8 @@ rte_regexdev_attr_get(uint8_t dev_id, enum rte_regexdev_attr_id attr_id,
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_attr_get, -ENOTSUP);
if (*dev->dev_ops->dev_attr_get == NULL)
return -ENOTSUP;
if (attr_value == NULL) {
RTE_REGEXDEV_LOG(ERR, "Dev %d attribute value can't be NULL\n",
dev_id);
@ -391,7 +398,8 @@ rte_regexdev_attr_set(uint8_t dev_id, enum rte_regexdev_attr_id attr_id,
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_attr_set, -ENOTSUP);
if (*dev->dev_ops->dev_attr_set == NULL)
return -ENOTSUP;
if (attr_value == NULL) {
RTE_REGEXDEV_LOG(ERR, "Dev %d attribute value can't be NULL\n",
dev_id);
@ -409,7 +417,8 @@ rte_regexdev_rule_db_update(uint8_t dev_id,
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_rule_db_update, -ENOTSUP);
if (*dev->dev_ops->dev_rule_db_update == NULL)
return -ENOTSUP;
if (rules == NULL) {
RTE_REGEXDEV_LOG(ERR, "Dev %d rules can't be NULL\n",
dev_id);
@ -425,8 +434,8 @@ rte_regexdev_rule_db_compile_activate(uint8_t dev_id)
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_rule_db_compile_activate,
-ENOTSUP);
if (*dev->dev_ops->dev_rule_db_compile_activate == NULL)
return -ENOTSUP;
return (*dev->dev_ops->dev_rule_db_compile_activate)(dev);
}
@ -438,8 +447,8 @@ rte_regexdev_rule_db_import(uint8_t dev_id, const char *rule_db,
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_db_import,
-ENOTSUP);
if (*dev->dev_ops->dev_db_import == NULL)
return -ENOTSUP;
if (rule_db == NULL) {
RTE_REGEXDEV_LOG(ERR, "Dev %d rules can't be NULL\n",
dev_id);
@ -455,8 +464,8 @@ rte_regexdev_rule_db_export(uint8_t dev_id, char *rule_db)
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_db_export,
-ENOTSUP);
if (*dev->dev_ops->dev_db_export == NULL)
return -ENOTSUP;
return (*dev->dev_ops->dev_db_export)(dev, rule_db);
}
@ -468,8 +477,8 @@ rte_regexdev_xstats_names_get(uint8_t dev_id,
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_xstats_names_get,
-ENOTSUP);
if (*dev->dev_ops->dev_xstats_names_get == NULL)
return -ENOTSUP;
if (xstats_map == NULL) {
RTE_REGEXDEV_LOG(ERR, "Dev %d xstats map can't be NULL\n",
dev_id);
@ -486,7 +495,8 @@ rte_regexdev_xstats_get(uint8_t dev_id, const uint16_t *ids,
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_xstats_get, -ENOTSUP);
if (*dev->dev_ops->dev_xstats_get == NULL)
return -ENOTSUP;
if (ids == NULL) {
RTE_REGEXDEV_LOG(ERR, "Dev %d ids can't be NULL\n", dev_id);
return -EINVAL;
@ -506,8 +516,8 @@ rte_regexdev_xstats_by_name_get(uint8_t dev_id, const char *name,
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_xstats_by_name_get,
-ENOTSUP);
if (*dev->dev_ops->dev_xstats_by_name_get == NULL)
return -ENOTSUP;
if (name == NULL) {
RTE_REGEXDEV_LOG(ERR, "Dev %d name can't be NULL\n", dev_id);
return -EINVAL;
@ -531,7 +541,8 @@ rte_regexdev_xstats_reset(uint8_t dev_id, const uint16_t *ids,
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_xstats_reset, -ENOTSUP);
if (*dev->dev_ops->dev_xstats_reset == NULL)
return -ENOTSUP;
if (ids == NULL) {
RTE_REGEXDEV_LOG(ERR, "Dev %d ids can't be NULL\n", dev_id);
return -EINVAL;
@ -546,7 +557,8 @@ rte_regexdev_selftest(uint8_t dev_id)
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
if (*dev->dev_ops->dev_selftest == NULL)
return -ENOTSUP;
return (*dev->dev_ops->dev_selftest)(dev);
}
@ -557,7 +569,8 @@ rte_regexdev_dump(uint8_t dev_id, FILE *f)
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_regex_devices[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_dump, -ENOTSUP);
if (*dev->dev_ops->dev_dump == NULL)
return -ENOTSUP;
if (f == NULL) {
RTE_REGEXDEV_LOG(ERR, "Dev %d file can't be NULL\n", dev_id);
return -EINVAL;

View File

@ -1473,7 +1473,8 @@ rte_regexdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
struct rte_regexdev *dev = &rte_regex_devices[dev_id];
#ifdef RTE_LIBRTE_REGEXDEV_DEBUG
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
RTE_FUNC_PTR_OR_ERR_RET(*dev->enqueue, -ENOTSUP);
if (*dev->enqueue == NULL)
return -ENOTSUP;
if (qp_id >= dev->data->dev_conf.nb_queue_pairs) {
RTE_REGEXDEV_LOG(ERR, "Invalid queue %d\n", qp_id);
return -EINVAL;
@ -1532,7 +1533,8 @@ rte_regexdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
struct rte_regexdev *dev = &rte_regex_devices[dev_id];
#ifdef RTE_LIBRTE_REGEXDEV_DEBUG
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dequeue, -ENOTSUP);
if (*dev->dequeue == NULL)
return -ENOTSUP;
if (qp_id >= dev->data->dev_conf.nb_queue_pairs) {
RTE_REGEXDEV_LOG(ERR, "Invalid queue %d\n", qp_id);
return -EINVAL;

View File

@ -134,7 +134,8 @@ __rte_security_set_pkt_metadata(struct rte_security_ctx *instance,
RTE_PTR_OR_ERR_RET(instance, -EINVAL);
RTE_PTR_OR_ERR_RET(instance->ops, -EINVAL);
#endif
RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->set_pkt_metadata, -ENOTSUP);
if (*instance->ops->set_pkt_metadata == NULL)
return -ENOTSUP;
return instance->ops->set_pkt_metadata(instance->device,
sess, m, params);
}
@ -148,7 +149,8 @@ __rte_security_get_userdata(struct rte_security_ctx *instance, uint64_t md)
RTE_PTR_OR_ERR_RET(instance, NULL);
RTE_PTR_OR_ERR_RET(instance->ops, NULL);
#endif
RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->get_userdata, NULL);
if (*instance->ops->get_userdata == NULL)
return NULL;
if (instance->ops->get_userdata(instance->device, md, &userdata))
return NULL;

View File

@ -266,7 +266,8 @@ rte_vdpa_get_stats_names(struct rte_vdpa_device *dev,
if (!dev)
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats_names, -ENOTSUP);
if (dev->ops->get_stats_names == NULL)
return -ENOTSUP;
return dev->ops->get_stats_names(dev, stats_names, size);
}
@ -278,7 +279,8 @@ rte_vdpa_get_stats(struct rte_vdpa_device *dev, uint16_t qid,
if (!dev || !stats || !n)
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats, -ENOTSUP);
if (dev->ops->get_stats == NULL)
return -ENOTSUP;
return dev->ops->get_stats(dev, qid, stats, n);
}
@ -289,7 +291,8 @@ rte_vdpa_reset_stats(struct rte_vdpa_device *dev, uint16_t qid)
if (!dev)
return -EINVAL;
RTE_FUNC_PTR_OR_ERR_RET(dev->ops->reset_stats, -ENOTSUP);
if (dev->ops->reset_stats == NULL)
return -ENOTSUP;
return dev->ops->reset_stats(dev, qid);
}

View File

@ -3366,8 +3366,10 @@ int rte_vhost_host_notifier_ctrl(int vid, uint16_t qid, bool enable)
q_last = qid;
}
RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP);
RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP);
if (vdpa_dev->ops->get_vfio_device_fd == NULL)
return -ENOTSUP;
if (vdpa_dev->ops->get_notify_area == NULL)
return -ENOTSUP;
vfio_device_fd = vdpa_dev->ops->get_vfio_device_fd(vid);
if (vfio_device_fd < 0)