ethdev: remove underscore prefix from internal API

The '_rte_eth_dev_callback_process()' and '_rte_eth_dev_reset()'
internal APIs have an unconventional underscore ('_') prefix.
Although this is not documented, most probably the prefix was meant to
mark them as internal. Since the '__rte_internal' marker now serves
that purpose, drop the '_' prefix from the API names.

For '_rte_eth_dev_reset()', a public API named 'rte_eth_dev_reset()'
already exists, so '_rte_eth_dev_reset()' is renamed to
'rte_eth_dev_internal_reset()' instead.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Andrew Rybchenko <arybchenko@solarflare.com>
Acked-by: David Marchand <david.marchand@redhat.com>
Acked-by: Sachin Saxena <sachin.saxena@nxp.com>
Ferruh Yigit 2020-09-09 14:01:48 +01:00
parent 8682e492ed
commit 5723fbed4f
40 changed files with 95 additions and 102 deletions
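For drivers, the call sites change mechanically, as the per-PMD hunks
below show. As a minimal sketch (the handler name and flow are
illustrative, not taken from any specific driver), a PMD interrupt
handler now reports a link state change like this; the symbol stays
internal, so only code built as part of DPDK may call it:

    /* Hypothetical PMD interrupt handler raising an LSC event. */
    #include <rte_ethdev_driver.h>  /* internal ethdev API */

    static void
    example_pmd_interrupt_handler(void *param)
    {
            struct rte_eth_dev *dev = param;

            /* ... read and clear the device interrupt cause ... */

            /* Formerly _rte_eth_dev_callback_process() */
            rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
    }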

@@ -474,8 +474,8 @@ virtual_ethdev_simulate_link_status_interrupt(uint16_t port_id,
vrtl_eth_dev->data->dev_link.link_status = link_status;
-_rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC,
+NULL);
}
int

@@ -600,9 +600,9 @@ thread safety all these operations should be called from the same thread.
For example when PF is reset, the PF sends a message to notify VFs of
this event and also trigger an interrupt to VFs. Then in the interrupt
service routine the VFs detects this notification message and calls
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL).
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL).
This means that a PF reset triggers an RTE_ETH_EVENT_INTR_RESET
-event within VFs. The function _rte_eth_dev_callback_process() will
+event within VFs. The function rte_eth_dev_callback_process() will
call the registered callback function. The callback function can trigger
the application to handle all operations the VF reset requires including
stopping Rx/Tx queues and calling rte_eth_dev_reset().
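To make the documented flow concrete, here is a minimal sketch of the
application side, assuming the usual callback registration API; the
deferred-flag approach and helper names are illustrative, not part of
this patch. The callback only records the event (it runs in the
interrupt thread), and the main loop later stops the queues, calls
rte_eth_dev_reset() and restarts the port:

    #include <stdbool.h>
    #include <stdio.h>
    #include <rte_common.h>
    #include <rte_ethdev.h>

    static volatile bool vf_reset_pending;

    /* Registered with rte_eth_dev_callback_register(); keep it short. */
    static int
    reset_event_cb(uint16_t port_id, enum rte_eth_event_type event,
                   void *cb_arg, void *ret_param)
    {
            RTE_SET_USED(cb_arg);
            RTE_SET_USED(ret_param);

            if (event == RTE_ETH_EVENT_INTR_RESET) {
                    printf("Port %u: reset event received\n",
                           (unsigned int)port_id);
                    vf_reset_pending = true;
            }
            return 0;
    }

    /* Called from the application's main loop when it is safe to recover. */
    static void
    handle_vf_reset(uint16_t port_id)
    {
            if (!vf_reset_pending)
                    return;
            vf_reset_pending = false;

            rte_eth_dev_stop(port_id);   /* stop Rx/Tx queues first */
            rte_eth_dev_reset(port_id);  /* then reset the port */
            /* ... reconfigure queues and rte_eth_dev_start() again ... */
    }

    /* Registration, e.g. right after configuring the port. */
    static void
    register_reset_callback(uint16_t port_id)
    {
            rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
                                          reset_event_cb, NULL);
    }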

@@ -126,6 +126,11 @@ API Changes
* ethdev: ``rte_eth_rx_descriptor_done()`` API has been deprecated.
+* Renamed internal ethdev APIs:
+  * ``_rte_eth_dev_callback_process()`` -> ``rte_eth_dev_callback_process()``
+  * ``_rte_eth_dev_reset()`` -> ``rte_eth_dev_internal_reset()``
* rawdev: Added a structure size parameter to the functions
``rte_rawdev_queue_setup()``, ``rte_rawdev_queue_conf_get()``,
``rte_rawdev_info_get()`` and ``rte_rawdev_configure()``,

@@ -1395,8 +1395,7 @@ atl_dev_interrupt_action(struct rte_eth_dev *dev,
/* Notify userapp if link status changed */
if (!atl_dev_link_update(dev, 0)) {
atl_dev_link_status_print(dev);
-_rte_eth_dev_callback_process(dev,
-RTE_ETH_EVENT_INTR_LSC, NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
} else {
if (hw->aq_fw_ops->send_macsec_req == NULL)
goto done;
@@ -1422,7 +1421,7 @@ atl_dev_interrupt_action(struct rte_eth_dev *dev,
resp.stats.egress_expired ||
resp.stats.ingress_expired) {
PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
-_rte_eth_dev_callback_process(dev,
+rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_MACSEC, NULL);
}
}

@@ -1528,9 +1528,9 @@ out:
new.link_speed != eth_dev->data->dev_link.link_speed) {
rte_eth_linkstatus_set(eth_dev, &new);
-_rte_eth_dev_callback_process(eth_dev,
-RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(eth_dev,
+RTE_ETH_EVENT_INTR_LSC,
+NULL);
bnxt_print_link_info(eth_dev);
}

@@ -28,8 +28,8 @@ int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg)
ret_param.vf_id = vf_id;
ret_param.msg = msg;
-_rte_eth_dev_callback_process(bp->eth_dev, RTE_ETH_EVENT_VF_MBOX,
-&ret_param);
+rte_eth_dev_callback_process(bp->eth_dev, RTE_ETH_EVENT_VF_MBOX,
+&ret_param);
/* Default to approve */
if (ret_param.retval == RTE_PMD_BNXT_MB_EVENT_PROCEED)

@@ -1879,7 +1879,7 @@ slave_remove(struct bond_dev_private *internals,
internals->slave_count--;
/* force reconfiguration of slave interfaces */
-_rte_eth_dev_reset(slave_eth_dev);
+rte_eth_dev_internal_reset(slave_eth_dev);
}
static void
@@ -2765,7 +2765,7 @@ bond_ethdev_delayed_lsc_propagation(void *arg)
if (arg == NULL)
return;
-_rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
+rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
RTE_ETH_EVENT_INTR_LSC, NULL);
}
@@ -2900,7 +2900,7 @@ link_update:
bond_ethdev_delayed_lsc_propagation,
(void *)bonded_eth_dev);
else
-_rte_eth_dev_callback_process(bonded_eth_dev,
+rte_eth_dev_callback_process(bonded_eth_dev,
RTE_ETH_EVENT_INTR_LSC,
NULL);
@@ -2910,7 +2910,7 @@ link_update:
bond_ethdev_delayed_lsc_propagation,
(void *)bonded_eth_dev);
else
-_rte_eth_dev_callback_process(bonded_eth_dev,
+rte_eth_dev_callback_process(bonded_eth_dev,
RTE_ETH_EVENT_INTR_LSC,
NULL);
}

@@ -335,7 +335,7 @@ static void dpaa_interrupt_handler(void *param)
if (bytes_read < 0)
DPAA_PMD_ERR("Error reading eventfd\n");
dpaa_eth_link_update(dev, 0);
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static int dpaa_eth_dev_start(struct rte_eth_dev *dev)

@@ -1065,8 +1065,7 @@ dpaa2_interrupt_handler(void *param)
clear = DPNI_IRQ_EVENT_LINK_CHANGED;
dpaa2_dev_link_update(dev, 0);
/* calling all the apps registered for link status event */
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
out:
ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,

@@ -1630,7 +1630,7 @@ eth_em_interrupt_handler(void *param)
eth_em_interrupt_get_status(dev);
eth_em_interrupt_action(dev, dev->intr_handle);
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static int

@@ -2920,8 +2920,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
pci_dev->addr.bus,
pci_dev->addr.devid,
pci_dev->addr.function);
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
return 0;
@@ -2983,8 +2982,8 @@ void igbvf_mbx_process(struct rte_eth_dev *dev)
/* dummy mbx read to ack pf */
if (mbx->ops.read(hw, &in_msg, 1, 0))
return;
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
-NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+NULL);
}
}

@@ -1621,7 +1621,7 @@ static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
if (unlikely(adapter->trigger_reset)) {
PMD_DRV_LOG(ERR, "Trigger reset is on\n");
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
NULL);
}
}
@@ -2867,7 +2867,7 @@ static void ena_update_on_link_change(void *adapter_data,
adapter->link_status = status;
ena_link_update(eth_dev, 0);
-_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static void ena_notification(void *data,

@@ -445,7 +445,7 @@ enic_intr_handler(void *arg)
vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
enic_link_update(dev);
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
enic_log_q_error(enic);
/* Re-enable irq in case of INTx */
rte_intr_ack(&enic->pdev->intr_handle);

@@ -602,9 +602,9 @@ failsafe_eth_lsc_event_callback(uint16_t port_id __rte_unused,
ret = dev->dev_ops->link_update(dev, 0);
/* We must pass on the LSC event */
if (ret)
-return _rte_eth_dev_callback_process(dev,
-RTE_ETH_EVENT_INTR_LSC,
-NULL);
+return rte_eth_dev_callback_process(dev,
+RTE_ETH_EVENT_INTR_LSC,
+NULL);
else
return 0;
}

@@ -2621,7 +2621,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
true);
dev_info->sm_down = 0;
-_rte_eth_dev_callback_process(dev,
+rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC,
NULL);
}
@@ -2635,8 +2635,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
if (err == FM10K_ERR_RESET_REQUESTED) {
PMD_INIT_LOG(INFO, "INT: Switch is down");
dev_info->sm_down = 1;
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
/* Handle SRAM error */
@@ -2703,8 +2702,7 @@ fm10k_dev_interrupt_handler_vf(void *param)
/* Setting reset flag */
dev_info->sm_down = 1;
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
if (dev_info->sm_down == 1 &&
@@ -2732,8 +2730,7 @@ fm10k_dev_interrupt_handler_vf(void *param)
fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
dev_info->sm_down = 0;
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
/* Re-enable interrupt from device side */

@@ -1362,9 +1362,9 @@ static void hinic_lsc_process(struct hinic_hwdev *hwdev,
ret = hinic_link_event_process(hwdev, rte_dev, status);
/* check if link has changed, notify callback */
if (ret == 0)
-_rte_eth_dev_callback_process(rte_dev,
-RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(rte_dev,
+RTE_ETH_EVENT_INTR_LSC,
+NULL);
}
void hinic_l2nic_async_event_handle(struct hinic_hwdev *hwdev,

@@ -6846,7 +6846,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
case i40e_aqc_opc_get_link_status:
ret = i40e_dev_link_update(dev, 0);
if (!ret)
-_rte_eth_dev_callback_process(dev,
+rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC, NULL);
break;
default:

@@ -1378,7 +1378,7 @@ i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg,
switch (pf_msg->event) {
case VIRTCHNL_EVENT_RESET_IMPENDING:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
-_rte_eth_dev_callback_process(dev,
+rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_RESET, NULL);
break;
case VIRTCHNL_EVENT_LINK_CHANGE:
@@ -1425,7 +1425,7 @@ i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg,
}
i40evf_dev_link_update(dev, 0);
-_rte_eth_dev_callback_process(dev,
+rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC, NULL);
break;
case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:

@@ -1363,7 +1363,7 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
* do nothing and send not_supported to VF. As PF must send a response
* to VF and ACK/NACK is not defined.
*/
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &ret_param);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &ret_param);
if (ret_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
opcode);

@@ -186,7 +186,7 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
switch (pf_msg->event) {
case VIRTCHNL_EVENT_RESET_IMPENDING:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
NULL);
break;
case VIRTCHNL_EVENT_LINK_CHANGE:
@@ -201,8 +201,7 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
vf->link_speed = iavf_convert_link_speed(speed);
}
iavf_dev_link_update(dev, 0);
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
break;
case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");

@@ -1314,7 +1314,7 @@ ice_handle_aq_msg(struct rte_eth_dev *dev)
case ice_aqc_opc_get_link_status:
ret = ice_link_update(dev, 0);
if (!ret)
-_rte_eth_dev_callback_process
+rte_eth_dev_callback_process
(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
break;
default:
@@ -1379,7 +1379,7 @@ ice_interrupt_handler(void *param)
PMD_DRV_LOG(INFO, "OICR: link state change event");
ret = ice_link_update(dev, 0);
if (!ret)
-_rte_eth_dev_callback_process
+rte_eth_dev_callback_process
(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
#endif

@@ -540,8 +540,7 @@ eth_igc_interrupt_action(struct rte_eth_dev *dev)
pci_dev->addr.bus,
pci_dev->addr.devid,
pci_dev->addr.function);
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
}

@@ -4659,13 +4659,11 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
ixgbe_dev_link_update(dev, 0);
intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
ixgbe_dev_link_status_print(dev);
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
if (intr->flags & IXGBE_FLAG_MACSEC) {
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
-NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, NULL);
intr->flags &= ~IXGBE_FLAG_MACSEC;
}
@@ -8674,8 +8672,8 @@ static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
/* dummy mbx read to ack pf */
if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
return;
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
-NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+NULL);
}
}

@@ -832,7 +832,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
vfinfo[vf].clear_to_send = true;
/* notify application about VF reset */
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
&ret_param);
return ret;
}
@@ -844,8 +844,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
* if 0, do nothing and send ACK to VF
* if ret_param.retval > 1, do nothing and send NAK to VF
*/
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
-&ret_param);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &ret_param);
retval = ret_param.retval;

@@ -124,9 +124,9 @@ mlx4_link_status_alarm(struct mlx4_priv *priv)
MLX4_ASSERT(priv->intr_alarm == 1);
priv->intr_alarm = 0;
if (intr_conf->lsc && !mlx4_link_status_check(priv))
-_rte_eth_dev_callback_process(ETH_DEV(priv),
-RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(ETH_DEV(priv),
+RTE_ETH_EVENT_INTR_LSC,
+NULL);
}
/**
@@ -207,8 +207,8 @@ mlx4_interrupt_handler(struct mlx4_priv *priv)
}
for (i = 0; i != RTE_DIM(caught); ++i)
if (caught[i])
-_rte_eth_dev_callback_process(ETH_DEV(priv), type[i],
-NULL);
+rte_eth_dev_callback_process(ETH_DEV(priv), type[i],
+NULL);
}
/**

@@ -732,7 +732,7 @@ mlx5_dev_interrupt_device_fatal(struct mlx5_dev_ctx_shared *sh)
dev = &rte_eth_devices[sh->port[i].ih_port_id];
MLX5_ASSERT(dev);
if (dev->data->dev_conf.intr_conf.rmv)
-_rte_eth_dev_callback_process
+rte_eth_dev_callback_process
(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
}
}
@@ -808,7 +808,7 @@ mlx5_dev_interrupt_handler(void *cb_arg)
usleep(0);
continue;
}
-_rte_eth_dev_callback_process
+rte_eth_dev_callback_process
(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
continue;
}

@@ -6078,7 +6078,7 @@ mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
continue;
if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER))
-_rte_eth_dev_callback_process
+rte_eth_dev_callback_process
(&rte_eth_devices[sh->port[i].devx_ih_port_id],
RTE_ETH_EVENT_FLOW_AGED, NULL);
age_info->flags = 0;

@@ -292,7 +292,7 @@ static int hn_nvs_send_rndis_ctrl(struct vmbus_channel *chan,
*/
static void hn_rndis_link_alarm(void *arg)
{
-_rte_eth_dev_callback_process(arg, RTE_ETH_EVENT_INTR_LSC, NULL);
+rte_eth_dev_callback_process(arg, RTE_ETH_EVENT_INTR_LSC, NULL);
}
void hn_rndis_link_status(struct rte_eth_dev *dev, const void *msg)

@@ -1460,7 +1460,7 @@ nfp_net_dev_interrupt_delayed_handler(void *param)
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
nfp_net_link_update(dev, 0);
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
nfp_net_dev_link_status_print(dev);

@@ -227,9 +227,9 @@ octeontx_link_status_poll(void *arg)
octeontx_link_status_update(nic, &link);
octeontx_link_status_print(dev, &link);
rte_eth_linkstatus_set(dev, &link);
-_rte_eth_dev_callback_process(dev,
-RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(dev,
+RTE_ETH_EVENT_INTR_LSC,
+NULL);
}
}

@@ -82,7 +82,7 @@ otx2_eth_dev_link_status_update(struct otx2_dev *dev,
rte_eth_linkstatus_set(eth_dev, &eth_link);
/* Set the flag and execute application callbacks */
-_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static int

@@ -650,8 +650,7 @@ void qed_link_update(struct ecore_hwfn *hwfn)
struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
if (!qede_link_update(dev, 0))
-_rte_eth_dev_callback_process(dev,
-RTE_ETH_EVENT_INTR_LSC, NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static int qed_drain(struct ecore_dev *edev)

@@ -89,9 +89,9 @@ exit:
sfc_notice(sa, "link status change event: link %s",
sa->eth_dev->data->dev_link.link_status ?
"UP" : "DOWN");
-_rte_eth_dev_callback_process(sa->eth_dev,
-RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(sa->eth_dev,
+RTE_ETH_EVENT_INTR_LSC,
+NULL);
}
}
@@ -131,9 +131,9 @@ sfc_intr_message_handler(void *cb_arg)
exit:
if (lsc_seq != sa->port.lsc_seq) {
sfc_notice(sa, "link status change event");
-_rte_eth_dev_callback_process(sa->eth_dev,
-RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(sa->eth_dev,
+RTE_ETH_EVENT_INTR_LSC,
+NULL);
}
}

@@ -83,9 +83,9 @@ nicvf_interrupt(void *arg)
nicvf_link_status_update(nic, &link);
rte_eth_linkstatus_set(dev, &link);
-_rte_eth_dev_callback_process(dev,
-RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(dev,
+RTE_ETH_EVENT_INTR_LSC,
+NULL);
}
}

@@ -832,7 +832,7 @@ new_device(int vid)
VHOST_LOG(INFO, "Vhost device %d created\n", vid);
-_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
return 0;
}
@@ -889,7 +889,7 @@ destroy_device(int vid)
VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
eth_vhost_uninstall_intr(eth_dev);
-_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static int
@@ -968,7 +968,7 @@ vring_state_changed(int vid, uint16_t vring, int enable)
VHOST_LOG(INFO, "vring%u is %s\n",
vring, enable ? "enabled" : "disabled");
-_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);
+rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);
return 0;
}

@@ -1489,9 +1489,9 @@ virtio_interrupt_handler(void *param)
if (isr & VIRTIO_PCI_ISR_CONFIG) {
if (virtio_dev_link_update(dev, 0) == 0)
-_rte_eth_dev_callback_process(dev,
-RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(dev,
+RTE_ETH_EVENT_INTR_LSC,
+NULL);
if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
vtpci_read_dev_config(hw,

@@ -1417,9 +1417,9 @@ vmxnet3_process_events(struct rte_eth_dev *dev)
if (events & VMXNET3_ECR_LINK) {
PMD_DRV_LOG(DEBUG, "Process events: VMXNET3_ECR_LINK event");
if (vmxnet3_dev_link_update(dev, 0) == 0)
-_rte_eth_dev_callback_process(dev,
-RTE_ETH_EVENT_INTR_LSC,
-NULL);
+rte_eth_dev_callback_process(dev,
+RTE_ETH_EVENT_INTR_LSC,
+NULL);
}
/* Check if there is an error on xmit/recv queues */

@@ -549,7 +549,7 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
rte_eth_dev_shared_data_prepare();
if (eth_dev->state != RTE_ETH_DEV_UNUSED)
-_rte_eth_dev_callback_process(eth_dev,
+rte_eth_dev_callback_process(eth_dev,
RTE_ETH_EVENT_DESTROY, NULL);
rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
@@ -1486,7 +1486,7 @@ rollback:
}
void
-_rte_eth_dev_reset(struct rte_eth_dev *dev)
+rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
if (dev->data->dev_started) {
RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
@@ -4090,7 +4090,7 @@ rte_eth_dev_callback_unregister(uint16_t port_id,
}
int
-_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
+rte_eth_dev_callback_process(struct rte_eth_dev *dev,
enum rte_eth_event_type event, void *ret_param)
{
struct rte_eth_dev_callback *cb_lst;
@@ -4122,7 +4122,7 @@ rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
if (dev == NULL)
return;
-_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
+rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
dev->state = RTE_ETH_DEV_ATTACHED;
}

@@ -821,7 +821,7 @@ int rte_eth_dev_release_port(struct rte_eth_dev *eth_dev);
* void
*/
__rte_internal
-void _rte_eth_dev_reset(struct rte_eth_dev *dev);
+void rte_eth_dev_internal_reset(struct rte_eth_dev *dev);
/**
* @internal Executes all the user application registered callbacks for
@@ -841,7 +841,7 @@ void _rte_eth_dev_reset(struct rte_eth_dev *dev);
* int
*/
__rte_internal
-int _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
+int rte_eth_dev_callback_process(struct rte_eth_dev *dev,
enum rte_eth_event_type event, void *ret_param);
/**

@@ -230,17 +230,17 @@ EXPERIMENTAL {
INTERNAL {
global:
-_rte_eth_dev_callback_process;
-_rte_eth_dev_reset;
rte_eth_dev_allocate;
rte_eth_dev_allocated;
rte_eth_dev_attach_secondary;
+rte_eth_dev_callback_process;
rte_eth_dev_create;
rte_eth_dev_destroy;
rte_eth_dev_is_rx_hairpin_queue;
rte_eth_dev_is_tx_hairpin_queue;
rte_eth_dev_probing_finish;
rte_eth_dev_release_port;
+rte_eth_dev_internal_reset;
rte_eth_devargs_parse;
rte_eth_dma_zone_free;
rte_eth_dma_zone_reserve;