ethdev: remove duplicated debug functions

The rx/tx burst, rx_queue_count and descriptor_done functions in the
ethdev library each had two copies of the code: one in rte_ethdev.h,
inlined for performance, and a second in rte_ethdev.c, used for debug
builds only. We can eliminate the second copy by moving the additional
debug checks into the inline functions in the header file. [Any
compilation for debugging at optimization level 0 will not inline the
functions, so the result should be the same as when they lived in the
.c file.]

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Commit: 71594065e2 (parent: b974e4a40c)
Author: Bruce Richardson, 2015-11-24 17:37:56 +00:00; committed by Thomas Monjalon
4 changed files with 60 additions and 136 deletions
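
[Editorial illustration, not part of the commit.] The pattern being
adopted is a single static inline function whose validation code is
compiled in only under a debug macro. Below is a minimal,
self-contained sketch of that pattern; all names (my_dev, my_devices,
my_rx_burst, MY_LIB_DEBUG, dummy_burst) are hypothetical stand-ins for
the ethdev equivalents. Build it plain for the fast path, or with
-DMY_LIB_DEBUG -O0 for the checked, uninlined path.

/* sketch.c: hypothetical illustration of the single-copy debug-check
 * pattern. cc sketch.c   or   cc -DMY_LIB_DEBUG -O0 sketch.c */
#include <stdint.h>
#include <stdio.h>

struct my_dev {
	uint16_t (*burst_fn)(void *queue, void **pkts, uint16_t n);
	void *queues[8];
	uint16_t nb_queues;
};

static struct my_dev my_devices[4];

static inline uint16_t
my_rx_burst(uint8_t port_id, uint16_t queue_id, void **pkts, uint16_t n)
{
	struct my_dev *dev = &my_devices[port_id];

#ifdef MY_LIB_DEBUG
	/* Debug-only checks live in the inline copy but are compiled
	 * out of release builds entirely. At -O0 the compiler does not
	 * inline this function, so a debug build behaves just like the
	 * old out-of-line .c implementation. */
	if (dev->burst_fn == NULL)
		return 0;
	if (queue_id >= dev->nb_queues) {
		fprintf(stderr, "Invalid RX queue_id=%u\n", queue_id);
		return 0;
	}
#endif
	/* Fast path: a single indirect call, identical in both builds. */
	return (*dev->burst_fn)(dev->queues[queue_id], pkts, n);
}

static uint16_t
dummy_burst(void *queue, void **pkts, uint16_t n)
{
	(void)queue;
	(void)pkts;
	return n; /* pretend n packets arrived */
}

int
main(void)
{
	void *pkts[4];

	my_devices[0].burst_fn = dummy_burst;
	my_devices[0].nb_queues = 1;

	printf("good queue: %u\n", my_rx_burst(0, 0, pkts, 4)); /* 4 */
	printf("bad queue:  %u\n", my_rx_burst(0, 7, pkts, 4)); /* 0 when checked */
	return 0;
}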

doc/guides/rel_notes/release_2_2.rst
@@ -278,6 +278,11 @@ ABI Changes
 * The new fields rx_desc_lim and tx_desc_lim are added into rte_eth_dev_info
   structure.
 
+* For debug builds, the functions rte_eth_rx_burst(), rte_eth_tx_burst(),
+  rte_eth_rx_descriptor_done() and rte_eth_rx_queue_count() will
+  no longer be separate functions in the DPDK libraries. Instead, they will
+  only be present in the rte_ethdev.h header file.
+
 * The maximum number of queues per port CONFIG_RTE_MAX_QUEUES_PER_PORT is
   increased to 1024.
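
[Editorial note, not part of the commit.] Because the checks now live in
the application's own compilation of the inline functions, they are
enabled through the build-time configuration rather than by linking
against a debug build of the library, and they only take effect once the
application itself is rebuilt. In the make-based configuration of this
era that looks roughly like the following; the CONFIG_ option
corresponds to the RTE_LIBRTE_ETHDEV_DEBUG macro seen in the diff, but
the exact config file and option spelling should be checked against the
target release:

# build-time configuration (e.g. config/common_linuxapp)
CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=y

# compile the application without optimization so the inline functions
# are not inlined, matching the old out-of-line behaviour
make EXTRA_CFLAGS='-O0 -g'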

lib/librte_ether/rte_ethdev.c
@@ -2451,70 +2451,6 @@ rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
 	return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
 }
 
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
-uint16_t
-rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
-		 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
-{
-	struct rte_eth_dev *dev;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
-
-	dev = &rte_eth_devices[port_id];
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
-	if (queue_id >= dev->data->nb_rx_queues) {
-		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
-		return 0;
-	}
-	return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
-						rx_pkts, nb_pkts);
-}
-
-uint16_t
-rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
-		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
-	struct rte_eth_dev *dev;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
-
-	dev = &rte_eth_devices[port_id];
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
-	if (queue_id >= dev->data->nb_tx_queues) {
-		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
-		return 0;
-	}
-	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
-						tx_pkts, nb_pkts);
-}
-
-uint32_t
-rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
-{
-	struct rte_eth_dev *dev;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
-
-	dev = &rte_eth_devices[port_id];
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
-	return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
-}
-
-int
-rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
-{
-	struct rte_eth_dev *dev;
-
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
-	dev = &rte_eth_devices[port_id];
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
-	return (*dev->dev_ops->rx_descriptor_done)(dev->data->rx_queues[queue_id],
-						   offset);
-}
-#endif
-
 int
 rte_eth_dev_callback_register(uint8_t port_id,
 			enum rte_eth_event_type event,
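
[Editorial aside, not part of the commit.] The guard macros in the
removed code are early-return helpers. A simplified sketch of their
shape, using hypothetical names and plain fprintf in place of DPDK's
RTE_PMD_DEBUG_TRACE logging:

#include <stdio.h>

/* hypothetical stand-in for rte_eth_dev_is_valid_port() */
extern int is_valid_port(unsigned port_id);

/* return `retval` from the enclosing function if the port is bogus */
#define VALID_PORTID_OR_ERR_RET(port_id, retval) do {            \
	if (!is_valid_port(port_id)) {                           \
		fprintf(stderr, "Invalid port_id=%u\n",          \
			(unsigned)(port_id));                    \
		return retval;                                   \
	}                                                        \
} while (0)

/* return `retval` if the driver did not implement the callback */
#define FUNC_PTR_OR_ERR_RET(func, retval) do {                   \
	if ((func) == NULL) {                                    \
		fprintf(stderr, "Function not supported\n");     \
		return retval;                                   \
	}                                                        \
} while (0)

This is why rte_eth_rx_descriptor_done() can hand back -ENODEV and
-ENOTSUP: the second macro argument is the value returned on failure.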

lib/librte_ether/rte_ethdev.h
@@ -2492,18 +2492,21 @@ extern int rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on);
  *   of pointers to *rte_mbuf* structures effectively supplied to the
  *   *rx_pkts* array.
  */
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
-extern uint16_t rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
-		 struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
-#else
 static inline uint16_t
 rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
 		 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
 {
-	struct rte_eth_dev *dev;
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 
-	dev = &rte_eth_devices[port_id];
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
 
+	if (queue_id >= dev->data->nb_rx_queues) {
+		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
+		return 0;
+	}
+#endif
 	int16_t nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
 			rx_pkts, nb_pkts);
 
@@ -2521,7 +2524,6 @@ rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
 
 	return nb_rx;
 }
-#endif
 
 /**
  * Get the number of used descriptors in a specific queue
@@ -2533,18 +2535,16 @@ rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
  * @return
  *   The number of used descriptors in the specific queue.
  */
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
-extern uint32_t rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id);
-#else
 static inline uint32_t
 rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
 {
-	struct rte_eth_dev *dev;
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 
-	dev = &rte_eth_devices[port_id];
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
+#endif
 	return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
 }
-#endif
 
 /**
  * Check if the DD bit of the specific RX descriptor in the queue has been set
@@ -2560,21 +2560,17 @@ rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
  *  - (0) if the specific DD bit is not set.
  *  - (-ENODEV) if *port_id* invalid.
  */
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
-extern int rte_eth_rx_descriptor_done(uint8_t port_id,
-				      uint16_t queue_id,
-				      uint16_t offset);
-#else
 static inline int
 rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
 {
-	struct rte_eth_dev *dev;
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 
-	dev = &rte_eth_devices[port_id];
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
+#endif
 	return (*dev->dev_ops->rx_descriptor_done)( \
 		dev->data->rx_queues[queue_id], offset);
 }
-#endif
 
 /**
  * Send a burst of output packets on a transmit queue of an Ethernet device.
@@ -2634,17 +2630,21 @@ rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
  *   the transmit ring. The return value can be less than the value of the
  *   *tx_pkts* parameter when the transmit ring is full or has been filled up.
  */
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
-extern uint16_t rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
-		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
-#else
 static inline uint16_t
 rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
 		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	struct rte_eth_dev *dev;
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 
-	dev = &rte_eth_devices[port_id];
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
 
+	if (queue_id >= dev->data->nb_tx_queues) {
+		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+		return 0;
+	}
+#endif
 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
 	struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
 
@@ -2660,7 +2660,6 @@ rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
 
 	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
 }
-#endif
 
 /**
  * The eth device event type for interrupt, and maybe others in the future.
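
[Editorial example, not part of the commit.] From the caller's point of
view nothing changes: the API is used identically whether or not the
debug checks are compiled in. A minimal forwarding loop against the
ethdev API of this era might look like this (port and queue setup
omitted; BURST_SIZE is a local constant, and ports 0 and 1 are assumed
to be configured and started):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

/* Forward packets from port 0, queue 0 to port 1, queue 0 forever. */
static void
fwd_loop(void)
{
	struct rte_mbuf *bufs[BURST_SIZE];

	for (;;) {
		/* In a debug build an invalid port or queue returns 0
		 * with a trace message instead of crashing. */
		uint16_t nb_rx = rte_eth_rx_burst(0, 0, bufs, BURST_SIZE);
		if (nb_rx == 0)
			continue;

		uint16_t nb_tx = rte_eth_tx_burst(1, 0, bufs, nb_rx);

		/* Free any packets the TX ring could not accept. */
		while (nb_tx < nb_rx)
			rte_pktmbuf_free(bufs[nb_tx++]);
	}
}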

lib/librte_ether/rte_ether_version.map
@@ -1,4 +1,4 @@
-DPDK_2.0 {
+DPDK_2.2 {
 	global:
 
 	_rte_eth_dev_callback_process;
@@ -7,6 +7,7 @@ DPDK_2.0 {
 	rte_eth_allmulticast_disable;
 	rte_eth_allmulticast_enable;
 	rte_eth_allmulticast_get;
+	rte_eth_copy_pci_info;
 	rte_eth_dev_allocate;
 	rte_eth_dev_allocated;
 	rte_eth_dev_attach;
@@ -24,6 +25,7 @@ DPDK_2.0 {
 	rte_eth_dev_close;
 	rte_eth_dev_configure;
 	rte_eth_dev_count;
+	rte_eth_dev_default_mac_addr_set;
 	rte_eth_dev_detach;
 	rte_eth_dev_fdir_add_perfect_filter;
 	rte_eth_dev_fdir_add_signature_filter;
@@ -37,9 +39,16 @@ DPDK_2.0 {
 	rte_eth_dev_filter_supported;
 	rte_eth_dev_flow_ctrl_get;
 	rte_eth_dev_flow_ctrl_set;
+	rte_eth_dev_get_dcb_info;
+	rte_eth_dev_get_eeprom;
+	rte_eth_dev_get_eeprom_length;
 	rte_eth_dev_get_mtu;
+	rte_eth_dev_get_reg_info;
+	rte_eth_dev_get_reg_length;
 	rte_eth_dev_get_vlan_offload;
+	rte_eth_devices;
 	rte_eth_dev_info_get;
+	rte_eth_dev_is_valid_port;
 	rte_eth_dev_mac_addr_add;
 	rte_eth_dev_mac_addr_remove;
 	rte_eth_dev_priority_flow_ctrl_set;
@@ -48,10 +57,16 @@ DPDK_2.0 {
 	rte_eth_dev_rss_hash_update;
 	rte_eth_dev_rss_reta_query;
 	rte_eth_dev_rss_reta_update;
+	rte_eth_dev_rx_intr_ctl;
+	rte_eth_dev_rx_intr_ctl_q;
+	rte_eth_dev_rx_intr_disable;
+	rte_eth_dev_rx_intr_enable;
 	rte_eth_dev_rx_queue_start;
 	rte_eth_dev_rx_queue_stop;
+	rte_eth_dev_set_eeprom;
 	rte_eth_dev_set_link_down;
 	rte_eth_dev_set_link_up;
+	rte_eth_dev_set_mc_addr_list;
 	rte_eth_dev_set_mtu;
 	rte_eth_dev_set_rx_queue_stats_mapping;
 	rte_eth_dev_set_tx_queue_stats_mapping;
@@ -74,7 +89,7 @@ DPDK_2.0 {
 	rte_eth_dev_udp_tunnel_delete;
 	rte_eth_dev_vlan_filter;
 	rte_eth_dev_wd_timeout_store;
-	rte_eth_devices;
+	rte_eth_dma_zone_reserve;
 	rte_eth_driver_register;
 	rte_eth_led_off;
 	rte_eth_led_on;
@@ -89,55 +104,24 @@ DPDK_2.0 {
 	rte_eth_promiscuous_get;
 	rte_eth_remove_rx_callback;
 	rte_eth_remove_tx_callback;
-	rte_eth_rx_burst;
-	rte_eth_rx_descriptor_done;
-	rte_eth_rx_queue_count;
+	rte_eth_rx_queue_info_get;
 	rte_eth_rx_queue_setup;
 	rte_eth_set_queue_rate_limit;
 	rte_eth_set_vf_rate_limit;
 	rte_eth_stats;
 	rte_eth_stats_get;
 	rte_eth_stats_reset;
-	rte_eth_tx_burst;
+	rte_eth_timesync_adjust_time;
+	rte_eth_timesync_disable;
+	rte_eth_timesync_enable;
+	rte_eth_timesync_read_rx_timestamp;
+	rte_eth_timesync_read_time;
+	rte_eth_timesync_read_tx_timestamp;
+	rte_eth_timesync_write_time;
+	rte_eth_tx_queue_info_get;
 	rte_eth_tx_queue_setup;
 	rte_eth_xstats_get;
 	rte_eth_xstats_reset;
 
 	local: *;
 };
-
-DPDK_2.1 {
-	global:
-
-	rte_eth_dev_default_mac_addr_set;
-	rte_eth_dev_get_eeprom;
-	rte_eth_dev_get_eeprom_length;
-	rte_eth_dev_get_reg_info;
-	rte_eth_dev_get_reg_length;
-	rte_eth_dev_is_valid_port;
-	rte_eth_dev_rx_intr_ctl;
-	rte_eth_dev_rx_intr_ctl_q;
-	rte_eth_dev_rx_intr_disable;
-	rte_eth_dev_rx_intr_enable;
-	rte_eth_dev_set_eeprom;
-	rte_eth_dev_set_mc_addr_list;
-	rte_eth_timesync_disable;
-	rte_eth_timesync_enable;
-	rte_eth_timesync_read_rx_timestamp;
-	rte_eth_timesync_read_tx_timestamp;
-} DPDK_2.0;
-
-DPDK_2.2 {
-	global:
-
-	rte_eth_copy_pci_info;
-	rte_eth_dev_get_dcb_info;
-	rte_eth_dma_zone_reserve;
-	rte_eth_rx_queue_info_get;
-	rte_eth_timesync_adjust_time;
-	rte_eth_timesync_read_time;
-	rte_eth_timesync_write_time;
-	rte_eth_tx_queue_info_get;
-} DPDK_2.1;
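
[Editorial note, not part of the commit.] Removing rte_eth_rx_burst and
the other three symbols from the export list is an ABI break, which is
why the map collapses the old DPDK_2.0 and DPDK_2.1 version nodes into a
single DPDK_2.2 node instead of appending another incremental node. For
contrast, a purely additive release would normally extend the map with a
node inheriting from the previous one, along these lines
(rte_eth_new_function is a made-up symbol):

DPDK_2.3 {
	global:

	rte_eth_new_function;
} DPDK_2.2;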