ethdev: copy fast-path API into separate structure

Copy the public function pointers (rx_pkt_burst(), etc.) and the related
pointers to internal data from the rte_eth_dev structure into a
separate flat array. That array will remain in a public header.
The intention is to make rte_eth_dev and related structures internal,
so that future changes to the core eth_dev structures stay transparent
to the user and ABI/API breakages are avoided.
The plan is to keep a minimal part of the rte_eth_dev data public,
so fast-path calls (rte_eth_rx_burst(), etc.) can still be implemented
as inline functions and any slowdown is avoided or minimized.
The whole idea behind this new scheme:
1. PMDs keep setting up fast-path function pointers and related data
   inside the rte_eth_dev struct exactly as they did before.
2. Inside rte_eth_dev_start() and inside rte_eth_dev_probing_finish()
   (for secondary processes) we call eth_dev_fp_ops_setup(), which
   copies these function and data pointers into rte_eth_fp_ops[port_id].
3. Inside rte_eth_dev_stop() and inside rte_eth_dev_release_port()
   we call eth_dev_fp_ops_reset(), which resets rte_eth_fp_ops[port_id]
   to dummy values.
4. The fast-path ethdev API (rte_eth_rx_burst(), etc.) will use that new
   flat array to call the PMD-specific functions (see the sketch below).
That approach should allow us to make rte_eth_devices[] private without
introducing regressions and without requiring changes in driver code.
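
To illustrate point 4, here is a minimal sketch (not part of this commit)
of how an inline fast-path wrapper could dispatch through the flat array.
The wrapper name sketch_rx_burst() is hypothetical; rte_eth_fp_ops[] and
its fields are the ones introduced below.

#include <rte_ethdev.h> /* struct rte_eth_fp_ops, rte_eth_fp_ops[] */

/* Hypothetical inline wrapper: index the flat array by port_id and call
 * the PMD burst function directly, without touching rte_eth_devices[]. */
static inline uint16_t
sketch_rx_burst(uint16_t port_id, uint16_t queue_id,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct rte_eth_fp_ops *fpo = &rte_eth_fp_ops[port_id];
        void *rxq = fpo->rxq.data[queue_id]; /* internal per-queue data */

        return fpo->rx_pkt_burst(rxq, rx_pkts, nb_pkts);
}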

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
Tested-by: Feifei Wang <feifei.wang2@arm.com>
Author:    Konstantin Ananyev
Date:      2021-10-13 14:37:01 +01:00
Committer: Ferruh Yigit
Parent:    8d7d4fcdca
Commit:    c87d435a4d
4 changed files with 145 additions and 0 deletions

@@ -174,3 +174,55 @@ rte_eth_devargs_parse_representor_ports(char *str, void *data)
        RTE_LOG(ERR, EAL, "wrong representor format: %s\n", str);
        return str == NULL ? -1 : 0;
}

static uint16_t
dummy_eth_rx_burst(__rte_unused void *rxq,
                __rte_unused struct rte_mbuf **rx_pkts,
                __rte_unused uint16_t nb_pkts)
{
        RTE_ETHDEV_LOG(ERR, "rx_pkt_burst for not ready port\n");
        rte_errno = ENOTSUP;
        return 0;
}

static uint16_t
dummy_eth_tx_burst(__rte_unused void *txq,
                __rte_unused struct rte_mbuf **tx_pkts,
                __rte_unused uint16_t nb_pkts)
{
        RTE_ETHDEV_LOG(ERR, "tx_pkt_burst for not ready port\n");
        rte_errno = ENOTSUP;
        return 0;
}

void
eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo)
{
        static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
        static const struct rte_eth_fp_ops dummy_ops = {
                .rx_pkt_burst = dummy_eth_rx_burst,
                .tx_pkt_burst = dummy_eth_tx_burst,
                .rxq = {.data = dummy_data, .clbk = dummy_data,},
                .txq = {.data = dummy_data, .clbk = dummy_data,},
        };

        *fpo = dummy_ops;
}

void
eth_dev_fp_ops_setup(struct rte_eth_fp_ops *fpo,
                const struct rte_eth_dev *dev)
{
        fpo->rx_pkt_burst = dev->rx_pkt_burst;
        fpo->tx_pkt_burst = dev->tx_pkt_burst;
        fpo->tx_pkt_prepare = dev->tx_pkt_prepare;
        fpo->rx_queue_count = dev->rx_queue_count;
        fpo->rx_descriptor_status = dev->rx_descriptor_status;
        fpo->tx_descriptor_status = dev->tx_descriptor_status;

        fpo->rxq.data = dev->data->rx_queues;
        fpo->rxq.clbk = (void **)(uintptr_t)dev->post_rx_burst_cbs;

        fpo->txq.data = dev->data->tx_queues;
        fpo->txq.clbk = (void **)(uintptr_t)dev->pre_tx_burst_cbs;
}

@@ -26,4 +26,11 @@ eth_find_device(const struct rte_eth_dev *_start, rte_eth_cmp_t cmp,
/* Parse devargs value for representor parameter. */
int rte_eth_devargs_parse_representor_ports(char *str, void *data);

/* reset eth fast-path API to dummy values */
void eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo);

/* setup eth fast-path API to ethdev values */
void eth_dev_fp_ops_setup(struct rte_eth_fp_ops *fpo,
                const struct rte_eth_dev *dev);

#endif /* _ETH_PRIVATE_H_ */

@@ -44,6 +44,9 @@
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

@@ -579,6 +582,8 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
                rte_eth_dev_callback_process(eth_dev,
                                RTE_ETH_EVENT_DESTROY, NULL);

        eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        eth_dev->state = RTE_ETH_DEV_UNUSED;

@@ -1792,6 +1797,9 @@ rte_eth_dev_start(uint16_t port_id)
                (*dev->dev_ops->link_update)(dev, 0);
        }

        /* expose selection of PMD fast-path functions */
        eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

        rte_ethdev_trace_start(port_id);
        return 0;
}

@@ -1814,6 +1822,9 @@ rte_eth_dev_stop(uint16_t port_id)
                return 0;
        }

        /* point fast-path functions to dummy ones */
        eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

        dev->data->dev_started = 0;
        ret = (*dev->dev_ops->dev_stop)(dev);
        rte_ethdev_trace_stop(port_id, ret);

@@ -4477,6 +4488,14 @@ int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
                        queue_idx, tx_rate));
}

RTE_INIT(eth_dev_init_fp_ops)
{
        uint32_t i;

        for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
                eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
}

RTE_INIT(eth_dev_init_cb_lists)
{
        uint16_t i;

@@ -4645,6 +4664,14 @@ rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
        if (dev == NULL)
                return;

        /*
         * for secondary process, at that point we expect device
         * to be already 'usable', so shared data and all function pointers
         * for fast-path devops have to be setup properly inside rte_eth_dev.
         */
        if (rte_eal_process_type() == RTE_PROC_SECONDARY)
                eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev);

        rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);

        dev->state = RTE_ETH_DEV_ATTACHED;

@@ -50,6 +50,65 @@ typedef int (*eth_rx_descriptor_status_t)(void *rxq, uint16_t offset);
typedef int (*eth_tx_descriptor_status_t)(void *txq, uint16_t offset);
/**< @internal Check the status of a Tx descriptor */

/**
 * @internal
 * Structure used to hold opaque pointers to internal ethdev Rx/Tx
 * queues data.
 * The main purpose of exposing these pointers at all is to allow the
 * compiler to fetch this data in advance for the fast-path ethdev
 * inline functions.
 */
struct rte_ethdev_qdata {
        /** points to array of internal queue data pointers */
        void **data;
        /** points to array of queue callback data pointers */
        void **clbk;
};

/**
 * @internal
 * Fast-path ethdev functions and related data are held in a flat array,
 * one entry per ethdev.
 * On 64-bit systems the contents of this structure occupy exactly two 64B lines.
 * On 32-bit systems the contents of this structure fit into one 64B line.
 */
struct rte_eth_fp_ops {

        /**@{*/
        /**
         * Rx fast-path functions and related data.
         * 64-bit systems: occupies first 64B line
         */
        /** PMD receive function. */
        eth_rx_burst_t rx_pkt_burst;
        /** Get the number of used Rx descriptors. */
        eth_rx_queue_count_t rx_queue_count;
        /** Check the status of a Rx descriptor. */
        eth_rx_descriptor_status_t rx_descriptor_status;
        /** Rx queues data. */
        struct rte_ethdev_qdata rxq;
        uintptr_t reserved1[3];
        /**@}*/

        /**@{*/
        /**
         * Tx fast-path functions and related data.
         * 64-bit systems: occupies second 64B line
         */
        /** PMD transmit function. */
        eth_tx_burst_t tx_pkt_burst;
        /** PMD transmit prepare function. */
        eth_tx_prep_t tx_pkt_prepare;
        /** Check the status of a Tx descriptor. */
        eth_tx_descriptor_status_t tx_descriptor_status;
        /** Tx queues data. */
        struct rte_ethdev_qdata txq;
        uintptr_t reserved2[3];
        /**@}*/

} __rte_cache_aligned;

extern struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];
/**
 * @internal
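
As a sanity check on the cache-line layout described above, a build-time
assertion along these lines could be added; this is only a sketch, not
part of the commit, and assumes a 64-bit target where RTE_CACHE_LINE_SIZE
is 64 bytes.

#include <rte_common.h> /* RTE_CACHE_LINE_SIZE (via rte_config.h) */
#include <rte_ethdev.h> /* struct rte_eth_fp_ops */

/* Per 64B half: 3 function pointers (24B) + struct rte_ethdev_qdata (16B)
 * + 3 reserved uintptr_t entries (24B) = 64B, so the whole structure is
 * expected to span exactly two cache lines on such targets. */
_Static_assert(sizeof(struct rte_eth_fp_ops) == 2 * RTE_CACHE_LINE_SIZE,
        "rte_eth_fp_ops expected to occupy two 64B cache lines");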