ethdev: introduce hairpin memory capabilities

Before this patch, implementation details and configuration of hairpin
queues were decided internally by the PMD. Applications had no control
over the configuration of Rx and Tx hairpin queues, apart from the number
of descriptors, explicit Tx flow mode and disabling automatic binding.
This patch addresses that by adding:

- Hairpin queue capabilities reported by PMDs.
- New configuration options for Rx and Tx hairpin queues.

Main goal of this patch is to allow applications to provide
configuration hints regarding placement of hairpin queues.
These hints specify whether buffers of hairpin queues should be placed
in host memory or in dedicated device memory. Different memory options
may have different performance characteristics and hairpin configuration
should be fine-tuned to the specific application and use case.

This patch introduces new hairpin queue configuration options through
rte_eth_hairpin_conf struct, allowing to tune Rx and Tx hairpin queues
memory configuration. Hairpin configuration is extended with the
following fields:

- use_locked_device_memory - If set, PMD will use specialized on-device
  memory to store RX or TX hairpin queue data.
- use_rte_memory - If set, PMD will use DPDK-managed memory to store RX
  or TX hairpin queue data.
- force_memory - If set, PMD will be forced to use provided memory
  settings. If no appropriate resources are available, then device start
  will fail. If unset and no resources are available, PMD will fallback
  to using default type of resource for given queue.

If application chooses to use PMD default memory configuration, all of
these flags should remain unset.

Hairpin capabilities are also extended, to allow verification of support
of given hairpin memory configurations. Struct rte_eth_hairpin_cap is
extended with two additional fields of type rte_eth_hairpin_queue_cap:

- rx_cap - memory capabilities of hairpin RX queues.
- tx_cap - memory capabilities of hairpin TX queues.

Struct rte_eth_hairpin_queue_cap exposes whether given queue type
supports use_locked_device_memory and use_rte_memory flags.

Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
This commit is contained in:
Dariusz Sosnowski 2022-10-06 11:00:58 +00:00 committed by Thomas Monjalon
parent 70888c61d9
commit bc705061cb
3 changed files with 121 additions and 2 deletions

View File

@ -77,6 +77,17 @@ New Features
The information demonstrates I/O process which is important for debug.
The dump format is vendor-specific.
* **Added ethdev hairpin memory configuration options.**
Added new configuration flags for hairpin queues in ``rte_eth_hairpin_conf``:
* ``use_locked_device_memory``
* ``use_rte_memory``
* ``force_memory``
Each flag has a corresponding capability flag
in ``struct rte_eth_hairpin_queue_cap``.
* **Added configuration for asynchronous flow connection tracking.**
Added connection tracking action number hint to ``rte_flow_configure``

View File

@ -1968,6 +1968,28 @@ rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
conf->peer_count, cap.max_rx_2_tx);
return -EINVAL;
}
if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) {
RTE_ETHDEV_LOG(ERR,
"Attempt to use locked device memory for Rx queue, which is not supported");
return -EINVAL;
}
if (conf->use_rte_memory && !cap.rx_cap.rte_memory) {
RTE_ETHDEV_LOG(ERR,
"Attempt to use DPDK memory for Rx queue, which is not supported");
return -EINVAL;
}
if (conf->use_locked_device_memory && conf->use_rte_memory) {
RTE_ETHDEV_LOG(ERR,
"Attempt to use mutually exclusive memory settings for Rx queue");
return -EINVAL;
}
if (conf->force_memory &&
!conf->use_locked_device_memory &&
!conf->use_rte_memory) {
RTE_ETHDEV_LOG(ERR,
"Attempt to force Rx queue memory settings, but none is set");
return -EINVAL;
}
if (conf->peer_count == 0) {
RTE_ETHDEV_LOG(ERR,
"Invalid value for number of peers for Rx queue(=%u), should be: > 0",
@ -2135,6 +2157,28 @@ rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
conf->peer_count, cap.max_tx_2_rx);
return -EINVAL;
}
if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) {
RTE_ETHDEV_LOG(ERR,
"Attempt to use locked device memory for Tx queue, which is not supported");
return -EINVAL;
}
if (conf->use_rte_memory && !cap.tx_cap.rte_memory) {
RTE_ETHDEV_LOG(ERR,
"Attempt to use DPDK memory for Tx queue, which is not supported");
return -EINVAL;
}
if (conf->use_locked_device_memory && conf->use_rte_memory) {
RTE_ETHDEV_LOG(ERR,
"Attempt to use mutually exclusive memory settings for Tx queue");
return -EINVAL;
}
if (conf->force_memory &&
!conf->use_locked_device_memory &&
!conf->use_rte_memory) {
RTE_ETHDEV_LOG(ERR,
"Attempt to force Tx queue memory settings, but none is set");
return -EINVAL;
}
if (conf->peer_count == 0) {
RTE_ETHDEV_LOG(ERR,
"Invalid value for number of peers for Tx queue(=%u), should be: > 0",

View File

@ -1092,6 +1092,28 @@ struct rte_eth_txconf {
void *reserved_ptrs[2]; /**< Reserved for future fields */
};
/**
 * @warning
 * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
 *
 * A structure used to return the Tx or Rx hairpin queue capabilities.
 *
 * Reported through struct rte_eth_hairpin_cap (rx_cap and tx_cap fields).
 * Each capability bit advertises PMD support for the correspondingly named
 * request flag in struct rte_eth_hairpin_conf.
 */
struct rte_eth_hairpin_queue_cap {
	/**
	 * When set, PMD supports placing descriptors and/or data buffers
	 * in dedicated device memory.
	 * Corresponds to the use_locked_device_memory configuration flag.
	 */
	uint32_t locked_device_memory:1;
	/**
	 * When set, PMD supports placing descriptors and/or data buffers
	 * in host memory managed by DPDK.
	 * Corresponds to the use_rte_memory configuration flag.
	 */
	uint32_t rte_memory:1;
	uint32_t reserved:30; /**< Reserved for future fields */
};
/**
* @warning
* @b EXPERIMENTAL: this API may change, or be removed, without prior notice
@ -1106,6 +1128,8 @@ struct rte_eth_hairpin_cap {
/** Max number of Tx queues to be connected to one Rx queue. */
uint16_t max_tx_2_rx;
uint16_t max_nb_desc; /**< The max num of descriptors. */
struct rte_eth_hairpin_queue_cap rx_cap; /**< Rx hairpin queue capabilities. */
struct rte_eth_hairpin_queue_cap tx_cap; /**< Tx hairpin queue capabilities. */
};
#define RTE_ETH_MAX_HAIRPIN_PEERS 32
@ -1149,11 +1173,51 @@ struct rte_eth_hairpin_conf {
* function after all the queues are set up properly and the ports are
* started. Also, the hairpin unbind function should be called
* accordingly before stopping a port that with hairpin configured.
* - When clear, the PMD will try to enable the hairpin with the queues
* - When cleared, the PMD will try to enable the hairpin with the queues
* configured automatically during port start.
*/
uint32_t manual_bind:1;
uint32_t reserved:14; /**< Reserved bits. */
/**
* Use locked device memory as a backing storage.
*
* - When set, PMD will attempt to place descriptors and/or data buffers
* in dedicated device memory.
* - When cleared, PMD will use default memory type as a backing storage.
* Please refer to PMD documentation for details.
*
* API user should check if PMD supports this configuration flag using
* @see rte_eth_dev_hairpin_capability_get.
*/
uint32_t use_locked_device_memory:1;
/**
* Use DPDK memory as backing storage.
*
* - When set, PMD will attempt to place descriptors and/or data buffers
* in host memory managed by DPDK.
* - When cleared, PMD will use default memory type as a backing storage.
* Please refer to PMD documentation for details.
*
* API user should check if PMD supports this configuration flag using
* @see rte_eth_dev_hairpin_capability_get.
*/
uint32_t use_rte_memory:1;
/**
* Force usage of hairpin memory configuration.
*
* - When set, PMD will attempt to use specified memory settings.
* If resource allocation fails, then hairpin queue allocation
* will result in an error.
* - When clear, PMD will attempt to use specified memory settings.
* If resource allocation fails, then PMD will retry
* allocation with default configuration.
*/
uint32_t force_memory:1;
uint32_t reserved:11; /**< Reserved bits. */
struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
};