ethdev: make flow API thread safe
Currently, the rte_flow functions are not defined as thread safe. DPDK applications either call the functions from a single thread or protect any concurrent calls to the rte_flow operations with a lock.

For PMDs that already support thread-safe flow operations natively, the redundant protection in the application hurts the performance of the rte_flow operation functions. The lack of a thread-safety guarantee for the rte_flow functions also limits what applications can expect from them.

This patch changes the rte_flow functions to be thread safe. As different PMDs implement different flow operations, some may already be thread safe and others may not. For PMDs that do not support thread-safe flow operations, a new lock is defined in ethdev to protect them at the rte_flow level.

A new RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE device flag is added to indicate whether the PMD supports thread-safe flow operations. PMDs that do support them set the RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE flag, and the rte_flow level functions then skip the thread-safety helper lock. In other words, the rte_flow level lock is only taken when the PMD operation functions are not thread safe.

A PMD that prefers its own lock over the default mutex can simply set the flag and use its preferred lock type internally; the default mutex is then effectively replaced by the PMD-level lock.

The change has no effect on existing DPDK applications and requires no changes to them. With the standard POSIX pthread_mutex, if there is no contention on the added rte_flow level mutex, the mutex only performs an atomic increment in pthread_mutex_lock() and an atomic decrement in pthread_mutex_unlock(); no futex() syscall is involved.

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Ori Kam <orika@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
commit 80d1a9aff7 (parent 96bb99f270)
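To give a concrete picture of how a driver opts out of the new ethdev-level lock described in the commit message, the sketch below sets the flag from a PMD init path. It is a minimal, hypothetical example: the mydrv_eth_dev_init() function and its surroundings are made up for illustration, and only RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE and dev_flags come from this patch.

    #include <rte_ethdev_driver.h> /* driver-side ethdev header (DPDK 20.11 naming) */

    /* Hypothetical PMD init: declare the driver's flow operations thread safe
     * so that the rte_flow API layer skips its per-port mutex for this device. */
    static int
    mydrv_eth_dev_init(struct rte_eth_dev *eth_dev)
    {
        /* ... usual ops/private data setup would go here ... */
        eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
        return 0;
    }

A PMD that does not set the flag keeps the previous behaviour, with ethdev serializing its flow callbacks through the per-port mutex shown in the diff below.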
@@ -3046,10 +3046,6 @@ Caveats
 - API operations are synchronous and blocking (``EAGAIN`` cannot be
   returned).
 
-- There is no provision for re-entrancy/multi-thread safety, although nothing
-  should prevent different devices from being configured at the same
-  time. PMDs may protect their control path functions accordingly.
-
 - Stopping the data path (TX/RX) should not be necessary when managing flow
   rules. If this cannot be achieved naturally or with workarounds (such as
   temporarily replacing the burst function pointers), an appropriate error
@@ -3101,6 +3097,14 @@ This interface additionally defines the following helper function:
 - ``rte_flow_ops_get()``: get generic flow operations structure from a
   port.
 
+If PMD interfaces don't support re-entrancy/multi-thread safety,
+the rte_flow API functions will protect threads by mutex per port.
+The application can check whether ``RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE``
+is set in ``dev_flags``, meaning the PMD is thread-safe regarding rte_flow,
+so the API level protection is disabled.
+Please note that this API-level mutex protects only rte_flow functions,
+other control path functions are not in scope.
+
 More will be added over time.
 
 Device compatibility
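As an illustration of the check described in the documentation hunk above, an application could query the flag roughly as follows. This is a sketch under the assumption that dev_flags is exposed through rte_eth_dev_info_get() (as in recent DPDK releases); error handling is kept minimal and the helper name is made up.

    #include <stdbool.h>
    #include <rte_ethdev.h>

    /* Return true when the port's PMD declares its flow operations thread
     * safe, i.e. the rte_flow layer will not take its per-port mutex. */
    static bool
    flow_ops_thread_safe(uint16_t port_id)
    {
        struct rte_eth_dev_info info;

        if (rte_eth_dev_info_get(port_id, &info) != 0)
            return false; /* be conservative if the query fails */
        return (*info.dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE) != 0;
    }

Note that either way the rte_flow calls are safe; the check only tells the application whether serialization happens in the PMD or in the ethdev layer.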
@@ -79,6 +79,12 @@ New Features
   Added the FEC API which provides functions for query FEC capabilities and
   current FEC mode from device. Also, API for configuring FEC mode is also provided.
 
+* **Added thread safety to rte_flow functions.**
+
+  Added ``RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE`` device flag to indicate
+  whether PMD supports thread safe operations. If PMD doesn't set the flag,
+  rte_flow API level functions will protect the flow operations with mutex.
+
 * **Updated Broadcom bnxt driver.**
 
   Updated the Broadcom bnxt driver with new features and improvements, including:
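To show what the release note means in practice, the sketch below inserts flow rules for the same port from two threads without any application-level lock; with this change, serialization is handled either by the PMD itself or by the ethdev per-port mutex. It is only a sketch: it assumes EAL is initialized and the port is configured and started, and the drop-all rule is chosen purely for brevity.

    #include <pthread.h>
    #include <stdio.h>
    #include <rte_flow.h>
    #include <rte_errno.h>

    static void *
    insert_drop_rule(void *arg)
    {
        uint16_t port_id = *(uint16_t *)arg;
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_DROP },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        /* No application lock needed: rte_flow_create() is now thread safe. */
        if (rte_flow_create(port_id, &attr, pattern, actions, &err) == NULL)
            printf("flow create failed: %s\n",
                   err.message ? err.message : rte_strerror(rte_errno));
        return NULL;
    }

    static void
    insert_from_two_threads(uint16_t port_id)
    {
        pthread_t t1, t2;

        pthread_create(&t1, NULL, insert_drop_rule, &port_id);
        pthread_create(&t2, NULL, insert_drop_rule, &port_id);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
    }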
@@ -500,6 +500,7 @@ rte_eth_dev_allocate(const char *name)
 	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
 	eth_dev->data->port_id = port_id;
 	eth_dev->data->mtu = RTE_ETHER_MTU;
+	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);
 
 unlock:
 	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
@@ -564,6 +565,7 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
 	rte_free(eth_dev->data->mac_addrs);
 	rte_free(eth_dev->data->hash_mac_addrs);
 	rte_free(eth_dev->data->dev_private);
+	pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
 	memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
 }
 
@@ -1677,6 +1677,8 @@ struct rte_eth_dev_owner {
 	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< The owner name. */
 };
 
+/** PMD supports thread-safe flow operations */
+#define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE 0x0001
 /** Device supports link state interrupt */
 #define RTE_ETH_DEV_INTR_LSC 0x0002
 /** Device is a bonded slave */
@@ -5,6 +5,9 @@
 #ifndef _RTE_ETHDEV_CORE_H_
 #define _RTE_ETHDEV_CORE_H_
 
+#include <pthread.h>
+#include <sys/types.h>
+
 /**
  * @file
  *
@@ -180,6 +183,7 @@ struct rte_eth_dev_data {
 			 *   Valid if RTE_ETH_DEV_REPRESENTOR in dev_flags.
 			 */
 
+	pthread_mutex_t flow_ops_mutex; /**< rte_flow ops mutex. */
 	uint64_t reserved_64s[4]; /**< Reserved for future fields */
 	void *reserved_ptrs[4];   /**< Reserved for future fields */
 } __rte_cache_aligned;
@@ -207,6 +207,20 @@ rte_flow_dynf_metadata_register(void)
 	return -rte_errno;
 }
 
+static inline void
+fts_enter(struct rte_eth_dev *dev)
+{
+	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
+		pthread_mutex_lock(&dev->data->flow_ops_mutex);
+}
+
+static inline void
+fts_exit(struct rte_eth_dev *dev)
+{
+	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
+		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
+}
+
 static int
 flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
 {
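The fts_enter()/fts_exit() helpers added above implement a conditional critical section: the per-port mutex is taken only when the PMD has not advertised thread-safe flow operations. The standalone sketch below reproduces the same pattern outside of ethdev so it can be compiled and run on its own; the port structure, flag value and callback are invented for illustration.

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FLOW_OPS_THREAD_SAFE 0x0001 /* stand-in for RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE */

    struct port {
        uint32_t flags;
        pthread_mutex_t lock;
        int (*flow_op)(struct port *p);
    };

    /* Same shape as fts_enter()/fts_exit(): lock only for thread-unsafe ports. */
    static int
    guarded_flow_op(struct port *p)
    {
        int ret;

        if (!(p->flags & FLOW_OPS_THREAD_SAFE))
            pthread_mutex_lock(&p->lock);
        ret = p->flow_op(p);
        if (!(p->flags & FLOW_OPS_THREAD_SAFE))
            pthread_mutex_unlock(&p->lock);
        return ret;
    }

    static int dummy_op(struct port *p) { (void)p; return 0; }

    int main(void)
    {
        struct port p = { .flags = 0, .flow_op = dummy_op };

        pthread_mutex_init(&p.lock, NULL);
        printf("guarded op returned %d\n", guarded_flow_op(&p));
        pthread_mutex_destroy(&p.lock);
        return 0;
    }

Every rte_flow entry point in the hunks that follow is wrapped in exactly this way.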
@@ -253,12 +267,16 @@ rte_flow_validate(uint16_t port_id,
 {
 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	int ret;
 
 	if (unlikely(!ops))
 		return -rte_errno;
-	if (likely(!!ops->validate))
-		return flow_err(port_id, ops->validate(dev, attr, pattern,
-					actions, error), error);
+	if (likely(!!ops->validate)) {
+		fts_enter(dev);
+		ret = ops->validate(dev, attr, pattern, actions, error);
+		fts_exit(dev);
+		return flow_err(port_id, ret, error);
+	}
 	return rte_flow_error_set(error, ENOSYS,
 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				  NULL, rte_strerror(ENOSYS));
@@ -279,7 +297,9 @@ rte_flow_create(uint16_t port_id,
 	if (unlikely(!ops))
 		return NULL;
 	if (likely(!!ops->create)) {
+		fts_enter(dev);
 		flow = ops->create(dev, attr, pattern, actions, error);
+		fts_exit(dev);
 		if (flow == NULL)
 			flow_err(port_id, -rte_errno, error);
 		return flow;
@@ -297,12 +317,16 @@ rte_flow_destroy(uint16_t port_id,
 {
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+	int ret;
 
 	if (unlikely(!ops))
 		return -rte_errno;
-	if (likely(!!ops->destroy))
-		return flow_err(port_id, ops->destroy(dev, flow, error),
-				error);
+	if (likely(!!ops->destroy)) {
+		fts_enter(dev);
+		ret = ops->destroy(dev, flow, error);
+		fts_exit(dev);
+		return flow_err(port_id, ret, error);
+	}
 	return rte_flow_error_set(error, ENOSYS,
 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				  NULL, rte_strerror(ENOSYS));
@@ -315,11 +339,16 @@ rte_flow_flush(uint16_t port_id,
 {
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+	int ret;
 
 	if (unlikely(!ops))
 		return -rte_errno;
-	if (likely(!!ops->flush))
-		return flow_err(port_id, ops->flush(dev, error), error);
+	if (likely(!!ops->flush)) {
+		fts_enter(dev);
+		ret = ops->flush(dev, error);
+		fts_exit(dev);
+		return flow_err(port_id, ret, error);
+	}
 	return rte_flow_error_set(error, ENOSYS,
 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				  NULL, rte_strerror(ENOSYS));
@@ -335,12 +364,16 @@ rte_flow_query(uint16_t port_id,
 {
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+	int ret;
 
 	if (!ops)
 		return -rte_errno;
-	if (likely(!!ops->query))
-		return flow_err(port_id, ops->query(dev, flow, action, data,
-						    error), error);
+	if (likely(!!ops->query)) {
+		fts_enter(dev);
+		ret = ops->query(dev, flow, action, data, error);
+		fts_exit(dev);
+		return flow_err(port_id, ret, error);
+	}
 	return rte_flow_error_set(error, ENOSYS,
 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				  NULL, rte_strerror(ENOSYS));
@@ -354,11 +387,16 @@ rte_flow_isolate(uint16_t port_id,
 {
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+	int ret;
 
 	if (!ops)
 		return -rte_errno;
-	if (likely(!!ops->isolate))
-		return flow_err(port_id, ops->isolate(dev, set, error), error);
+	if (likely(!!ops->isolate)) {
+		fts_enter(dev);
+		ret = ops->isolate(dev, set, error);
+		fts_exit(dev);
+		return flow_err(port_id, ret, error);
+	}
 	return rte_flow_error_set(error, ENOSYS,
 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				  NULL, rte_strerror(ENOSYS));
@@ -962,12 +1000,16 @@ rte_flow_dev_dump(uint16_t port_id, FILE *file, struct rte_flow_error *error)
 {
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+	int ret;
 
 	if (unlikely(!ops))
 		return -rte_errno;
-	if (likely(!!ops->dev_dump))
-		return flow_err(port_id, ops->dev_dump(dev, file, error),
-				error);
+	if (likely(!!ops->dev_dump)) {
+		fts_enter(dev);
+		ret = ops->dev_dump(dev, file, error);
+		fts_exit(dev);
+		return flow_err(port_id, ret, error);
+	}
 	return rte_flow_error_set(error, ENOSYS,
 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				  NULL, rte_strerror(ENOSYS));
@@ -979,12 +1021,16 @@ rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
 {
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+	int ret;
 
 	if (unlikely(!ops))
 		return -rte_errno;
-	if (likely(!!ops->get_aged_flows))
-		return flow_err(port_id, ops->get_aged_flows(dev, contexts,
-				nb_contexts, error), error);
+	if (likely(!!ops->get_aged_flows)) {
+		fts_enter(dev);
+		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
+		fts_exit(dev);
+		return flow_err(port_id, ret, error);
+	}
 	return rte_flow_error_set(error, ENOTSUP,
 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				  NULL, rte_strerror(ENOTSUP));