net/mlx5: synchronize flow counter pool creation
Currently, counter operations are not thread safe because the counter pools' array resize is not protected.

This commit protects the counter pools' array resize with a spinlock. The counter pool statistic memory allocation is moved to the host (query) thread to keep the critical section small, since that statistic memory is needed only at query time.

The pools' array itself must still be resized by the user threads: a new pool may be used by other rte_flow APIs before the host thread runs, and if the pool were not yet saved to the counter management pools' array, the counter memory belonging to it could not be found. The pool's raw statistic memory is then filled in by the host thread.

The shared counters will be protected in another commit.

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
parent 994829e695
commit 3aa279157f
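Before the diff itself, here is a minimal, self-contained sketch of the user-thread side of the scheme described above. It is not the driver code: the structures are trimmed down, RESIZE_STEP stands in for MLX5_CNT_CONTAINER_RESIZE, and realloc() replaces the driver's allocate-and-copy resize; only the idea of doing the resize and the pool publication under one pool_update_sl spinlock mirrors the patch.

/*
 * Simplified sketch (assumed names, not the mlx5 driver code): a user thread
 * registers a freshly created pool in the shared pools[] array, resizing the
 * array under the same spinlock so the host (query) thread never observes a
 * half-updated array or an unpublished pool.
 */
#include <stdint.h>
#include <stdlib.h>
#include <rte_spinlock.h>

#define RESIZE_STEP 64			/* stands in for MLX5_CNT_CONTAINER_RESIZE */

struct pool;				/* counter pool, details omitted */

struct cnt_mng {
	rte_spinlock_t pool_update_sl;	/* protects pools[], n and n_valid */
	struct pool **pools;		/* counter pool array */
	uint16_t n;			/* allocated slots */
	volatile uint16_t n_valid;	/* used slots */
};

/* Register a new pool; returns its index, or -1 on allocation failure. */
static int
pool_register(struct cnt_mng *cmng, struct pool *pool)
{
	int idx;

	rte_spinlock_lock(&cmng->pool_update_sl);
	idx = cmng->n_valid;
	if (idx == cmng->n) {
		/* Resize the array while still holding the lock. */
		uint16_t n = cmng->n + RESIZE_STEP;
		struct pool **p = realloc(cmng->pools, n * sizeof(*p));

		if (p == NULL) {
			rte_spinlock_unlock(&cmng->pool_update_sl);
			return -1;
		}
		cmng->pools = p;
		cmng->n = n;
	}
	cmng->pools[idx] = pool;
	cmng->n_valid++;
	rte_spinlock_unlock(&cmng->pool_update_sl);
	return idx;
}

Keeping the resize, the slot assignment and the n_valid increment under a single lock is what lets the patch drop the rte_atomic16_t counter and the rte_io_wmb() barrier used before.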
@@ -1067,6 +1067,11 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 			err = -err;
 			goto error;
 		}
+		/* Check relax ordering support. */
+		if (config->hca_attr.relaxed_ordering_write &&
+		    config->hca_attr.relaxed_ordering_read &&
+		    !haswell_broadwell_cpu)
+			sh->cmng.relaxed_ordering = 1;
 		/* Check for LRO support. */
 		if (config->dest_tir && config->hca_attr.lro_cap &&
 		    config->dv_flow_en) {
@@ -487,8 +487,7 @@ mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
 	sh->cmng.min_id = MLX5_CNT_BATCH_OFFSET;
 	sh->cmng.max_id = -1;
 	sh->cmng.last_pool_idx = POOL_IDX_INVALID;
-	TAILQ_INIT(&sh->cmng.pool_list);
-	rte_spinlock_init(&sh->cmng.resize_sl);
+	rte_spinlock_init(&sh->cmng.pool_update_sl);
 	for (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) {
 		TAILQ_INIT(&sh->cmng.counters[i]);
 		rte_spinlock_init(&sh->cmng.csl[i]);
@@ -522,7 +521,7 @@ static void
 mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
 {
 	struct mlx5_counter_stats_mem_mng *mng;
-	int j;
+	int i, j;
 	int retries = 1024;
 
 	rte_errno = 0;
@@ -535,9 +534,10 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
 
 	if (sh->cmng.pools) {
 		struct mlx5_flow_counter_pool *pool;
+		uint16_t n_valid = sh->cmng.n_valid;
 
-		pool = TAILQ_FIRST(&sh->cmng.pool_list);
-		while (pool) {
+		for (i = 0; i < n_valid; ++i) {
+			pool = sh->cmng.pools[i];
 			if (!IS_EXT_POOL(pool) && pool->min_dcs)
 				claim_zero(mlx5_devx_cmd_destroy
 					   (pool->min_dcs));
@@ -553,9 +553,7 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
 						(MLX5_GET_POOL_CNT_EXT
 						(pool, j)->dcs));
 			}
-			TAILQ_REMOVE(&sh->cmng.pool_list, pool, next);
 			mlx5_free(pool);
-			pool = TAILQ_FIRST(&sh->cmng.pool_list);
 		}
 		mlx5_free(sh->cmng.pools);
 	}
@@ -406,8 +406,10 @@ struct mlx5_flow_counter_pool {
 	uint32_t type:2; /* Memory type behind the counter array. */
 	volatile uint32_t query_gen:1; /* Query round. */
 	rte_spinlock_t sl; /* The pool lock. */
+	rte_spinlock_t csl; /* The pool counter free list lock. */
 	struct mlx5_counter_stats_raw *raw;
-	struct mlx5_counter_stats_raw *raw_hw; /* The raw on HW working. */
+	struct mlx5_counter_stats_raw *raw_hw;
+	/* The raw on HW working. */
 };
 
 /* Memory management structure for group of counter statistics raws. */
@@ -429,17 +431,16 @@ TAILQ_HEAD(mlx5_counter_pools, mlx5_flow_counter_pool);
 
 /* Counter global management structure. */
 struct mlx5_flow_counter_mng {
-	rte_atomic16_t n_valid; /* Number of valid pools. */
+	volatile uint16_t n_valid; /* Number of valid pools. */
 	uint16_t n; /* Number of pools. */
 	uint16_t last_pool_idx; /* Last used pool index */
 	int min_id; /* The minimum counter ID in the pools. */
 	int max_id; /* The maximum counter ID in the pools. */
-	rte_spinlock_t resize_sl; /* The resize lock. */
+	rte_spinlock_t pool_update_sl; /* The pool update lock. */
 	rte_spinlock_t csl[MLX5_COUNTER_TYPE_MAX];
 	/* The counter free list lock. */
 	struct mlx5_counters counters[MLX5_COUNTER_TYPE_MAX];
 	/* Free counter list. */
-	struct mlx5_counter_pools pool_list; /* Counter pool list. */
 	struct mlx5_flow_counter_pool **pools; /* Counter pool array. */
 	struct mlx5_counter_stats_mem_mng *mem_mng;
 	/* Hold the memory management for the next allocated pools raws. */
@@ -447,6 +448,7 @@ struct mlx5_flow_counter_mng {
 	uint8_t pending_queries;
 	uint16_t pool_index;
 	uint8_t query_thread_on;
+	bool relaxed_ordering;
 	LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs;
 	LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
 };
@@ -13,6 +13,7 @@
 #include <rte_common.h>
 #include <rte_ether.h>
 #include <rte_ethdev_driver.h>
+#include <rte_eal_paging.h>
 #include <rte_flow.h>
 #include <rte_cycles.h>
 #include <rte_flow_driver.h>
@@ -29,6 +30,7 @@
 #include "mlx5_flow.h"
 #include "mlx5_flow_os.h"
 #include "mlx5_rxtx.h"
+#include "mlx5_common_os.h"
 
 /** Device flow drivers. */
 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
@@ -6589,6 +6591,111 @@ mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
 	return -ENOTSUP;
 }
 
+/**
+ * Allocate a new memory for the counter values wrapped by all the needed
+ * management.
+ *
+ * @param[in] sh
+ *   Pointer to mlx5_dev_ctx_shared object.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise.
+ */
+static int
+mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_devx_mkey_attr mkey_attr;
+	struct mlx5_counter_stats_mem_mng *mem_mng;
+	volatile struct flow_counter_stats *raw_data;
+	int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
+	int size = (sizeof(struct flow_counter_stats) *
+			MLX5_COUNTERS_PER_POOL +
+			sizeof(struct mlx5_counter_stats_raw)) * raws_n +
+			sizeof(struct mlx5_counter_stats_mem_mng);
+	size_t pgsize = rte_mem_page_size();
+	uint8_t *mem;
+	int i;
+
+	if (pgsize == (size_t)-1) {
+		DRV_LOG(ERR, "Failed to get mem page size");
+		rte_errno = ENOMEM;
+		return -ENOMEM;
+	}
+	mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, SOCKET_ID_ANY);
+	if (!mem) {
+		rte_errno = ENOMEM;
+		return -ENOMEM;
+	}
+	mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
+	size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
+	mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
+						 IBV_ACCESS_LOCAL_WRITE);
+	if (!mem_mng->umem) {
+		rte_errno = errno;
+		mlx5_free(mem);
+		return -rte_errno;
+	}
+	mkey_attr.addr = (uintptr_t)mem;
+	mkey_attr.size = size;
+	mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
+	mkey_attr.pd = sh->pdn;
+	mkey_attr.log_entity_size = 0;
+	mkey_attr.pg_access = 0;
+	mkey_attr.klm_array = NULL;
+	mkey_attr.klm_num = 0;
+	mkey_attr.relaxed_ordering = sh->cmng.relaxed_ordering;
+	mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
+	if (!mem_mng->dm) {
+		mlx5_glue->devx_umem_dereg(mem_mng->umem);
+		rte_errno = errno;
+		mlx5_free(mem);
+		return -rte_errno;
+	}
+	mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
+	raw_data = (volatile struct flow_counter_stats *)mem;
+	for (i = 0; i < raws_n; ++i) {
+		mem_mng->raws[i].mem_mng = mem_mng;
+		mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
+	}
+	for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
+		LIST_INSERT_HEAD(&sh->cmng.free_stat_raws,
+				 mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i,
+				 next);
+	LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
+	sh->cmng.mem_mng = mem_mng;
+	return 0;
+}
+
+/**
+ * Set the statistic memory to the new counter pool.
+ *
+ * @param[in] sh
+ *   Pointer to mlx5_dev_ctx_shared object.
+ * @param[in] pool
+ *   Pointer to the pool to set the statistic memory.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise.
+ */
+static int
+mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh,
+			       struct mlx5_flow_counter_pool *pool)
+{
+	struct mlx5_flow_counter_mng *cmng = &sh->cmng;
+	/* Resize statistic memory once used out. */
+	if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) &&
+	    mlx5_flow_create_counter_stat_mem_mng(sh)) {
+		DRV_LOG(ERR, "Cannot resize counter stat mem.");
+		return -1;
+	}
+	rte_spinlock_lock(&pool->sl);
+	pool->raw = cmng->mem_mng->raws + pool->index %
+		    MLX5_CNT_CONTAINER_RESIZE;
+	rte_spinlock_unlock(&pool->sl);
+	pool->raw_hw = NULL;
+	return 0;
+}
+
 #define MLX5_POOL_QUERY_FREQ_US 1000000
 
 /**
@@ -6603,7 +6710,7 @@ mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh)
 {
 	uint32_t pools_n, us;
 
-	pools_n = rte_atomic16_read(&sh->cmng.n_valid);
+	pools_n = __atomic_load_n(&sh->cmng.n_valid, __ATOMIC_RELAXED);
 	us = MLX5_POOL_QUERY_FREQ_US / pools_n;
 	DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
 	if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
@@ -6629,12 +6736,17 @@ mlx5_flow_query_alarm(void *arg)
 	uint16_t pool_index = sh->cmng.pool_index;
 	struct mlx5_flow_counter_mng *cmng = &sh->cmng;
 	struct mlx5_flow_counter_pool *pool;
+	uint16_t n_valid;
 
 	if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
 		goto set_alarm;
-	rte_spinlock_lock(&cmng->resize_sl);
+	rte_spinlock_lock(&cmng->pool_update_sl);
 	pool = cmng->pools[pool_index];
-	rte_spinlock_unlock(&cmng->resize_sl);
+	n_valid = cmng->n_valid;
+	rte_spinlock_unlock(&cmng->pool_update_sl);
+	/* Set the statistic memory to the new created pool. */
+	if ((!pool->raw && mlx5_flow_set_counter_stat_mem(sh, pool)))
+		goto set_alarm;
 	if (pool->raw_hw)
 		/* There is a pool query in progress. */
 		goto set_alarm;
@@ -6667,7 +6779,7 @@ mlx5_flow_query_alarm(void *arg)
 	LIST_REMOVE(pool->raw_hw, next);
 	sh->cmng.pending_queries++;
 	pool_index++;
-	if (pool_index >= rte_atomic16_read(&cmng->n_valid))
+	if (pool_index >= n_valid)
 		pool_index = 0;
 set_alarm:
 	sh->cmng.pool_index = pool_index;
@@ -4653,104 +4653,35 @@ static struct mlx5_flow_counter_pool *
 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
 {
 	uint32_t i;
+	struct mlx5_flow_counter_pool *pool = NULL;
 
+	rte_spinlock_lock(&cmng->pool_update_sl);
 	/* Check last used pool. */
 	if (cmng->last_pool_idx != POOL_IDX_INVALID &&
-	    flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id))
-		return cmng->pools[cmng->last_pool_idx];
+	    flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
+		pool = cmng->pools[cmng->last_pool_idx];
+		goto out;
+	}
 	/* ID out of range means no suitable pool in the container. */
 	if (id > cmng->max_id || id < cmng->min_id)
-		return NULL;
+		goto out;
 	/*
 	 * Find the pool from the end of the container, since mostly counter
 	 * ID is sequence increasing, and the last pool should be the needed
 	 * one.
 	 */
-	i = rte_atomic16_read(&cmng->n_valid);
+	i = cmng->n_valid;
 	while (i--) {
-		struct mlx5_flow_counter_pool *pool = cmng->pools[i];
+		struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
 
-		if (flow_dv_is_counter_in_pool(pool, id))
-			return pool;
+		if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
+			pool = pool_tmp;
+			break;
+		}
 	}
-	return NULL;
-}
-
-/**
- * Allocate a new memory for the counter values wrapped by all the needed
- * management.
- *
- * @param[in] dev
- *   Pointer to the Ethernet device structure.
- * @param[in] raws_n
- *   The raw memory areas - each one for MLX5_COUNTERS_PER_POOL counters.
- *
- * @return
- *   The new memory management pointer on success, otherwise NULL and rte_errno
- *   is set.
- */
-static struct mlx5_counter_stats_mem_mng *
-flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_ctx_shared *sh = priv->sh;
-	struct mlx5_devx_mkey_attr mkey_attr;
-	struct mlx5_counter_stats_mem_mng *mem_mng;
-	volatile struct flow_counter_stats *raw_data;
-	int size = (sizeof(struct flow_counter_stats) *
-			MLX5_COUNTERS_PER_POOL +
-			sizeof(struct mlx5_counter_stats_raw)) * raws_n +
-			sizeof(struct mlx5_counter_stats_mem_mng);
-	size_t pgsize = rte_mem_page_size();
-	if (pgsize == (size_t)-1) {
-		DRV_LOG(ERR, "Failed to get mem page size");
-		rte_errno = ENOMEM;
-		return NULL;
-	}
-	uint8_t *mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize,
-				   SOCKET_ID_ANY);
-	int i;
-
-	if (!mem) {
-		rte_errno = ENOMEM;
-		return NULL;
-	}
-	mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
-	size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
-	mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
-						 IBV_ACCESS_LOCAL_WRITE);
-	if (!mem_mng->umem) {
-		rte_errno = errno;
-		mlx5_free(mem);
-		return NULL;
-	}
-	mkey_attr.addr = (uintptr_t)mem;
-	mkey_attr.size = size;
-	mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
-	mkey_attr.pd = sh->pdn;
-	mkey_attr.log_entity_size = 0;
-	mkey_attr.pg_access = 0;
-	mkey_attr.klm_array = NULL;
-	mkey_attr.klm_num = 0;
-	if (priv->config.hca_attr.relaxed_ordering_write &&
-	    priv->config.hca_attr.relaxed_ordering_read &&
-	    !haswell_broadwell_cpu)
-		mkey_attr.relaxed_ordering = 1;
-	mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
-	if (!mem_mng->dm) {
-		mlx5_glue->devx_umem_dereg(mem_mng->umem);
-		rte_errno = errno;
-		mlx5_free(mem);
-		return NULL;
-	}
-	mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
-	raw_data = (volatile struct flow_counter_stats *)mem;
-	for (i = 0; i < raws_n; ++i) {
-		mem_mng->raws[i].mem_mng = mem_mng;
-		mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
-	}
-	LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
-	return mem_mng;
+out:
+	rte_spinlock_unlock(&cmng->pool_update_sl);
+	return pool;
 }
 
 /**
@@ -4767,7 +4698,6 @@ flow_dv_container_resize(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
-	struct mlx5_counter_stats_mem_mng *mem_mng = NULL;
 	void *old_pools = cmng->pools;
 	uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
 	uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
@@ -4780,30 +4710,8 @@ flow_dv_container_resize(struct rte_eth_dev *dev)
 	if (old_pools)
 		memcpy(pools, old_pools, cmng->n *
 		       sizeof(struct mlx5_flow_counter_pool *));
-	/*
-	 * Fallback mode query the counter directly, no background query
-	 * resources are needed.
-	 */
-	if (!priv->counter_fallback) {
-		int i;
-
-		mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
-			MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
-		if (!mem_mng) {
-			mlx5_free(pools);
-			return -ENOMEM;
-		}
-		for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
-			LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
-					 mem_mng->raws +
-					 MLX5_CNT_CONTAINER_RESIZE +
-					 i, next);
-	}
-	rte_spinlock_lock(&cmng->resize_sl);
 	cmng->n = resize;
-	cmng->mem_mng = mem_mng;
 	cmng->pools = pools;
-	rte_spinlock_unlock(&cmng->resize_sl);
 	if (old_pools)
 		mlx5_free(old_pools);
 	return 0;
@@ -4842,9 +4750,14 @@ _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
 					  0, pkts, bytes, 0, NULL, NULL, 0);
 	}
 	rte_spinlock_lock(&pool->sl);
-	offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
-	*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
-	*bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
+	if (!pool->raw) {
+		*pkts = 0;
+		*bytes = 0;
+	} else {
+		offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
+		*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
+		*bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
+	}
 	rte_spinlock_unlock(&pool->sl);
 	return 0;
 }
@@ -4871,12 +4784,9 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_counter_pool *pool;
 	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
-	int16_t n_valid = rte_atomic16_read(&cmng->n_valid);
 	uint32_t fallback = priv->counter_fallback;
 	uint32_t size = sizeof(*pool);
 
-	if (cmng->n == n_valid && flow_dv_container_resize(dev))
-		return NULL;
 	size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
 	size += (!fallback ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE);
 	size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE);
@@ -4885,24 +4795,26 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
 		rte_errno = ENOMEM;
 		return NULL;
 	}
-	if (!fallback) {
-		pool->min_dcs = dcs;
-		pool->raw = cmng->mem_mng->raws + n_valid %
-			    MLX5_CNT_CONTAINER_RESIZE;
-	}
-	pool->raw_hw = NULL;
+	pool->raw = NULL;
 	pool->type = 0;
-	pool->type |= (!fallback ? 0 : CNT_POOL_TYPE_EXT);
 	pool->type |= (!age ? 0 : CNT_POOL_TYPE_AGE);
 	pool->query_gen = 0;
+	pool->min_dcs = dcs;
 	rte_spinlock_init(&pool->sl);
+	rte_spinlock_init(&pool->csl);
 	TAILQ_INIT(&pool->counters[0]);
 	TAILQ_INIT(&pool->counters[1]);
-	TAILQ_INSERT_HEAD(&cmng->pool_list, pool, next);
-	pool->index = n_valid;
 	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
-	cmng->pools[n_valid] = pool;
-	if (fallback) {
+	rte_spinlock_lock(&cmng->pool_update_sl);
+	pool->index = cmng->n_valid;
+	if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
+		mlx5_free(pool);
+		rte_spinlock_unlock(&cmng->pool_update_sl);
+		return NULL;
+	}
+	cmng->pools[pool->index] = pool;
+	cmng->n_valid++;
+	if (unlikely(fallback)) {
 		int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
 
 		if (base < cmng->min_id)
@@ -4910,10 +4822,9 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
 		if (base > cmng->max_id)
 			cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
 		cmng->last_pool_idx = pool->index;
+		pool->type |= CNT_POOL_TYPE_EXT;
 	}
-	/* Pool initialization must be updated before host thread access. */
-	rte_io_wmb();
-	rte_atomic16_add(&cmng->n_valid, 1);
+	rte_spinlock_unlock(&cmng->pool_update_sl);
 	return pool;
 }
 
@@ -5219,12 +5130,16 @@ flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
 	 *
 	 */
 	if (!priv->counter_fallback) {
+		rte_spinlock_lock(&pool->csl);
 		TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
+		rte_spinlock_unlock(&pool->csl);
 	} else {
 		cnt_type = IS_AGE_POOL(pool) ? MLX5_COUNTER_TYPE_AGE :
 					       MLX5_COUNTER_TYPE_ORIGIN;
+		rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
 		TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
 				  cnt, next);
+		rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
 	}
 }
 
@@ -259,7 +259,7 @@ flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
 	struct mlx5_flow_counter_ext *cnt_ext = NULL;
 	struct mlx5_flow_counter *cnt = NULL;
 	union mlx5_l3t_data data;
-	uint32_t n_valid = rte_atomic16_read(&cmng->n_valid);
+	uint32_t n_valid = cmng->n_valid;
 	uint32_t pool_idx, cnt_idx;
 	uint32_t i;
 	int ret;
@@ -317,8 +317,7 @@ flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
 		cnt = MLX5_POOL_GET_CNT(pool, 0);
 		cmng->pools[n_valid] = pool;
 		pool_idx = n_valid;
-		rte_atomic16_add(&cmng->n_valid, 1);
-		TAILQ_INSERT_HEAD(&cmng->pool_list, pool, next);
+		cmng->n_valid++;
 	}
 	i = MLX5_CNT_ARRAY_IDX(pool, cnt);
 	cnt_idx = MLX5_MAKE_CNT_IDX(pool_idx, i);
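To round out the picture, a second sketch continues the simplified structures from the sketch near the top of this page and shows the host/query thread side: the pool pointer is read under pool_update_sl, the raw statistic memory is attached lazily by the host thread, and a query on a pool whose memory has not been attached yet simply reports zero, in the spirit of the _flow_dv_query_count() hunk above. The names and helpers here are illustrative, not the driver's.

/*
 * Simplified host-thread sketch (assumed names, continuing the earlier
 * example): fetch the pool under the array lock, attach statistic memory if
 * it is still missing, and report zero for counters whose memory is not
 * attached yet.
 */
struct pool {
	rte_spinlock_t sl;	/* protects raw */
	uint64_t *raw;		/* statistic memory, NULL until attached */
};

static void
host_thread_round(struct cnt_mng *cmng, uint16_t pool_index, uint64_t *stats)
{
	struct pool *pool;

	rte_spinlock_lock(&cmng->pool_update_sl);
	pool = cmng->pools[pool_index];
	rte_spinlock_unlock(&cmng->pool_update_sl);
	if (pool->raw == NULL) {
		/* Attach statistic memory outside the user threads' path. */
		rte_spinlock_lock(&pool->sl);
		pool->raw = stats;
		rte_spinlock_unlock(&pool->sl);
	}
}

static uint64_t
query_hits(struct pool *pool, unsigned int offset)
{
	uint64_t hits;

	rte_spinlock_lock(&pool->sl);
	/* Counter already created but statistics not attached yet: report 0. */
	hits = pool->raw == NULL ? 0 : pool->raw[offset];
	rte_spinlock_unlock(&pool->sl);
	return hits;
}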