net/mlx5: rename ib in names

Renames in this commit:
mlx5_ibv_list -> mlx5_dev_ctx_list
mlx5_alloc_shared_ibctx -> mlx5_alloc_shared_dev_ctx
mlx5_free_shared_ibctx -> mlx5_free_shared_dev_ctx
mlx5_ibv_shared_port -> mlx5_dev_shared_port
ibv_port -> dev_port

Signed-off-by: Ophir Munk <ophirmu@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
Author: Ophir Munk <ophirmu@mellanox.com>, 2020-06-10 09:32:27 +00:00
Committed by: Ferruh Yigit
Parent: 21b7c452a6
Commit: 9138989036
10 changed files with 42 additions and 41 deletions


@@ -563,7 +563,7 @@ err_secondary:
strerror(rte_errno));
goto error;
}
-sh = mlx5_alloc_shared_ibctx(spawn, &config);
+sh = mlx5_alloc_shared_dev_ctx(spawn, &config);
if (!sh)
return NULL;
config.devx = sh->devx;
@@ -693,7 +693,7 @@ err_secondary:
goto error;
}
priv->sh = sh;
-priv->ibv_port = spawn->phys_port;
+priv->dev_port = spawn->phys_port;
priv->pci_dev = spawn->pci_dev;
priv->mtu = RTE_ETHER_MTU;
priv->mp_id.port_id = port_id;
@@ -1188,7 +1188,7 @@ error:
rte_eth_dev_release_port(eth_dev);
}
if (sh)
-mlx5_free_shared_ibctx(sh);
+mlx5_free_shared_dev_ctx(sh);
MLX5_ASSERT(err > 0);
rte_errno = err;
return NULL;


@@ -176,8 +176,9 @@ static struct mlx5_local_data mlx5_local_data;
/** Driver-specific log messages type. */
int mlx5_logtype;
-static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
-static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
+LIST_HEAD_INITIALIZER();
+static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
@@ -588,18 +589,18 @@ mlx5_flow_ipool_destroy(struct mlx5_dev_ctx_shared *sh)
}
/**
-* Allocate shared IB device context. If there is multiport device the
+* Allocate shared device context. If there is multiport device the
* master and representors will share this context, if there is single
-* port dedicated IB device, the context will be used by only given
+* port dedicated device, the context will be used by only given
* port due to unification.
*
-* Routine first searches the context for the specified IB device name,
+* Routine first searches the context for the specified device name,
* if found the shared context assumed and reference counter is incremented.
* If no context found the new one is created and initialized with specified
-* IB device context and parameters.
+* device context and parameters.
*
* @param[in] spawn
-* Pointer to the IB device attributes (name, port, etc).
+* Pointer to the device attributes (name, port, etc).
* @param[in] config
* Pointer to device configuration structure.
*
@@ -608,8 +609,8 @@ mlx5_flow_ipool_destroy(struct mlx5_dev_ctx_shared *sh)
* otherwise NULL and rte_errno is set.
*/
struct mlx5_dev_ctx_shared *
-mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
-const struct mlx5_dev_config *config)
+mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
+const struct mlx5_dev_config *config)
{
struct mlx5_dev_ctx_shared *sh;
int err = 0;
@@ -619,9 +620,9 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
MLX5_ASSERT(spawn);
/* Secondary process should not create the shared context. */
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
-pthread_mutex_lock(&mlx5_ibv_list_mutex);
+pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
/* Search for IB context by device name. */
-LIST_FOREACH(sh, &mlx5_ibv_list, next) {
+LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
if (!strcmp(sh->ibdev_name,
mlx5_os_get_dev_device_name(spawn->phys_dev))) {
sh->refcnt++;
@@ -633,7 +634,7 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
sh = rte_zmalloc("ethdev shared ib context",
sizeof(struct mlx5_dev_ctx_shared) +
spawn->max_port *
-sizeof(struct mlx5_ibv_shared_port),
+sizeof(struct mlx5_dev_shared_port),
RTE_CACHE_LINE_SIZE);
if (!sh) {
DRV_LOG(ERR, "shared context allocation failure");
@@ -722,12 +723,12 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
sh, mem_event_cb);
rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
/* Add context to the global device list. */
-LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
+LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
exit:
-pthread_mutex_unlock(&mlx5_ibv_list_mutex);
+pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
return sh;
error:
-pthread_mutex_unlock(&mlx5_ibv_list_mutex);
+pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
MLX5_ASSERT(sh);
if (sh->tis)
claim_zero(mlx5_devx_cmd_destroy(sh->tis));
@@ -753,14 +754,14 @@ error:
* Pointer to mlx5_dev_ctx_shared object to free
*/
void
-mlx5_free_shared_ibctx(struct mlx5_dev_ctx_shared *sh)
+mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
{
-pthread_mutex_lock(&mlx5_ibv_list_mutex);
+pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
/* Check the object presence in the list. */
struct mlx5_dev_ctx_shared *lctx;
-LIST_FOREACH(lctx, &mlx5_ibv_list, next)
+LIST_FOREACH(lctx, &mlx5_dev_ctx_list, next)
if (lctx == sh)
break;
MLX5_ASSERT(lctx);
@@ -802,7 +803,7 @@ mlx5_free_shared_ibctx(struct mlx5_dev_ctx_shared *sh)
mlx5_flow_id_pool_release(sh->flow_id_pool);
rte_free(sh);
exit:
-pthread_mutex_unlock(&mlx5_ibv_list_mutex);
+pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
}
/**
@@ -1202,7 +1203,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
* mlx5_nl_mac_addr_flush() uses ibdev_path for retrieveing
* ifindex if Netlink fails.
*/
-mlx5_free_shared_ibctx(priv->sh);
+mlx5_free_shared_dev_ctx(priv->sh);
if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
unsigned int c = 0;
uint16_t port_id;
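
The doc comment updated above describes the allocation scheme: the shared context is keyed by the device name, reference-counted, shared by the master and representor ports of a multiport device, and freed only when the last user releases it. Below is a minimal, standalone sketch of that lookup-or-create pattern; every name in it (dev_ctx, dev_ctx_list, dev_ctx_acquire, dev_ctx_release) is hypothetical and only illustrates the idea, not the driver code.

/*
 * Sketch of a name-keyed, reference-counted shared context: search a
 * global list, bump the counter on a hit, otherwise allocate a new
 * entry and link it in.  All names here are hypothetical.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

struct dev_ctx {
	LIST_ENTRY(dev_ctx) next; /* linkage in the global list */
	unsigned int refcnt;      /* one reference per spawned port */
	char name[64];            /* device name used as the key */
};

static LIST_HEAD(, dev_ctx) dev_ctx_list = LIST_HEAD_INITIALIZER();
static pthread_mutex_t dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;

static struct dev_ctx *
dev_ctx_acquire(const char *name)
{
	struct dev_ctx *ctx;

	pthread_mutex_lock(&dev_ctx_list_mutex);
	LIST_FOREACH(ctx, &dev_ctx_list, next)
		if (!strcmp(ctx->name, name)) {
			ctx->refcnt++; /* share the existing context */
			goto exit;
		}
	ctx = calloc(1, sizeof(*ctx)); /* first user: create it */
	if (ctx != NULL) {
		strncpy(ctx->name, name, sizeof(ctx->name) - 1);
		ctx->refcnt = 1;
		LIST_INSERT_HEAD(&dev_ctx_list, ctx, next);
	}
exit:
	pthread_mutex_unlock(&dev_ctx_list_mutex);
	return ctx;
}

static void
dev_ctx_release(struct dev_ctx *ctx)
{
	pthread_mutex_lock(&dev_ctx_list_mutex);
	if (--ctx->refcnt == 0) { /* last user frees it */
		LIST_REMOVE(ctx, next);
		free(ctx);
	}
	pthread_mutex_unlock(&dev_ctx_list_mutex);
}

The driver version additionally sizes the allocation for spawn->max_port per-port slots (see the rte_zmalloc call above) and keeps the context on the list until mlx5_free_shared_dev_ctx() drops the last reference.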


@@ -446,7 +446,7 @@ struct mlx5_flow_counter_mng {
#define MLX5_AGE_GET(age_info, BIT) \
((age_info)->flags & (1 << (BIT)))
#define GET_PORT_AGE_INFO(priv) \
-(&((priv)->sh->port[(priv)->ibv_port - 1].age_info))
+(&((priv)->sh->port[(priv)->dev_port - 1].age_info))
/* Aging information for per port. */
struct mlx5_age_info {
@@ -456,7 +456,7 @@ struct mlx5_age_info {
};
/* Per port data of shared IB device. */
-struct mlx5_ibv_shared_port {
+struct mlx5_dev_shared_port {
uint32_t ih_port_id;
uint32_t devx_ih_port_id;
/*
@@ -571,7 +571,7 @@ struct mlx5_dev_ctx_shared {
struct mlx5_devx_obj *tis; /* TIS object. */
struct mlx5_devx_obj *td; /* Transport domain. */
struct mlx5_flow_id_pool *flow_id_pool; /* Flow ID pool. */
-struct mlx5_ibv_shared_port port[]; /* per device port data array. */
+struct mlx5_dev_shared_port port[]; /* per device port data array. */
};
/* Per-process private structure. */
@@ -593,7 +593,7 @@ TAILQ_HEAD(mlx5_flow_meters, mlx5_flow_meter);
struct mlx5_priv {
struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
struct mlx5_dev_ctx_shared *sh; /* Shared device context. */
-uint32_t ibv_port; /* IB device port number. */
+uint32_t dev_port; /* Device port number. */
struct rte_pci_device *pci_dev; /* Backend PCI device. */
struct rte_ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
@@ -697,9 +697,9 @@ void mlx5_dev_close(struct rte_eth_dev *dev);
port_id = mlx5_eth_find_next(port_id + 1, pci_dev))
int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs);
struct mlx5_dev_ctx_shared *
-mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
-const struct mlx5_dev_config *config);
-void mlx5_free_shared_ibctx(struct mlx5_dev_ctx_shared *sh);
+mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
+const struct mlx5_dev_config *config);
+void mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh);
void mlx5_free_table_hash_list(struct mlx5_priv *priv);
int mlx5_alloc_table_hash_list(struct mlx5_priv *priv);
void mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
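
The renamed struct keeps the existing layout: mlx5_dev_ctx_shared ends with a flexible array member of mlx5_dev_shared_port, one slot per physical port, and accessors such as GET_PORT_AGE_INFO subtract one from dev_port because device ports are numbered from 1. A small illustrative sketch of that layout, using hypothetical names (shared_ctx, shared_port, shared_ctx_port) rather than the driver's:

/*
 * Sketch of a shared context ending in a flexible array of per-port
 * slots, indexed by a 1-based port number.  Hypothetical names only.
 */
#include <stdint.h>
#include <stdlib.h>

struct shared_port {                /* stands in for mlx5_dev_shared_port */
	uint32_t ih_port_id;
	uint32_t devx_ih_port_id;
};

struct shared_ctx {                 /* stands in for mlx5_dev_ctx_shared */
	uint32_t max_port;
	struct shared_port port[];  /* one entry per physical port */
};

static struct shared_ctx *
shared_ctx_alloc(uint32_t max_port)
{
	/* One allocation covers the header and all per-port slots. */
	struct shared_ctx *sh = calloc(1, sizeof(*sh) +
				       max_port * sizeof(struct shared_port));
	if (sh != NULL)
		sh->max_port = max_port;
	return sh;
}

static struct shared_port *
shared_ctx_port(struct shared_ctx *sh, uint32_t dev_port)
{
	/* Port numbers start at 1, the array index at 0. */
	return &sh->port[dev_port - 1];
}

Keeping the per-port slots in the same allocation as the shared data is why the allocation routine above adds spawn->max_port * sizeof(struct mlx5_dev_shared_port) to the context size.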


@@ -509,7 +509,7 @@ mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
} flow_attr = {
.attr = {
.num_of_specs = 2,
-.port = (uint8_t)priv->ibv_port,
+.port = (uint8_t)priv->dev_port,
},
.eth = {
.type = IBV_FLOW_SPEC_ETH,


@@ -2725,7 +2725,7 @@ flow_dv_port_id_action_resource_register
*cache_resource = *resource;
/*
* Depending on rdma_core version the glue routine calls
-* either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
+* either mlx5dv_dr_action_create_dest_ib_port(domain, dev_port)
* or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
*/
cache_resource->action =
@@ -7557,7 +7557,7 @@ flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
* This parameter is transferred to
* mlx5dv_dr_action_create_dest_ib_port().
*/
-*dst_port_id = priv->ibv_port;
+*dst_port_id = priv->dev_port;
#else
/*
* Legacy mode, no LAG configurations is supported.


@@ -1755,7 +1755,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
/* Other members of attr will be ignored. */
dev_flow->verbs.attr.priority =
mlx5_flow_adjust_priority(dev, priority, subpriority);
-dev_flow->verbs.attr.port = (uint8_t)priv->ibv_port;
+dev_flow->verbs.attr.port = (uint8_t)priv->dev_port;
return 0;
}


@@ -946,7 +946,7 @@ mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
container_of(txq, struct mlx5_txq_ctrl, txq);
struct ibv_qp_attr mod = {
.qp_state = IBV_QPS_RESET,
-.port_num = (uint8_t)priv->ibv_port,
+.port_num = (uint8_t)priv->dev_port,
};
struct ibv_qp *qp = txq_ctrl->obj->qp;


@@ -149,7 +149,7 @@ mlx5_read_ib_stat(struct mlx5_priv *priv, const char *ctr_name, uint64_t *stat)
if (priv->sh) {
MKSTR(path, "%s/ports/%d/hw_counters/%s",
priv->sh->ibdev_path,
-priv->ibv_port,
+priv->dev_port,
ctr_name);
fd = open(path, O_RDONLY);
if (fd != -1) {
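
Here the per-port counter is read from a sysfs file whose path is assembled from the device sysfs path, the (renamed) dev_port number and the counter name. A self-contained sketch of such a read, assuming the same "<ibdev_path>/ports/<port>/hw_counters/<name>" layout; read_port_counter is a hypothetical helper, not a driver function.

/*
 * Sketch: read one 64-bit counter value from a per-port sysfs file.
 */
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>

static int
read_port_counter(const char *ibdev_path, unsigned int dev_port,
		  const char *ctr_name, uint64_t *stat)
{
	char path[256];
	char buf[32];
	ssize_t n;
	int fd;

	/* "<ibdev_path>/ports/<port>/hw_counters/<name>" */
	snprintf(path, sizeof(path), "%s/ports/%u/hw_counters/%s",
		 ibdev_path, dev_port, ctr_name);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n <= 0)
		return -1;
	buf[n] = '\0';
	return sscanf(buf, "%" SCNu64, stat) == 1 ? 0 : -1;
}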


@@ -342,7 +342,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
/* Enable datapath on secondary process. */
mlx5_mp_req_start_rxtx(dev);
if (priv->sh->intr_handle.fd >= 0) {
-priv->sh->port[priv->ibv_port - 1].ih_port_id =
+priv->sh->port[priv->dev_port - 1].ih_port_id =
(uint32_t)dev->data->port_id;
} else {
DRV_LOG(INFO, "port %u starts without LSC and RMV interrupts.",
@@ -351,7 +351,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
dev->data->dev_conf.intr_conf.rmv = 0;
}
if (priv->sh->intr_handle_devx.fd >= 0)
-priv->sh->port[priv->ibv_port - 1].devx_ih_port_id =
+priv->sh->port[priv->dev_port - 1].devx_ih_port_id =
(uint32_t)dev->data->port_id;
return 0;
error:
@@ -394,8 +394,8 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
/* All RX queue flags will be cleared in the flush interface. */
mlx5_flow_list_flush(dev, &priv->flows, true);
mlx5_rx_intr_vec_disable(dev);
-priv->sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
-priv->sh->port[priv->ibv_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
+priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
+priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
mlx5_txq_stop(dev);
mlx5_rxq_stop(dev);
}


@@ -684,7 +684,7 @@ mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
/* Move the QP to this state. */
.qp_state = IBV_QPS_INIT,
/* IB device port number. */
-.port_num = (uint8_t)priv->ibv_port,
+.port_num = (uint8_t)priv->dev_port,
};
ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
(IBV_QP_STATE | IBV_QP_PORT));