net/mlx5: switch to the shared IB device context

The code is updated to use the shared IB device context and
device handles. The IB device context is shared between
representors created over a single multiport IB device. All
Verbs and DevX objects are now created within this shared context.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
Author:    Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Date:      2019-03-27 13:15:43 +00:00
Committer: Ferruh Yigit
Commit:    f048f3d479 (parent: d485cdca01)
9 changed files with 33 additions and 31 deletions
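
For illustration, a minimal sketch (C) of the access pattern this patch establishes throughout the PMD. The struct bodies below are simplified assumptions for the sketch; the real mlx5_ibv_shared carries more state (reference counting, per-port data) than shown:

    /* One shared IB context per multiport IB device; every representor
     * (port) spawned over that device points to the same instance. */
    struct mlx5_ibv_shared {
            struct ibv_context *ctx; /* Verbs/DevX device context. */
            /* ... refcount, per-port data, etc. (simplified) ... */
    };

    struct mlx5_priv {
            struct mlx5_ibv_shared *sh; /* Shared IB device context. */
            uint32_t ibv_port;          /* IB port within the device. */
            /* ... */
    };

    /* Verbs/DevX objects are created within the shared context, so
     * every former priv->ctx access becomes priv->sh->ctx, e.g.: */
    cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);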

drivers/net/mlx5/mlx5.c

@@ -402,7 +402,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
 		dev->data->port_id,
-		((priv->ctx != NULL) ? priv->ctx->device->name : ""));
+		((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : ""));
 	/* In case mlx5_dev_stop() has not been called. */
 	mlx5_dev_interrupt_handler_uninstall(dev);
 	mlx5_traffic_disable(dev);
@@ -1095,7 +1095,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		goto error;
 	}
 	priv->sh = sh;
-	priv->ctx = sh->ctx;
 	priv->ibv_port = spawn->ibv_port;
 	priv->mtu = ETHER_MTU;
 #ifndef RTE_ARCH_64

drivers/net/mlx5/mlx5.h

@@ -223,7 +223,6 @@ struct mlx5_priv {
 	struct rte_eth_dev_data *dev_data;  /* Pointer to device data. */
 	struct mlx5_ibv_shared *sh; /* Shared IB device context. */
 	uint32_t ibv_port; /* IB device port number. */
-	struct ibv_context *ctx; /* Verbs context. */
 	struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
 	BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
 	/* Bit-field of MAC addresses owned by the PMD. */

drivers/net/mlx5/mlx5_ethdev.c

@@ -1055,7 +1055,7 @@ mlx5_dev_status_handler(struct rte_eth_dev *dev)
 	}
 	/* Read all message and acknowledge them. */
 	for (;;) {
-		if (mlx5_glue->get_async_event(priv->ctx, &event))
+		if (mlx5_glue->get_async_event(priv->sh->ctx, &event))
 			break;
 		if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
 		     event.event_type == IBV_EVENT_PORT_ERR) &&
@@ -1142,12 +1142,13 @@ void
 mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct ibv_context *ctx = priv->sh->ctx;
 	int ret;
 	int flags;
 
-	assert(priv->ctx->async_fd > 0);
-	flags = fcntl(priv->ctx->async_fd, F_GETFL);
-	ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
+	assert(ctx->async_fd > 0);
+	flags = fcntl(ctx->async_fd, F_GETFL);
+	ret = fcntl(ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
 	if (ret) {
 		DRV_LOG(INFO,
 			"port %u failed to change file descriptor async event"
@@ -1158,7 +1159,7 @@ mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
 	}
 	if (dev->data->dev_conf.intr_conf.lsc ||
 	    dev->data->dev_conf.intr_conf.rmv) {
-		priv->intr_handle.fd = priv->ctx->async_fd;
+		priv->intr_handle.fd = ctx->async_fd;
 		priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
 		rte_intr_callback_register(&priv->intr_handle,
 					   mlx5_dev_interrupt_handler, dev);
@@ -1303,7 +1304,7 @@ mlx5_is_removed(struct rte_eth_dev *dev)
 	struct ibv_device_attr device_attr;
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	if (mlx5_glue->query_device(priv->ctx, &device_attr) == EIO)
+	if (mlx5_glue->query_device(priv->sh->ctx, &device_attr) == EIO)
 		return 1;
 	return 0;
 }

drivers/net/mlx5/mlx5_flow_dv.c

@@ -836,7 +836,7 @@ flow_dv_encap_decap_resource_register
 	*cache_resource = *resource;
 	cache_resource->verbs_action =
 		mlx5_glue->dv_create_flow_action_packet_reformat
-			(priv->ctx, cache_resource->size,
+			(priv->sh->ctx, cache_resource->size,
 			 (cache_resource->size ? cache_resource->buf : NULL),
 			 cache_resource->reformat_type,
 			 cache_resource->ft_type);
@@ -1468,7 +1468,7 @@ flow_dv_modify_hdr_resource_register
 	*cache_resource = *resource;
 	cache_resource->verbs_action =
 		mlx5_glue->dv_create_flow_action_modify_header
-			(priv->ctx,
+			(priv->sh->ctx,
 			 cache_resource->actions_num *
 			 sizeof(cache_resource->actions[0]),
 			 (uint64_t *)cache_resource->actions,
@@ -1528,7 +1528,7 @@ flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
 		ret = -ENOMEM;
 		goto error_exit;
 	}
-	ret = mlx5_devx_cmd_flow_counter_alloc(priv->ctx, dcs);
+	ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
 	if (ret)
 		goto error_exit;
 	struct mlx5_flow_counter tmpl = {
@@ -2787,7 +2787,7 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 	if (matcher->egress)
 		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
 	cache_matcher->matcher_object =
-		mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
+		mlx5_glue->dv_create_flow_matcher(priv->sh->ctx, &dv_attr);
 	if (!cache_matcher->matcher_object) {
 		rte_free(cache_matcher);
 		return rte_flow_error_set(error, ENOMEM,

drivers/net/mlx5/mlx5_flow_verbs.c

@@ -56,10 +56,11 @@ flow_verbs_counter_create(struct rte_eth_dev *dev,
 {
 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct ibv_context *ctx = priv->sh->ctx;
 	struct ibv_counter_set_init_attr init = {
 			 .counter_set_id = counter->id};
 
-	counter->cs = mlx5_glue->create_counter_set(priv->ctx, &init);
+	counter->cs = mlx5_glue->create_counter_set(ctx, &init);
 	if (!counter->cs) {
 		rte_errno = ENOTSUP;
 		return -ENOTSUP;
@@ -67,12 +68,13 @@ flow_verbs_counter_create(struct rte_eth_dev *dev,
 	return 0;
 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct ibv_context *ctx = priv->sh->ctx;
 	struct ibv_counters_init_attr init = {0};
 	struct ibv_counter_attach_attr attach;
 	int ret;
 
 	memset(&attach, 0, sizeof(attach));
-	counter->cs = mlx5_glue->create_counters(priv->ctx, &init);
+	counter->cs = mlx5_glue->create_counters(ctx, &init);
 	if (!counter->cs) {
 		rte_errno = ENOTSUP;
 		return -ENOTSUP;

drivers/net/mlx5/mlx5_rxq.c

@@ -799,7 +799,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 	}
 	tmpl->rxq_ctrl = rxq_ctrl;
 	if (rxq_ctrl->irq) {
-		tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
+		tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
 		if (!tmpl->channel) {
 			DRV_LOG(ERR, "port %u: comp channel creation failure",
 				dev->data->port_id);
@@ -848,7 +848,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 	}
 #endif
 	tmpl->cq = mlx5_glue->cq_ex_to_cq
-		(mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
+		(mlx5_glue->dv_create_cq(priv->sh->ctx, &attr.cq.ibv,
 					 &attr.cq.mlx5));
 	if (tmpl->cq == NULL) {
 		DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
@@ -905,10 +905,10 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 			.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
 		};
 	}
-	tmpl->wq = mlx5_glue->dv_create_wq(priv->ctx, &attr.wq.ibv,
+	tmpl->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &attr.wq.ibv,
 					   &attr.wq.mlx5);
 #else
-	tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq.ibv);
+	tmpl->wq = mlx5_glue->create_wq(priv->sh->ctx, &attr.wq.ibv);
 #endif
 	if (tmpl->wq == NULL) {
 		DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
@@ -1643,7 +1643,7 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
 	for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
 		wq[i] = wq[j];
 	ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
-		(priv->ctx,
+		(priv->sh->ctx,
 		 &(struct ibv_rwq_ind_table_init_attr){
 			.log_ind_tbl_size = wq_n,
 			.ind_tbl = wq,
@@ -1817,7 +1817,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 	}
 #endif
 	qp = mlx5_glue->dv_create_qp
-		(priv->ctx,
+		(priv->sh->ctx,
 		 &(struct ibv_qp_init_attr_ex){
 			.qp_type = IBV_QPT_RAW_PACKET,
 			.comp_mask =
@@ -1836,7 +1836,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 		 &qp_init_attr);
 #else
 	qp = mlx5_glue->create_qp_ex
-		(priv->ctx,
+		(priv->sh->ctx,
 		 &(struct ibv_qp_init_attr_ex){
 			.qp_type = IBV_QPT_RAW_PACKET,
 			.comp_mask =
@@ -1988,20 +1988,21 @@ struct mlx5_rxq_ibv *
 mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct ibv_context *ctx = priv->sh->ctx;
 	struct ibv_cq *cq;
 	struct ibv_wq *wq = NULL;
 	struct mlx5_rxq_ibv *rxq;
 
 	if (priv->drop_queue.rxq)
 		return priv->drop_queue.rxq;
-	cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
+	cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
 	if (!cq) {
 		DEBUG("port %u cannot allocate CQ for drop queue",
 		      dev->data->port_id);
 		rte_errno = errno;
 		goto error;
 	}
-	wq = mlx5_glue->create_wq(priv->ctx,
+	wq = mlx5_glue->create_wq(ctx,
 		 &(struct ibv_wq_init_attr){
 			.wq_type = IBV_WQT_RQ,
 			.max_wr = 1,
@@ -2078,7 +2079,7 @@ mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
 	if (!rxq)
 		return NULL;
 	tmpl.ind_table = mlx5_glue->create_rwq_ind_table
-		(priv->ctx,
+		(priv->sh->ctx,
 		 &(struct ibv_rwq_ind_table_init_attr){
 			.log_ind_tbl_size = 0,
 			.ind_tbl = &rxq->wq,
@@ -2145,7 +2146,7 @@ mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
 	ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
 	if (!ind_tbl)
 		return NULL;
-	qp = mlx5_glue->create_qp_ex(priv->ctx,
+	qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
 		 &(struct ibv_qp_init_attr_ex){
 			.qp_type = IBV_QPT_RAW_PACKET,
 			.comp_mask =

drivers/net/mlx5/mlx5_socket.c

@@ -185,9 +185,9 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
 	assert(cmsg != NULL);
 	cmsg->cmsg_level = SOL_SOCKET;
 	cmsg->cmsg_type = SCM_RIGHTS;
-	cmsg->cmsg_len = CMSG_LEN(sizeof(priv->ctx->cmd_fd));
+	cmsg->cmsg_len = CMSG_LEN(sizeof(priv->sh->ctx->cmd_fd));
 	fd = (int *)CMSG_DATA(cmsg);
-	*fd = priv->ctx->cmd_fd;
+	*fd = priv->sh->ctx->cmd_fd;
 	ret = sendmsg(conn_sock, &msg, 0);
 	if (ret < 0)
 		DRV_LOG(WARNING, "port %u cannot send response",

drivers/net/mlx5/mlx5_trigger.c

@@ -58,7 +58,7 @@ mlx5_txq_start(struct rte_eth_dev *dev)
 			goto error;
 		}
 	}
-	ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd);
+	ret = mlx5_tx_uar_remap(dev, priv->sh->ctx->cmd_fd);
 	if (ret) {
 		/* Adjust index for rollback. */
 		i = priv->txqs_n - 1;

drivers/net/mlx5/mlx5_txq.c

@@ -392,7 +392,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 		((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
 	if (is_empw_burst_func(tx_pkt_burst))
 		cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
-	tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
+	tmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
 	if (tmpl.cq == NULL) {
 		DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
 			dev->data->port_id, idx);
@@ -435,7 +435,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 		attr.init.max_tso_header = txq_ctrl->max_tso_header;
 		attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
 	}
-	tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init);
+	tmpl.qp = mlx5_glue->create_qp_ex(priv->sh->ctx, &attr.init);
 	if (tmpl.qp == NULL) {
 		DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
 			dev->data->port_id, idx);