net/mlx4: move rdma-core calls to separate file
This lays the groundwork for externalizing rdma-core as an optional
run-time dependency instead of a mandatory one.

No functional change.

Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
parent f69166c9a3
commit 4eba244b78
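The mlx4_glue function table added below is what makes the externalization possible: once every rdma-core call goes through it, a follow-up change could build the glue code as a separate shared object and resolve it at run time, so the PMD can report a missing rdma-core instead of failing to load. The sketch that follows only illustrates that idea; it is not part of this commit, and the shared-object name and loader function are assumptions.

/*
 * Hypothetical follow-up sketch, not part of this commit. Assumes the glue
 * table from mlx4_glue.c is built into a separate shared object whose name
 * ("librte_pmd_mlx4_glue.so") is invented here for illustration.
 */
#include <dlfcn.h>
#include <stddef.h>

#include "mlx4_glue.h"

static int
mlx4_glue_dlopen_init(void)
{
	void *handle = dlopen("librte_pmd_mlx4_glue.so", RTLD_LAZY);
	void *sym;

	if (handle == NULL)
		return -1; /* rdma-core absent: disable the PMD gracefully */
	sym = dlsym(handle, "mlx4_glue");
	if (sym == NULL) {
		dlclose(handle);
		return -1;
	}
	/* The glue object exports "mlx4_glue", a pointer to its table. */
	mlx4_glue = *(const struct mlx4_glue **)sym;
	return 0;
}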
drivers/net/mlx4/Makefile
@@ -38,6 +38,7 @@ LIB = librte_pmd_mlx4.a
 SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_glue.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_intr.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_mr.c
 SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_rxq.c
drivers/net/mlx4/mlx4.c
@@ -67,6 +67,7 @@
 #include <rte_mbuf.h>
 
 #include "mlx4.h"
+#include "mlx4_glue.h"
 #include "mlx4_flow.h"
 #include "mlx4_rxtx.h"
 #include "mlx4_utils.h"
@@ -224,8 +225,8 @@ mlx4_dev_close(struct rte_eth_dev *dev)
 		mlx4_tx_queue_release(dev->data->tx_queues[i]);
 	if (priv->pd != NULL) {
 		assert(priv->ctx != NULL);
-		claim_zero(ibv_dealloc_pd(priv->pd));
-		claim_zero(ibv_close_device(priv->ctx));
+		claim_zero(mlx4_glue->dealloc_pd(priv->pd));
+		claim_zero(mlx4_glue->close_device(priv->ctx));
 	} else
 		assert(priv->ctx == NULL);
 	mlx4_intr_uninstall(priv);
@@ -442,7 +443,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 
 	(void)pci_drv;
 	assert(pci_drv == &mlx4_driver);
-	list = ibv_get_device_list(&i);
+	list = mlx4_glue->get_device_list(&i);
 	if (list == NULL) {
 		rte_errno = errno;
 		assert(rte_errno);
@@ -471,12 +472,12 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 			  PCI_DEVICE_ID_MELLANOX_CONNECTX3VF);
 		INFO("PCI information matches, using device \"%s\" (VF: %s)",
 		     list[i]->name, (vf ? "true" : "false"));
-		attr_ctx = ibv_open_device(list[i]);
+		attr_ctx = mlx4_glue->open_device(list[i]);
 		err = errno;
 		break;
 	}
 	if (attr_ctx == NULL) {
-		ibv_free_device_list(list);
+		mlx4_glue->free_device_list(list);
 		switch (err) {
 		case 0:
 			rte_errno = ENODEV;
@@ -493,7 +494,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	}
 	ibv_dev = list[i];
 	DEBUG("device opened");
-	if (ibv_query_device(attr_ctx, &device_attr)) {
+	if (mlx4_glue->query_device(attr_ctx, &device_attr)) {
 		rte_errno = ENODEV;
 		goto error;
 	}
@@ -508,7 +509,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	if (!conf.ports.enabled)
 		conf.ports.enabled = conf.ports.present;
 	/* Retrieve extended device attributes. */
-	if (ibv_query_device_ex(attr_ctx, NULL, &device_attr_ex)) {
+	if (mlx4_glue->query_device_ex(attr_ctx, NULL, &device_attr_ex)) {
 		rte_errno = ENODEV;
 		goto error;
 	}
@@ -526,13 +527,13 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		if (!(conf.ports.enabled & (1 << i)))
 			continue;
 		DEBUG("using port %u", port);
-		ctx = ibv_open_device(ibv_dev);
+		ctx = mlx4_glue->open_device(ibv_dev);
 		if (ctx == NULL) {
 			rte_errno = ENODEV;
 			goto port_error;
 		}
 		/* Check port status. */
-		err = ibv_query_port(ctx, port, &port_attr);
+		err = mlx4_glue->query_port(ctx, port, &port_attr);
 		if (err) {
 			rte_errno = err;
 			ERROR("port query failed: %s", strerror(rte_errno));
@@ -546,7 +547,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		}
 		if (port_attr.state != IBV_PORT_ACTIVE)
 			DEBUG("port %d is not active: \"%s\" (%d)",
-			      port, ibv_port_state_str(port_attr.state),
+			      port, mlx4_glue->port_state_str(port_attr.state),
 			      port_attr.state);
 		/* Make asynchronous FD non-blocking to handle interrupts. */
 		if (mlx4_fd_set_non_blocking(ctx->async_fd) < 0) {
@@ -555,7 +556,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 			goto port_error;
 		}
 		/* Allocate protection domain. */
-		pd = ibv_alloc_pd(ctx);
+		pd = mlx4_glue->alloc_pd(ctx);
 		if (pd == NULL) {
 			rte_errno = ENOMEM;
 			ERROR("PD allocation failure");
@@ -634,7 +635,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 			char name[RTE_ETH_NAME_MAX_LEN];
 
 			snprintf(name, sizeof(name), "%s port %u",
-				 ibv_get_device_name(ibv_dev), port);
+				 mlx4_glue->get_device_name(ibv_dev), port);
 			eth_dev = rte_eth_dev_allocate(name);
 		}
 		if (eth_dev == NULL) {
@@ -677,9 +678,9 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 port_error:
 		rte_free(priv);
 		if (pd)
-			claim_zero(ibv_dealloc_pd(pd));
+			claim_zero(mlx4_glue->dealloc_pd(pd));
 		if (ctx)
-			claim_zero(ibv_close_device(ctx));
+			claim_zero(mlx4_glue->close_device(ctx));
 		if (eth_dev)
 			rte_eth_dev_release_port(eth_dev);
 		break;
@@ -694,9 +695,9 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	 */
 error:
 	if (attr_ctx)
-		claim_zero(ibv_close_device(attr_ctx));
+		claim_zero(mlx4_glue->close_device(attr_ctx));
 	if (list)
-		ibv_free_device_list(list);
+		mlx4_glue->free_device_list(list);
 	assert(rte_errno >= 0);
 	return -rte_errno;
 }
@@ -749,7 +750,7 @@ rte_mlx4_pmd_init(void)
 	 * using this PMD, which is not supported in forked processes.
 	 */
 	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
-	ibv_fork_init();
+	mlx4_glue->fork_init();
 	rte_pci_register(&mlx4_driver);
 }
 
drivers/net/mlx4/mlx4_ethdev.c
@@ -70,6 +70,7 @@
 
 #include "mlx4.h"
 #include "mlx4_flow.h"
+#include "mlx4_glue.h"
 #include "mlx4_rxtx.h"
 #include "mlx4_utils.h"
 
@@ -1068,7 +1069,7 @@ mlx4_is_removed(struct rte_eth_dev *dev)
 	struct ibv_device_attr device_attr;
 	struct priv *priv = dev->data->dev_private;
 
-	if (ibv_query_device(priv->ctx, &device_attr) == EIO)
+	if (mlx4_glue->query_device(priv->ctx, &device_attr) == EIO)
 		return 1;
 	return 0;
 }
drivers/net/mlx4/mlx4_flow.c
@@ -65,6 +65,7 @@
 
 /* PMD headers. */
 #include "mlx4.h"
+#include "mlx4_glue.h"
 #include "mlx4_flow.h"
 #include "mlx4_rxtx.h"
 #include "mlx4_utils.h"
@@ -922,24 +923,25 @@ mlx4_drop_get(struct priv *priv)
 		.priv = priv,
 		.refcnt = 1,
 	};
-	drop->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
+	drop->cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
 	if (!drop->cq)
 		goto error;
-	drop->qp = ibv_create_qp(priv->pd,
-				 &(struct ibv_qp_init_attr){
-					.send_cq = drop->cq,
-					.recv_cq = drop->cq,
-					.qp_type = IBV_QPT_RAW_PACKET,
-				 });
+	drop->qp = mlx4_glue->create_qp
+		(priv->pd,
+		 &(struct ibv_qp_init_attr){
+			.send_cq = drop->cq,
+			.recv_cq = drop->cq,
+			.qp_type = IBV_QPT_RAW_PACKET,
+		 });
 	if (!drop->qp)
 		goto error;
 	priv->drop = drop;
 	return drop;
 error:
 	if (drop->qp)
-		claim_zero(ibv_destroy_qp(drop->qp));
+		claim_zero(mlx4_glue->destroy_qp(drop->qp));
 	if (drop->cq)
-		claim_zero(ibv_destroy_cq(drop->cq));
+		claim_zero(mlx4_glue->destroy_cq(drop->cq));
 	if (drop)
 		rte_free(drop);
 	rte_errno = ENOMEM;
@@ -959,8 +961,8 @@ mlx4_drop_put(struct mlx4_drop *drop)
 	if (--drop->refcnt)
 		return;
 	drop->priv->drop = NULL;
-	claim_zero(ibv_destroy_qp(drop->qp));
-	claim_zero(ibv_destroy_cq(drop->cq));
+	claim_zero(mlx4_glue->destroy_qp(drop->qp));
+	claim_zero(mlx4_glue->destroy_cq(drop->cq));
 	rte_free(drop);
 }
 
@@ -992,7 +994,7 @@ mlx4_flow_toggle(struct priv *priv,
 	if (!enable) {
 		if (!flow->ibv_flow)
 			return 0;
-		claim_zero(ibv_destroy_flow(flow->ibv_flow));
+		claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
 		flow->ibv_flow = NULL;
 		if (flow->drop)
 			mlx4_drop_put(priv->drop);
@@ -1005,7 +1007,7 @@ mlx4_flow_toggle(struct priv *priv,
 	    !priv->isolated &&
 	    flow->ibv_attr->priority == MLX4_FLOW_PRIORITY_LAST) {
 		if (flow->ibv_flow) {
-			claim_zero(ibv_destroy_flow(flow->ibv_flow));
+			claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
 			flow->ibv_flow = NULL;
 			if (flow->drop)
 				mlx4_drop_put(priv->drop);
@@ -1035,7 +1037,7 @@ mlx4_flow_toggle(struct priv *priv,
 		if (missing ^ !flow->drop)
 			return 0;
 		/* Verbs flow needs updating. */
-		claim_zero(ibv_destroy_flow(flow->ibv_flow));
+		claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
 		flow->ibv_flow = NULL;
 		if (flow->drop)
 			mlx4_drop_put(priv->drop);
@@ -1067,7 +1069,7 @@ mlx4_flow_toggle(struct priv *priv,
 	assert(qp);
 	if (flow->ibv_flow)
 		return 0;
-	flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
+	flow->ibv_flow = mlx4_glue->create_flow(qp, flow->ibv_attr);
 	if (flow->ibv_flow)
 		return 0;
 	if (flow->drop)
drivers/net/mlx4/mlx4_glue.c (new file, 275 lines)
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox
+ */
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx4dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include "mlx4_glue.h"
+
+static int
+mlx4_glue_fork_init(void)
+{
+	return ibv_fork_init();
+}
+
+static int
+mlx4_glue_get_async_event(struct ibv_context *context,
+			  struct ibv_async_event *event)
+{
+	return ibv_get_async_event(context, event);
+}
+
+static void
+mlx4_glue_ack_async_event(struct ibv_async_event *event)
+{
+	ibv_ack_async_event(event);
+}
+
+static struct ibv_pd *
+mlx4_glue_alloc_pd(struct ibv_context *context)
+{
+	return ibv_alloc_pd(context);
+}
+
+static int
+mlx4_glue_dealloc_pd(struct ibv_pd *pd)
+{
+	return ibv_dealloc_pd(pd);
+}
+
+static struct ibv_device **
+mlx4_glue_get_device_list(int *num_devices)
+{
+	return ibv_get_device_list(num_devices);
+}
+
+static void
+mlx4_glue_free_device_list(struct ibv_device **list)
+{
+	ibv_free_device_list(list);
+}
+
+static struct ibv_context *
+mlx4_glue_open_device(struct ibv_device *device)
+{
+	return ibv_open_device(device);
+}
+
+static int
+mlx4_glue_close_device(struct ibv_context *context)
+{
+	return ibv_close_device(context);
+}
+
+static const char *
+mlx4_glue_get_device_name(struct ibv_device *device)
+{
+	return ibv_get_device_name(device);
+}
+
+static int
+mlx4_glue_query_device(struct ibv_context *context,
+		       struct ibv_device_attr *device_attr)
+{
+	return ibv_query_device(context, device_attr);
+}
+
+static int
+mlx4_glue_query_device_ex(struct ibv_context *context,
+			  const struct ibv_query_device_ex_input *input,
+			  struct ibv_device_attr_ex *attr)
+{
+	return ibv_query_device_ex(context, input, attr);
+}
+
+static int
+mlx4_glue_query_port(struct ibv_context *context, uint8_t port_num,
+		     struct ibv_port_attr *port_attr)
+{
+	return ibv_query_port(context, port_num, port_attr);
+}
+
+static const char *
+mlx4_glue_port_state_str(enum ibv_port_state port_state)
+{
+	return ibv_port_state_str(port_state);
+}
+
+static struct ibv_comp_channel *
+mlx4_glue_create_comp_channel(struct ibv_context *context)
+{
+	return ibv_create_comp_channel(context);
+}
+
+static int
+mlx4_glue_destroy_comp_channel(struct ibv_comp_channel *channel)
+{
+	return ibv_destroy_comp_channel(channel);
+}
+
+static struct ibv_cq *
+mlx4_glue_create_cq(struct ibv_context *context, int cqe, void *cq_context,
+		    struct ibv_comp_channel *channel, int comp_vector)
+{
+	return ibv_create_cq(context, cqe, cq_context, channel, comp_vector);
+}
+
+static int
+mlx4_glue_destroy_cq(struct ibv_cq *cq)
+{
+	return ibv_destroy_cq(cq);
+}
+
+static int
+mlx4_glue_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq,
+		       void **cq_context)
+{
+	return ibv_get_cq_event(channel, cq, cq_context);
+}
+
+static void
+mlx4_glue_ack_cq_events(struct ibv_cq *cq, unsigned int nevents)
+{
+	ibv_ack_cq_events(cq, nevents);
+}
+
+static struct ibv_flow *
+mlx4_glue_create_flow(struct ibv_qp *qp, struct ibv_flow_attr *flow)
+{
+	return ibv_create_flow(qp, flow);
+}
+
+static int
+mlx4_glue_destroy_flow(struct ibv_flow *flow_id)
+{
+	return ibv_destroy_flow(flow_id);
+}
+
+static struct ibv_qp *
+mlx4_glue_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *qp_init_attr)
+{
+	return ibv_create_qp(pd, qp_init_attr);
+}
+
+static struct ibv_qp *
+mlx4_glue_create_qp_ex(struct ibv_context *context,
+		       struct ibv_qp_init_attr_ex *qp_init_attr_ex)
+{
+	return ibv_create_qp_ex(context, qp_init_attr_ex);
+}
+
+static int
+mlx4_glue_destroy_qp(struct ibv_qp *qp)
+{
+	return ibv_destroy_qp(qp);
+}
+
+static int
+mlx4_glue_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)
+{
+	return ibv_modify_qp(qp, attr, attr_mask);
+}
+
+static struct ibv_mr *
+mlx4_glue_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
+{
+	return ibv_reg_mr(pd, addr, length, access);
+}
+
+static int
+mlx4_glue_dereg_mr(struct ibv_mr *mr)
+{
+	return ibv_dereg_mr(mr);
+}
+
+static struct ibv_rwq_ind_table *
+mlx4_glue_create_rwq_ind_table(struct ibv_context *context,
+			       struct ibv_rwq_ind_table_init_attr *init_attr)
+{
+	return ibv_create_rwq_ind_table(context, init_attr);
+}
+
+static int
+mlx4_glue_destroy_rwq_ind_table(struct ibv_rwq_ind_table *rwq_ind_table)
+{
+	return ibv_destroy_rwq_ind_table(rwq_ind_table);
+}
+
+static struct ibv_wq *
+mlx4_glue_create_wq(struct ibv_context *context,
+		    struct ibv_wq_init_attr *wq_init_attr)
+{
+	return ibv_create_wq(context, wq_init_attr);
+}
+
+static int
+mlx4_glue_destroy_wq(struct ibv_wq *wq)
+{
+	return ibv_destroy_wq(wq);
+}
+static int
+mlx4_glue_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr)
+{
+	return ibv_modify_wq(wq, wq_attr);
+}
+
+static int
+mlx4_glue_dv_init_obj(struct mlx4dv_obj *obj, uint64_t obj_type)
+{
+	return mlx4dv_init_obj(obj, obj_type);
+}
+
+static int
+mlx4_glue_dv_set_context_attr(struct ibv_context *context,
+			      enum mlx4dv_set_ctx_attr_type attr_type,
+			      void *attr)
+{
+	return mlx4dv_set_context_attr(context, attr_type, attr);
+}
+
+const struct mlx4_glue *mlx4_glue = &(const struct mlx4_glue){
+	.fork_init = mlx4_glue_fork_init,
+	.get_async_event = mlx4_glue_get_async_event,
+	.ack_async_event = mlx4_glue_ack_async_event,
+	.alloc_pd = mlx4_glue_alloc_pd,
+	.dealloc_pd = mlx4_glue_dealloc_pd,
+	.get_device_list = mlx4_glue_get_device_list,
+	.free_device_list = mlx4_glue_free_device_list,
+	.open_device = mlx4_glue_open_device,
+	.close_device = mlx4_glue_close_device,
+	.get_device_name = mlx4_glue_get_device_name,
+	.query_device = mlx4_glue_query_device,
+	.query_device_ex = mlx4_glue_query_device_ex,
+	.query_port = mlx4_glue_query_port,
+	.port_state_str = mlx4_glue_port_state_str,
+	.create_comp_channel = mlx4_glue_create_comp_channel,
+	.destroy_comp_channel = mlx4_glue_destroy_comp_channel,
+	.create_cq = mlx4_glue_create_cq,
+	.destroy_cq = mlx4_glue_destroy_cq,
+	.get_cq_event = mlx4_glue_get_cq_event,
+	.ack_cq_events = mlx4_glue_ack_cq_events,
+	.create_flow = mlx4_glue_create_flow,
+	.destroy_flow = mlx4_glue_destroy_flow,
+	.create_qp = mlx4_glue_create_qp,
+	.create_qp_ex = mlx4_glue_create_qp_ex,
+	.destroy_qp = mlx4_glue_destroy_qp,
+	.modify_qp = mlx4_glue_modify_qp,
+	.reg_mr = mlx4_glue_reg_mr,
+	.dereg_mr = mlx4_glue_dereg_mr,
+	.create_rwq_ind_table = mlx4_glue_create_rwq_ind_table,
+	.destroy_rwq_ind_table = mlx4_glue_destroy_rwq_ind_table,
+	.create_wq = mlx4_glue_create_wq,
+	.destroy_wq = mlx4_glue_destroy_wq,
+	.modify_wq = mlx4_glue_modify_wq,
+	.dv_init_obj = mlx4_glue_dv_init_obj,
+	.dv_set_context_attr = mlx4_glue_dv_set_context_attr,
+};
drivers/net/mlx4/mlx4_glue.h (new file, 80 lines)
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox
+ */
+
+#ifndef MLX4_GLUE_H_
+#define MLX4_GLUE_H_
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx4dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+struct mlx4_glue {
+	int (*fork_init)(void);
+	int (*get_async_event)(struct ibv_context *context,
+			       struct ibv_async_event *event);
+	void (*ack_async_event)(struct ibv_async_event *event);
+	struct ibv_pd *(*alloc_pd)(struct ibv_context *context);
+	int (*dealloc_pd)(struct ibv_pd *pd);
+	struct ibv_device **(*get_device_list)(int *num_devices);
+	void (*free_device_list)(struct ibv_device **list);
+	struct ibv_context *(*open_device)(struct ibv_device *device);
+	int (*close_device)(struct ibv_context *context);
+	const char *(*get_device_name)(struct ibv_device *device);
+	int (*query_device)(struct ibv_context *context,
+			    struct ibv_device_attr *device_attr);
+	int (*query_device_ex)(struct ibv_context *context,
+			       const struct ibv_query_device_ex_input *input,
+			       struct ibv_device_attr_ex *attr);
+	int (*query_port)(struct ibv_context *context, uint8_t port_num,
+			  struct ibv_port_attr *port_attr);
+	const char *(*port_state_str)(enum ibv_port_state port_state);
+	struct ibv_comp_channel *(*create_comp_channel)
+		(struct ibv_context *context);
+	int (*destroy_comp_channel)(struct ibv_comp_channel *channel);
+	struct ibv_cq *(*create_cq)(struct ibv_context *context, int cqe,
+				    void *cq_context,
+				    struct ibv_comp_channel *channel,
+				    int comp_vector);
+	int (*destroy_cq)(struct ibv_cq *cq);
+	int (*get_cq_event)(struct ibv_comp_channel *channel,
+			    struct ibv_cq **cq, void **cq_context);
+	void (*ack_cq_events)(struct ibv_cq *cq, unsigned int nevents);
+	struct ibv_flow *(*create_flow)(struct ibv_qp *qp,
+					struct ibv_flow_attr *flow);
+	int (*destroy_flow)(struct ibv_flow *flow_id);
+	struct ibv_qp *(*create_qp)(struct ibv_pd *pd,
+				    struct ibv_qp_init_attr *qp_init_attr);
+	struct ibv_qp *(*create_qp_ex)
+		(struct ibv_context *context,
+		 struct ibv_qp_init_attr_ex *qp_init_attr_ex);
+	int (*destroy_qp)(struct ibv_qp *qp);
+	int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+			 int attr_mask);
+	struct ibv_mr *(*reg_mr)(struct ibv_pd *pd, void *addr,
+				 size_t length, int access);
+	int (*dereg_mr)(struct ibv_mr *mr);
+	struct ibv_rwq_ind_table *(*create_rwq_ind_table)
+		(struct ibv_context *context,
+		 struct ibv_rwq_ind_table_init_attr *init_attr);
+	int (*destroy_rwq_ind_table)(struct ibv_rwq_ind_table *rwq_ind_table);
+	struct ibv_wq *(*create_wq)(struct ibv_context *context,
+				    struct ibv_wq_init_attr *wq_init_attr);
+	int (*destroy_wq)(struct ibv_wq *wq);
+	int (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr);
+	int (*dv_init_obj)(struct mlx4dv_obj *obj, uint64_t obj_type);
+	int (*dv_set_context_attr)(struct ibv_context *context,
+				   enum mlx4dv_set_ctx_attr_type attr_type,
+				   void *attr);
+};
+
+const struct mlx4_glue *mlx4_glue;
+
+#endif /* MLX4_GLUE_H_ */
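For reference, a minimal consumer of the table declared above might look as follows. This is an illustrative sketch, not code from the commit: it assumes mlx4_glue already points at a valid table (as set up by mlx4_glue.c) and simply mirrors the device-enumeration sequence that mlx4_pci_probe() now performs through the glue layer.

/*
 * Illustrative sketch only: open the first Verbs device going exclusively
 * through the mlx4_glue table, never calling libibverbs directly.
 */
#include <stdio.h>

#include "mlx4_glue.h"

static struct ibv_context *
open_first_device(void)
{
	int n = 0;
	struct ibv_device **list = mlx4_glue->get_device_list(&n);
	struct ibv_context *ctx = NULL;

	if (list == NULL || n == 0)
		goto out;
	printf("opening %s\n", mlx4_glue->get_device_name(list[0]));
	ctx = mlx4_glue->open_device(list[0]);
out:
	if (list != NULL)
		mlx4_glue->free_device_list(list);
	return ctx;
}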
drivers/net/mlx4/mlx4_intr.c
@@ -57,6 +57,7 @@
 #include <rte_interrupts.h>
 
 #include "mlx4.h"
+#include "mlx4_glue.h"
 #include "mlx4_rxtx.h"
 #include "mlx4_utils.h"
 
@@ -216,7 +217,7 @@ mlx4_interrupt_handler(struct priv *priv)
 	unsigned int i;
 
 	/* Read all message and acknowledge them. */
-	while (!ibv_get_async_event(priv->ctx, &event)) {
+	while (!mlx4_glue->get_async_event(priv->ctx, &event)) {
 		switch (event.event_type) {
 		case IBV_EVENT_PORT_ACTIVE:
 		case IBV_EVENT_PORT_ERR:
@@ -231,7 +232,7 @@ mlx4_interrupt_handler(struct priv *priv)
 			DEBUG("event type %d on physical port %d not handled",
 			      event.event_type, event.element.port_num);
 		}
-		ibv_ack_async_event(&event);
+		mlx4_glue->ack_async_event(&event);
 	}
 	for (i = 0; i != RTE_DIM(caught); ++i)
 		if (caught[i])
@@ -352,7 +353,8 @@ mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
 	if (!rxq || !rxq->channel) {
 		ret = EINVAL;
 	} else {
-		ret = ibv_get_cq_event(rxq->cq->channel, &ev_cq, &ev_ctx);
+		ret = mlx4_glue->get_cq_event(rxq->cq->channel, &ev_cq,
+					      &ev_ctx);
 		if (ret || ev_cq != rxq->cq)
 			ret = EINVAL;
 	}
@@ -362,7 +364,7 @@ mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
 		      idx);
 	} else {
 		rxq->mcq.arm_sn++;
-		ibv_ack_cq_events(rxq->cq, 1);
+		mlx4_glue->ack_cq_events(rxq->cq, 1);
 	}
 	return -ret;
 }
drivers/net/mlx4/mlx4_mr.c
@@ -60,6 +60,7 @@
 #include <rte_mempool.h>
 #include <rte_spinlock.h>
 
+#include "mlx4_glue.h"
 #include "mlx4_rxtx.h"
 #include "mlx4_utils.h"
 
@@ -200,8 +201,8 @@ mlx4_mr_get(struct priv *priv, struct rte_mempool *mp)
 		.end = end,
 		.refcnt = 1,
 		.priv = priv,
-		.mr = ibv_reg_mr(priv->pd, (void *)start, end - start,
-				 IBV_ACCESS_LOCAL_WRITE),
+		.mr = mlx4_glue->reg_mr(priv->pd, (void *)start, end - start,
+					IBV_ACCESS_LOCAL_WRITE),
 		.mp = mp,
 	};
 	if (mr->mr) {
@@ -240,7 +241,7 @@ mlx4_mr_put(struct mlx4_mr *mr)
 	if (--mr->refcnt)
 		goto release;
 	LIST_REMOVE(mr, next);
-	claim_zero(ibv_dereg_mr(mr->mr));
+	claim_zero(mlx4_glue->dereg_mr(mr->mr));
 	rte_free(mr);
 release:
 	rte_spinlock_unlock(&priv->mr_lock);
drivers/net/mlx4/mlx4_rxq.c
@@ -62,6 +62,7 @@
 #include <rte_mempool.h>
 
 #include "mlx4.h"
+#include "mlx4_glue.h"
 #include "mlx4_flow.h"
 #include "mlx4_rxtx.h"
 #include "mlx4_utils.h"
@@ -231,7 +232,7 @@ mlx4_rss_attach(struct mlx4_rss *rss)
 		}
 		ind_tbl[i] = rxq->wq;
 	}
-	rss->ind = ibv_create_rwq_ind_table
+	rss->ind = mlx4_glue->create_rwq_ind_table
 		(priv->ctx,
 		 &(struct ibv_rwq_ind_table_init_attr){
 			.log_ind_tbl_size = rte_log2_u32(RTE_DIM(ind_tbl)),
@@ -243,7 +244,7 @@ mlx4_rss_attach(struct mlx4_rss *rss)
 		msg = "RSS indirection table creation failure";
 		goto error;
 	}
-	rss->qp = ibv_create_qp_ex
+	rss->qp = mlx4_glue->create_qp_ex
 		(priv->ctx,
 		 &(struct ibv_qp_init_attr_ex){
 			.comp_mask = (IBV_QP_INIT_ATTR_PD |
@@ -264,7 +265,7 @@ mlx4_rss_attach(struct mlx4_rss *rss)
 		msg = "RSS hash QP creation failure";
 		goto error;
 	}
-	ret = ibv_modify_qp
+	ret = mlx4_glue->modify_qp
 		(rss->qp,
 		 &(struct ibv_qp_attr){
 			.qp_state = IBV_QPS_INIT,
@@ -275,7 +276,7 @@ mlx4_rss_attach(struct mlx4_rss *rss)
 		msg = "failed to switch RSS hash QP to INIT state";
 		goto error;
 	}
-	ret = ibv_modify_qp
+	ret = mlx4_glue->modify_qp
 		(rss->qp,
 		 &(struct ibv_qp_attr){
 			.qp_state = IBV_QPS_RTR,
@@ -288,11 +289,11 @@ mlx4_rss_attach(struct mlx4_rss *rss)
 	return 0;
 error:
 	if (rss->qp) {
-		claim_zero(ibv_destroy_qp(rss->qp));
+		claim_zero(mlx4_glue->destroy_qp(rss->qp));
 		rss->qp = NULL;
 	}
 	if (rss->ind) {
-		claim_zero(ibv_destroy_rwq_ind_table(rss->ind));
+		claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind));
 		rss->ind = NULL;
 	}
 	while (i--)
@@ -325,9 +326,9 @@ mlx4_rss_detach(struct mlx4_rss *rss)
 	assert(rss->ind);
 	if (--rss->usecnt)
 		return;
-	claim_zero(ibv_destroy_qp(rss->qp));
+	claim_zero(mlx4_glue->destroy_qp(rss->qp));
 	rss->qp = NULL;
-	claim_zero(ibv_destroy_rwq_ind_table(rss->ind));
+	claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind));
 	rss->ind = NULL;
 	for (i = 0; i != rss->queues; ++i)
 		mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
@@ -364,9 +365,10 @@ mlx4_rss_init(struct priv *priv)
 	int ret;
 
 	/* Prepare range for RSS contexts before creating the first WQ. */
-	ret = mlx4dv_set_context_attr(priv->ctx,
-				      MLX4DV_SET_CTX_ATTR_LOG_WQS_RANGE_SZ,
-				      &log2_range);
+	ret = mlx4_glue->dv_set_context_attr
+		(priv->ctx,
+		 MLX4DV_SET_CTX_ATTR_LOG_WQS_RANGE_SZ,
+		 &log2_range);
 	if (ret) {
 		ERROR("cannot set up range size for RSS context to %u"
 		      " (for %u Rx queues), error: %s",
@@ -402,13 +404,13 @@ mlx4_rss_init(struct priv *priv)
 	 * sequentially and are guaranteed to never be reused in the
 	 * same context by the underlying implementation.
 	 */
-	cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
+	cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
 	if (!cq) {
 		ret = ENOMEM;
 		msg = "placeholder CQ creation failure";
 		goto error;
 	}
-	wq = ibv_create_wq
+	wq = mlx4_glue->create_wq
 		(priv->ctx,
 		 &(struct ibv_wq_init_attr){
 			.wq_type = IBV_WQT_RQ,
@@ -419,11 +421,11 @@ mlx4_rss_init(struct priv *priv)
 		 });
 	if (wq) {
 		wq_num = wq->wq_num;
-		claim_zero(ibv_destroy_wq(wq));
+		claim_zero(mlx4_glue->destroy_wq(wq));
 	} else {
 		wq_num = 0; /* Shut up GCC 4.8 warnings. */
 	}
-	claim_zero(ibv_destroy_cq(cq));
+	claim_zero(mlx4_glue->destroy_cq(cq));
 	if (!wq) {
 		ret = ENOMEM;
 		msg = "placeholder WQ creation failure";
@@ -522,13 +524,14 @@ mlx4_rxq_attach(struct rxq *rxq)
 	int ret;
 
 	assert(rte_is_power_of_2(elts_n));
-	cq = ibv_create_cq(priv->ctx, elts_n / sges_n, NULL, rxq->channel, 0);
+	cq = mlx4_glue->create_cq(priv->ctx, elts_n / sges_n, NULL,
+				  rxq->channel, 0);
 	if (!cq) {
 		ret = ENOMEM;
 		msg = "CQ creation failure";
 		goto error;
 	}
-	wq = ibv_create_wq
+	wq = mlx4_glue->create_wq
 		(priv->ctx,
 		 &(struct ibv_wq_init_attr){
 			.wq_type = IBV_WQT_RQ,
@@ -542,7 +545,7 @@ mlx4_rxq_attach(struct rxq *rxq)
 		msg = "WQ creation failure";
 		goto error;
 	}
-	ret = ibv_modify_wq
+	ret = mlx4_glue->modify_wq
 		(wq,
 		 &(struct ibv_wq_attr){
 			.attr_mask = IBV_WQ_ATTR_STATE,
@@ -557,7 +560,7 @@ mlx4_rxq_attach(struct rxq *rxq)
 	mlxdv.cq.out = &dv_cq;
 	mlxdv.rwq.in = wq;
 	mlxdv.rwq.out = &dv_rwq;
-	ret = mlx4dv_init_obj(&mlxdv, MLX4DV_OBJ_RWQ | MLX4DV_OBJ_CQ);
+	ret = mlx4_glue->dv_init_obj(&mlxdv, MLX4DV_OBJ_RWQ | MLX4DV_OBJ_CQ);
 	if (ret) {
 		msg = "failed to obtain device information from WQ/CQ objects";
 		goto error;
@@ -619,9 +622,9 @@ mlx4_rxq_attach(struct rxq *rxq)
 	return 0;
 error:
 	if (wq)
-		claim_zero(ibv_destroy_wq(wq));
+		claim_zero(mlx4_glue->destroy_wq(wq));
 	if (cq)
-		claim_zero(ibv_destroy_cq(cq));
+		claim_zero(mlx4_glue->destroy_cq(cq));
 	rte_errno = ret;
 	ERROR("error while attaching Rx queue %p: %s: %s",
 	      (void *)rxq, msg, strerror(ret));
@@ -649,9 +652,9 @@ mlx4_rxq_detach(struct rxq *rxq)
 	memset(&rxq->mcq, 0, sizeof(rxq->mcq));
 	rxq->rq_db = NULL;
 	rxq->wqes = NULL;
-	claim_zero(ibv_destroy_wq(rxq->wq));
+	claim_zero(mlx4_glue->destroy_wq(rxq->wq));
 	rxq->wq = NULL;
-	claim_zero(ibv_destroy_cq(rxq->cq));
+	claim_zero(mlx4_glue->destroy_cq(rxq->cq));
 	rxq->cq = NULL;
 	DEBUG("%p: freeing Rx queue elements", (void *)rxq);
 	for (i = 0; (i != RTE_DIM(*elts)); ++i) {
@@ -879,7 +882,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		goto error;
 	}
 	if (dev->data->dev_conf.intr_conf.rxq) {
-		rxq->channel = ibv_create_comp_channel(priv->ctx);
+		rxq->channel = mlx4_glue->create_comp_channel(priv->ctx);
 		if (rxq->channel == NULL) {
 			rte_errno = ENOMEM;
 			ERROR("%p: Rx interrupt completion channel creation"
@@ -934,7 +937,7 @@ mlx4_rx_queue_release(void *dpdk_rxq)
 	assert(!rxq->wqes);
 	assert(!rxq->rq_db);
 	if (rxq->channel)
-		claim_zero(ibv_destroy_comp_channel(rxq->channel));
+		claim_zero(mlx4_glue->destroy_comp_channel(rxq->channel));
 	if (rxq->mr)
 		mlx4_mr_put(rxq->mr);
 	rte_free(rxq);
drivers/net/mlx4/mlx4_txq.c
@@ -60,6 +60,7 @@
 #include <rte_mempool.h>
 
 #include "mlx4.h"
+#include "mlx4_glue.h"
 #include "mlx4_prm.h"
 #include "mlx4_rxtx.h"
 #include "mlx4_utils.h"
@@ -350,7 +351,7 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		.lb = !!priv->vf,
 		.bounce_buf = bounce_buf,
 	};
-	txq->cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);
+	txq->cq = mlx4_glue->create_cq(priv->ctx, desc, NULL, NULL, 0);
 	if (!txq->cq) {
 		rte_errno = ENOMEM;
 		ERROR("%p: CQ creation failure: %s",
@@ -370,7 +371,7 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		/* No completion events must occur by default. */
 		.sq_sig_all = 0,
 	};
-	txq->qp = ibv_create_qp(priv->pd, &qp_init_attr);
+	txq->qp = mlx4_glue->create_qp(priv->pd, &qp_init_attr);
 	if (!txq->qp) {
 		rte_errno = errno ? errno : EINVAL;
 		ERROR("%p: QP creation failure: %s",
@@ -378,7 +379,7 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		goto error;
 	}
 	txq->max_inline = qp_init_attr.cap.max_inline_data;
-	ret = ibv_modify_qp
+	ret = mlx4_glue->modify_qp
 		(txq->qp,
 		 &(struct ibv_qp_attr){
 			.qp_state = IBV_QPS_INIT,
@@ -391,7 +392,7 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		      (void *)dev, strerror(rte_errno));
 		goto error;
 	}
-	ret = ibv_modify_qp
+	ret = mlx4_glue->modify_qp
 		(txq->qp,
 		 &(struct ibv_qp_attr){
 			.qp_state = IBV_QPS_RTR,
@@ -403,7 +404,7 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		      (void *)dev, strerror(rte_errno));
 		goto error;
 	}
-	ret = ibv_modify_qp
+	ret = mlx4_glue->modify_qp
 		(txq->qp,
 		 &(struct ibv_qp_attr){
 			.qp_state = IBV_QPS_RTS,
@@ -420,7 +421,7 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	mlxdv.cq.out = &dv_cq;
 	mlxdv.qp.in = txq->qp;
 	mlxdv.qp.out = &dv_qp;
-	ret = mlx4dv_init_obj(&mlxdv, MLX4DV_OBJ_QP | MLX4DV_OBJ_CQ);
+	ret = mlx4_glue->dv_init_obj(&mlxdv, MLX4DV_OBJ_QP | MLX4DV_OBJ_CQ);
 	if (ret) {
 		rte_errno = EINVAL;
 		ERROR("%p: failed to obtain information needed for"
@@ -470,9 +471,9 @@ mlx4_tx_queue_release(void *dpdk_txq)
 		}
 	mlx4_txq_free_elts(txq);
 	if (txq->qp)
-		claim_zero(ibv_destroy_qp(txq->qp));
+		claim_zero(mlx4_glue->destroy_qp(txq->qp));
 	if (txq->cq)
-		claim_zero(ibv_destroy_cq(txq->cq));
+		claim_zero(mlx4_glue->destroy_cq(txq->cq));
 	for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) {
 		if (!txq->mp2mr[i].mp)
 			break;