rdma: Add mlx5_dv RDMA provider

The new RDMA provider can be enabled by passing
--with-rdma=mlx5_dv parameter to configure script
This provider uses "externally created qpair"
functionality of rdma cm - it must move a qpair
to RTS state manually

Change-Id: I72484f6edd1f4dad15430e2c8d36b65d1975e8a2
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1658
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
This commit is contained in:
Alexey Marchuk 2020-03-14 15:08:29 +03:00 committed by Tomasz Zawadzki
parent 63c8cea783
commit daee62a05b
13 changed files with 319 additions and 14 deletions

View File

@ -15,6 +15,13 @@ The software accel engine implementation has added support for CRC-32C.
IDXD engine support for CRC-32C has been added.
### rdma
A new `rdma` library has been added. It is an abstraction layer over different RDMA providers.
Two providers are available - verbs (used by default when RDMA is enabled or enabled explicitly
using --with-rdma=verbs) and mlx5 Direct Verbs aka DV (enabled by --with-rdma=mlx5_dv).
Using mlx5_dv requires libmlx5 installed on the system.
## v20.04:
### configuration

1
CONFIG
View File

@ -100,6 +100,7 @@ CONFIG_FIO_SOURCE_DIR=/usr/src/fio
CONFIG_RDMA=n
CONFIG_RDMA_SEND_WITH_INVAL=n
CONFIG_RDMA_SET_ACK_TIMEOUT=n
CONFIG_RDMA_PROV=verbs
# Enable NVMe Character Devices.
CONFIG_NVME_CUSE=n

26
configure vendored
View File

@ -74,7 +74,8 @@ function usage()
echo " rbd Build Ceph RBD bdev module."
echo " No path required."
echo " rdma Build RDMA transport for NVMf target and initiator."
echo " No path required."
echo " Accepts optional RDMA provider name. Can be \"verbs\" or \"mlx5_dv\"."
echo " If no provider specified, \"verbs\" provider is used by default."
echo " fc Build FC transport for NVMf target."
echo " If an argument is provided, it is considered a directory containing"
echo " libufc.a and fc_lld.h. Otherwise the regular system paths will"
@ -292,8 +293,13 @@ for i in "$@"; do
--without-rbd)
CONFIG[RBD]=n
;;
--with-rdma=*)
CONFIG[RDMA]=y
CONFIG[RDMA_PROV]=${i#*=}
;;
--with-rdma)
CONFIG[RDMA]=y
CONFIG[RDMA_PROV]="verbs"
;;
--without-rdma)
CONFIG[RDMA]=n
@ -593,6 +599,11 @@ if [[ $sys_name == "FreeBSD" ]]; then
fi
if [ "${CONFIG[RDMA]}" = "y" ]; then
if [[ ! "${CONFIG[RDMA_PROV]}" == "verbs" ]] && [[ ! "${CONFIG[RDMA_PROV]}" == "mlx5_dv" ]]; then
echo "Invalid RDMA provider specified, must be \"verbs\" or \"mlx5_dv\""
exit 1
fi
if ! echo -e '#include <infiniband/verbs.h>\n#include <rdma/rdma_verbs.h>\n' \
'int main(void) { return 0; }\n' \
| ${BUILD_CMD[@]} -libverbs -lrdmacm - 2>/dev/null; then
@ -632,6 +643,19 @@ than or equal to 4.14 will see significantly reduced performance.
echo "RDMA_OPTION_ID_ACK_TIMEOUT is not supported"
fi
if [ "${CONFIG[RDMA_PROV]}" == "mlx5_dv" ]; then
if ! echo -e '#include <spdk/stdinc.h>\n' \
'#include <infiniband/mlx5dv.h>\n' \
'#include <rdma/rdma_cma.h>\n' \
'int main(void) { return rdma_establish(NULL) || ' \
'!!IBV_QP_INIT_ATTR_SEND_OPS_FLAGS || !!MLX5_OPCODE_RDMA_WRITE; }\n' \
| ${BUILD_CMD[@]} -lmlx5 -I${rootdir}/include -c - 2>/dev/null; then
echo "mlx5_dv provider is not supported"
exit 1
fi
fi
echo "Using "${CONFIG[RDMA_PROV]}" RDMA provider"
fi
if [[ "${CONFIG[FC]}" = "y" ]]; then

View File

@ -45,6 +45,7 @@ struct spdk_rdma_qp_init_attr {
struct ibv_srq *srq;
struct ibv_qp_cap cap;
struct ibv_pd *pd;
bool initiator_side;
};
struct spdk_rdma_qp {
@ -61,6 +62,14 @@ struct spdk_rdma_qp {
struct spdk_rdma_qp *spdk_rdma_qp_create(struct rdma_cm_id *cm_id,
struct spdk_rdma_qp_init_attr *qp_attr);
/**
* Complete the connection process, must be called by the active
* side (NVMEoF initiator) upon receipt of the RDMA_CM_EVENT_CONNECT_RESPONSE event.
* @param spdk_rdma_qp pointer to a qpair
* @return 0 on success, errno on failure
*/
int spdk_rdma_qp_complete_connect(struct spdk_rdma_qp *spdk_rdma_qp);
/**
* Destroy RDMA provider specific qpair
* \param spdk_rdma_qp Pointer to qpair to be destroyed

View File

@ -332,13 +332,14 @@ nvme_rdma_qpair_process_cm_event(struct nvme_rdma_qpair *rqpair)
break;
case RDMA_CM_EVENT_CONNECT_REQUEST:
break;
case RDMA_CM_EVENT_CONNECT_RESPONSE:
break;
case RDMA_CM_EVENT_CONNECT_ERROR:
break;
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_REJECTED:
break;
case RDMA_CM_EVENT_CONNECT_RESPONSE:
rc = spdk_rdma_qp_complete_connect(rqpair->rdma_qp);
/* fall through */
case RDMA_CM_EVENT_ESTABLISHED:
accept_data = (struct spdk_nvmf_rdma_accept_private_data *)event->param.conn.private_data;
if (accept_data == NULL) {
@ -443,6 +444,13 @@ nvme_rdma_validate_cm_event(enum rdma_cm_event_type expected_evt_type,
*/
if (reaped_evt->event == RDMA_CM_EVENT_REJECTED && reaped_evt->status == 10) {
rc = -ESTALE;
} else if (reaped_evt->event == RDMA_CM_EVENT_CONNECT_RESPONSE) {
/*
* If we are using a qpair which is not created using rdma cm API
* then we will receive RDMA_CM_EVENT_CONNECT_RESPONSE instead of
* RDMA_CM_EVENT_ESTABLISHED.
*/
return 0;
}
break;
default:
@ -530,6 +538,7 @@ nvme_rdma_qpair_init(struct nvme_rdma_qpair *rqpair)
attr.cap.max_recv_wr = rqpair->num_entries; /* RECV operations */
attr.cap.max_send_sge = spdk_min(NVME_RDMA_DEFAULT_TX_SGE, dev_attr.max_sge);
attr.cap.max_recv_sge = spdk_min(NVME_RDMA_DEFAULT_RX_SGE, dev_attr.max_sge);
attr.initiator_side = true;
rqpair->rdma_qp = spdk_rdma_qp_create(rqpair->cm_id, &attr);
@ -541,7 +550,7 @@ nvme_rdma_qpair_init(struct nvme_rdma_qpair *rqpair)
rqpair->max_send_sge = spdk_min(NVME_RDMA_DEFAULT_TX_SGE, attr.cap.max_send_sge);
rqpair->max_recv_sge = spdk_min(NVME_RDMA_DEFAULT_RX_SGE, attr.cap.max_recv_sge);
rctrlr->pd = rqpair->cm_id->qp->pd;
rctrlr->pd = rqpair->rdma_qp->qp->pd;
rqpair->cm_id->context = &rqpair->qpair;
@ -555,7 +564,7 @@ nvme_rdma_qpair_submit_sends(struct nvme_rdma_qpair *rqpair)
int rc;
if (rqpair->sends_to_post.first) {
rc = ibv_post_send(rqpair->cm_id->qp, rqpair->sends_to_post.first, &bad_send_wr);
rc = ibv_post_send(rqpair->rdma_qp->qp, rqpair->sends_to_post.first, &bad_send_wr);
if (spdk_unlikely(rc)) {
SPDK_ERRLOG("Failed to post WRs on send queue, errno %d (%s), bad_wr %p\n",
rc, spdk_strerror(rc), bad_send_wr);
@ -579,7 +588,7 @@ nvme_rdma_qpair_submit_recvs(struct nvme_rdma_qpair *rqpair)
int rc;
if (rqpair->recvs_to_post.first) {
rc = ibv_post_recv(rqpair->cm_id->qp, rqpair->recvs_to_post.first, &bad_recv_wr);
rc = ibv_post_recv(rqpair->rdma_qp->qp, rqpair->recvs_to_post.first, &bad_recv_wr);
if (spdk_unlikely(rc)) {
SPDK_ERRLOG("Failed to post WRs on receive queue, errno %d (%s), bad_wr %p\n",
rc, spdk_strerror(rc), bad_recv_wr);
@ -933,6 +942,11 @@ nvme_rdma_connect(struct nvme_rdma_qpair *rqpair)
param.retry_count = ctrlr->opts.transport_retry_count;
param.rnr_retry_count = 7;
/* Fields below are ignored by rdma cm if qpair has been
* created using rdma cm API. */
param.srq = 0;
param.qp_num = rqpair->rdma_qp->qp->qp_num;
ret = rdma_connect(rqpair->cm_id, &param);
if (ret) {
SPDK_ERRLOG("nvme rdma connect error\n");
@ -944,7 +958,7 @@ nvme_rdma_connect(struct nvme_rdma_qpair *rqpair)
SPDK_NOTICELOG("Received a stale connection notice during connection.\n");
return -EAGAIN;
} else if (ret) {
SPDK_ERRLOG("RDMA connect error\n");
SPDK_ERRLOG("RDMA connect error %d\n", ret);
return -1;
} else {
return 0;
@ -1033,7 +1047,7 @@ nvme_rdma_check_contiguous_entries(uint64_t addr_1, uint64_t addr_2)
static int
nvme_rdma_register_mem(struct nvme_rdma_qpair *rqpair)
{
struct ibv_pd *pd = rqpair->cm_id->qp->pd;
struct ibv_pd *pd = rqpair->rdma_qp->qp->pd;
struct spdk_nvme_rdma_mr_map *mr_map;
const struct spdk_mem_map_ops nvme_rdma_map_ops = {
.notify_cb = nvme_rdma_mr_map_notify,

View File

@ -566,7 +566,7 @@ nvmf_rdma_update_ibv_state(struct spdk_nvmf_rdma_qpair *rqpair) {
int rc;
old_state = rqpair->ibv_state;
rc = ibv_query_qp(rqpair->cm_id->qp, &qp_attr,
rc = ibv_query_qp(rqpair->rdma_qp->qp, &qp_attr,
g_spdk_nvmf_ibv_query_mask, &init_attr);
if (rc)
@ -978,6 +978,7 @@ nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
qp_init_attr.cap.max_send_wr = (uint32_t)rqpair->max_queue_depth * 2;
qp_init_attr.cap.max_send_sge = spdk_min((uint32_t)device->attr.max_sge, NVMF_DEFAULT_TX_SGE);
qp_init_attr.cap.max_recv_sge = spdk_min((uint32_t)device->attr.max_sge, NVMF_DEFAULT_RX_SGE);
qp_init_attr.initiator_side = false;
if (rqpair->srq == NULL && nvmf_rdma_resize_cq(rqpair, device) < 0) {
SPDK_ERRLOG("Failed to resize the completion queue. Cannot initialize qpair.\n");
@ -1003,7 +1004,7 @@ nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
transport = &rtransport->transport;
opts.qp = rqpair->cm_id->qp;
opts.qp = rqpair->rdma_qp->qp;
opts.pd = rqpair->cm_id->pd;
opts.qpair = rqpair;
opts.shared = false;
@ -3592,7 +3593,7 @@ get_rdma_qpair_from_wc(struct spdk_nvmf_rdma_poller *rpoller, struct ibv_wc *wc)
struct spdk_nvmf_rdma_qpair *rqpair;
/* @todo: improve QP search */
TAILQ_FOREACH(rqpair, &rpoller->qpairs, link) {
if (wc->qp_num == rqpair->cm_id->qp->qp_num) {
if (wc->qp_num == rqpair->rdma_qp->qp->qp_num) {
return rqpair;
}
}
@ -3660,7 +3661,7 @@ _poller_submit_recvs(struct spdk_nvmf_rdma_transport *rtransport,
while (!STAILQ_EMPTY(&rpoller->qpairs_pending_recv)) {
rqpair = STAILQ_FIRST(&rpoller->qpairs_pending_recv);
assert(rqpair->resources->recvs_to_post.first != NULL);
rc = ibv_post_recv(rqpair->cm_id->qp, rqpair->resources->recvs_to_post.first, &bad_recv_wr);
rc = ibv_post_recv(rqpair->rdma_qp->qp, rqpair->resources->recvs_to_post.first, &bad_recv_wr);
if (rc) {
_qp_reset_failed_recvs(rqpair, bad_recv_wr, rc);
}
@ -3743,7 +3744,7 @@ _poller_submit_sends(struct spdk_nvmf_rdma_transport *rtransport,
while (!STAILQ_EMPTY(&rpoller->qpairs_pending_send)) {
rqpair = STAILQ_FIRST(&rpoller->qpairs_pending_send);
assert(rqpair->sends_to_post.first != NULL);
rc = ibv_post_send(rqpair->cm_id->qp, rqpair->sends_to_post.first, &bad_wr);
rc = ibv_post_send(rqpair->rdma_qp->qp, rqpair->sends_to_post.first, &bad_wr);
/* bad wr always points to the first wr that failed. */
if (rc) {

View File

@ -41,7 +41,15 @@ SO_SUFFIX := $(SO_VER).$(SO_MINOR)
SPDK_MAP_FILE = $(abspath $(CURDIR)/spdk_rdma.map)
LIBNAME = rdma
ifeq ($(CONFIG_RDMA_PROV),verbs)
C_SRCS = rdma_verbs.c
else ifeq ($(CONFIG_RDMA_PROV),mlx5_dv)
C_SRCS = rdma_mlx5_dv.c
LOCAL_SYS_LIBS += -lmlx5
else
$(error Wrong RDMA provider specified: $(CONFIG_RDMA_PROV))
endif
LOCAL_SYS_LIBS += -libverbs -lrdmacm
#Attach only if FreeBSD and RDMA is specified with configure

226
lib/rdma/rdma_mlx5_dv.c Normal file
View File

@ -0,0 +1,226 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rdma/rdma_cma.h>
#include <infiniband/mlx5dv.h>
#include "spdk/stdinc.h"
#include "spdk/string.h"
#include "spdk/likely.h"
#include "spdk_internal/rdma.h"
#include "spdk_internal/log.h"
/* mlx5 Direct Verbs specific qpair wrapper. Embeds the provider-agnostic
 * spdk_rdma_qp so callers holding a struct spdk_rdma_qp * can be converted
 * back with SPDK_CONTAINEROF. */
struct spdk_rdma_mlx5_dv_qp {
	/* Common part shared with the verbs provider; must stay the first member
	 * for the SPDK_CONTAINEROF conversions used below. */
	struct spdk_rdma_qp common;
	/* Extended qp handle obtained via ibv_qp_to_qp_ex() at creation time. */
	struct ibv_qp_ex *qpex;
	/* true when this qpair belongs to the active (NVMEoF initiator) side;
	 * controls who performs the manual INIT->RTR->RTS transition. */
	bool initiator_side;
};
static int
rdma_mlx5_dv_init_qpair(struct spdk_rdma_mlx5_dv_qp *mlx5_qp)
{
struct ibv_qp_attr qp_attr;
int qp_attr_mask, rc;
qp_attr.qp_state = IBV_QPS_INIT;
rc = rdma_init_qp_attr(mlx5_qp->common.cm_id, &qp_attr, &qp_attr_mask);
if (rc) {
SPDK_ERRLOG("Failed to init attr IBV_QPS_INIT, errno %s (%d)\n", spdk_strerror(errno), errno);
return rc;
}
rc = ibv_modify_qp(mlx5_qp->common.qp, &qp_attr, qp_attr_mask);
if (rc) {
SPDK_ERRLOG("ibv_modify_qp(IBV_QPS_INIT) failed, rc %d\n", rc);
return rc;
}
qp_attr.qp_state = IBV_QPS_RTR;
rc = rdma_init_qp_attr(mlx5_qp->common.cm_id, &qp_attr, &qp_attr_mask);
if (rc) {
SPDK_ERRLOG("Failed to init attr IBV_QPS_RTR, errno %s (%d)\n", spdk_strerror(errno), errno);
return rc;
}
rc = ibv_modify_qp(mlx5_qp->common.qp, &qp_attr, qp_attr_mask);
if (rc) {
SPDK_ERRLOG("ibv_modify_qp(IBV_QPS_RTR) failed, rc %d\n", rc);
return rc;
}
qp_attr.qp_state = IBV_QPS_RTS;
rc = rdma_init_qp_attr(mlx5_qp->common.cm_id, &qp_attr, &qp_attr_mask);
if (rc) {
SPDK_ERRLOG("Failed to init attr IBV_QPS_RTR, errno %s (%d)\n", spdk_strerror(errno), errno);
return rc;
}
rc = ibv_modify_qp(mlx5_qp->common.qp, &qp_attr, qp_attr_mask);
if (rc) {
SPDK_ERRLOG("ibv_modify_qp(IBV_QPS_RTS) failed, rc %d\n", rc);
}
return rc;
}
struct spdk_rdma_qp *
spdk_rdma_qp_create(struct rdma_cm_id *cm_id, struct spdk_rdma_qp_init_attr *qp_attr)
{
assert(cm_id);
assert(qp_attr);
struct ibv_qp *qp;
struct spdk_rdma_mlx5_dv_qp *mlx5_qp;
struct ibv_qp_init_attr_ex dv_qp_attr = {
.qp_context = qp_attr->qp_context,
.send_cq = qp_attr->send_cq,
.recv_cq = qp_attr->recv_cq,
.srq = qp_attr->srq,
.cap = qp_attr->cap,
.qp_type = IBV_QPT_RC,
.comp_mask = IBV_QP_INIT_ATTR_PD | IBV_QP_INIT_ATTR_SEND_OPS_FLAGS,
.pd = qp_attr->pd ? qp_attr->pd : cm_id->pd
};
assert(dv_qp_attr.pd);
mlx5_qp = calloc(1, sizeof(*mlx5_qp));
if (!mlx5_qp) {
SPDK_ERRLOG("qp memory allocation failed\n");
return NULL;
}
qp = mlx5dv_create_qp(cm_id->verbs, &dv_qp_attr, NULL);
if (!qp) {
SPDK_ERRLOG("Failed to create qpair, errno %s (%d)\n", spdk_strerror(errno), errno);
free(mlx5_qp);
return NULL;
}
mlx5_qp->common.qp = qp;
mlx5_qp->common.cm_id = cm_id;
mlx5_qp->qpex = ibv_qp_to_qp_ex(qp);
mlx5_qp->initiator_side = qp_attr->initiator_side;
if (!mlx5_qp->qpex) {
spdk_rdma_qp_destroy(&mlx5_qp->common);
return NULL;
}
/* NVMEoF target must move qpair to RTS state */
if (!mlx5_qp->initiator_side && rdma_mlx5_dv_init_qpair(mlx5_qp) != 0) {
SPDK_ERRLOG("Failed to initialize qpair\n");
spdk_rdma_qp_destroy(&mlx5_qp->common);
return NULL;
}
qp_attr->cap = dv_qp_attr.cap;
return &mlx5_qp->common;
}
/**
 * Finish connection establishment on the active (NVMEoF initiator) side.
 *
 * Moves the qpair to RTS and then calls rdma_establish() to complete the
 * externally-created-qpair connection flow. A no-op for target-side qpairs,
 * which were already transitioned to RTS during creation.
 *
 * \param spdk_rdma_qp pointer to the common qpair part (must be non-NULL)
 * \return 0 on success, non-zero error code on failure
 */
int
spdk_rdma_qp_complete_connect(struct spdk_rdma_qp *spdk_rdma_qp)
{
	struct spdk_rdma_mlx5_dv_qp *dv_qp;
	int rc;

	assert(spdk_rdma_qp);

	dv_qp = SPDK_CONTAINEROF(spdk_rdma_qp, struct spdk_rdma_mlx5_dv_qp, common);

	if (!dv_qp->initiator_side) {
		/* The target side already moved this qpair to RTS. */
		return 0;
	}

	rc = rdma_mlx5_dv_init_qpair(dv_qp);
	if (rc) {
		SPDK_ERRLOG("Failed to initialize qpair\n");
		return rc;
	}

	rc = rdma_establish(dv_qp->common.cm_id);
	if (rc) {
		SPDK_ERRLOG("rdma_establish failed, errno %s (%d)\n", spdk_strerror(errno), errno);
	}

	return rc;
}
/**
 * Destroy an mlx5 DV qpair and release its wrapper structure.
 *
 * A failure of ibv_destroy_qp() is logged but does not prevent the wrapper
 * from being freed.
 *
 * \param spdk_rdma_qp pointer to the common qpair part (must be non-NULL)
 */
void
spdk_rdma_qp_destroy(struct spdk_rdma_qp *spdk_rdma_qp)
{
	struct spdk_rdma_mlx5_dv_qp *dv_qp;

	assert(spdk_rdma_qp != NULL);

	dv_qp = SPDK_CONTAINEROF(spdk_rdma_qp, struct spdk_rdma_mlx5_dv_qp, common);

	if (dv_qp->common.qp) {
		int rc = ibv_destroy_qp(dv_qp->common.qp);

		if (rc) {
			SPDK_ERRLOG("Failed to destroy ibv qp %p, rc %d\n", dv_qp->common.qp, rc);
		}
	}

	free(dv_qp);
}
int
spdk_rdma_qp_disconnect(struct spdk_rdma_qp *spdk_rdma_qp)
{
int rc = 0;
assert(spdk_rdma_qp != NULL);
if (spdk_rdma_qp->qp) {
struct ibv_qp_attr qp_attr = { .qp_state = IBV_QPS_ERR };
rc = ibv_modify_qp(spdk_rdma_qp->qp, &qp_attr, IBV_QP_STATE);
if (rc) {
SPDK_ERRLOG("Failed to modify ibv qp %p state to ERR, rc %d\n", spdk_rdma_qp->qp, rc);
return rc;
}
}
if (spdk_rdma_qp->cm_id) {
rc = rdma_disconnect(spdk_rdma_qp->cm_id);
if (rc) {
SPDK_ERRLOG("rdma_disconnect failed, errno %s (%d)\n", spdk_strerror(errno), errno);
}
}
return rc;
}

View File

@ -74,6 +74,13 @@ spdk_rdma_qp_create(struct rdma_cm_id *cm_id, struct spdk_rdma_qp_init_attr *qp_
return spdk_rdma_qp;
}
/* Complete the connection process on the initiator side.
 * Nothing to be done for Verbs: qpairs created with the rdma cm API are
 * transitioned through INIT/RTR/RTS by librdmacm itself, so this provider
 * only needs to satisfy the common spdk_rdma interface.
 * Always returns 0. */
int
spdk_rdma_qp_complete_connect(struct spdk_rdma_qp *spdk_rdma_qp)
{
	/* Nothing to be done for Verbs */
	return 0;
}
void
spdk_rdma_qp_destroy(struct spdk_rdma_qp *spdk_rdma_qp)
{

View File

@ -3,6 +3,7 @@
# Public functions
spdk_rdma_qp_create;
spdk_rdma_qp_complete_connect;
spdk_rdma_qp_destroy;
spdk_rdma_qp_disconnect;

View File

@ -51,8 +51,11 @@ SYS_LIBS += -lpmem
endif
ifeq ($(CONFIG_RDMA),y)
SYS_LIBS += -libverbs -lrdmacm
BLOCKDEV_MODULES_LIST += rdma
SYS_LIBS += -libverbs -lrdmacm
ifeq ($(CONFIG_RDMA_PROV),mlx5_dv)
SYS_LIBS += -lmlx5
endif
endif
ifeq ($(OS),Linux)

View File

@ -38,5 +38,6 @@
DEFINE_STUB(spdk_rdma_qp_create, struct spdk_rdma_qp *, (struct rdma_cm_id *cm_id,
struct spdk_rdma_qp_init_attr *qp_attr), NULL);
DEFINE_STUB(spdk_rdma_qp_complete_connect, int, (struct spdk_rdma_qp *spdk_rdma_qp), 0);
DEFINE_STUB_V(spdk_rdma_qp_destroy, (struct spdk_rdma_qp *spdk_rdma_qp));
DEFINE_STUB(spdk_rdma_qp_disconnect, int, (struct spdk_rdma_qp *spdk_rdma_qp), 0);

View File

@ -16,6 +16,9 @@ test/unit/lib/nvmf/fc_ls.c/fc_ls_ut
# Not configured for Neon testing
lib/util/base64_neon
# Not configured for mlx5 dv testing
lib/rdma/rdma_mlx5_dv
# Files related to testing our internal vhost implementation.
lib/rte_vhost/fd_man
lib/rte_vhost/socket