nvmf/rdma: Solve the qpair thread and channel issue.
According to the current code, an RDMA qpair is always created by the thread on the acceptor_core, so we need to change the related I/O channel during polling if the core configured for the subsystem is not the same as the acceptor core.

With this patch, we can run the NVMe-oF target with multiple cores, and each subsystem can be configured with a different core to handle it.

Change-Id: I6163a871f65115e545a4f3fd9cc46b3bafb13249
Signed-off-by: Ziye Yang <optimistyzy@gmail.com>
Reviewed-on: https://review.gerrithub.io/383683
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
commit 5b707a6e87
parent ccf8cb1834
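For context on the mechanism the commit message relies on: in SPDK, a transport can register itself as an I/O device, and each thread that calls spdk_get_io_channel() on that device gets its own channel with private per-thread context. Below is a minimal sketch of that pattern; the my_* names are invented for illustration, and the 4-argument spdk_io_device_register() shown matches SPDK of this era (later releases add a name argument).

#include <assert.h>

#include "spdk/io_channel.h"

/* Hypothetical per-thread context carried by each channel of the device. */
struct my_mgmt_channel {
	int requests_in_flight;
};

static int
my_mgmt_channel_create(void *io_device, void *ctx_buf)
{
	struct my_mgmt_channel *ch = ctx_buf;

	ch->requests_in_flight = 0;	/* runs once per thread, on first get */
	return 0;
}

static void
my_mgmt_channel_destroy(void *io_device, void *ctx_buf)
{
	/* Runs on the owning thread when its last reference is put. */
}

void
my_transport_init(void *transport)
{
	/* Register once; ctx_size bytes are allocated per channel. */
	spdk_io_device_register(transport, my_mgmt_channel_create,
				my_mgmt_channel_destroy,
				sizeof(struct my_mgmt_channel));
}

void
my_thread_work(void *transport)
{
	/* Each thread gets its own channel; repeated calls on the same
	 * thread return the same channel with its refcount bumped. */
	struct spdk_io_channel *io_ch = spdk_get_io_channel(transport);
	struct my_mgmt_channel *ch = spdk_io_channel_get_ctx(io_ch);

	assert(ch != NULL);
	ch->requests_in_flight++;
	/* ... do per-thread work ... */
	spdk_put_io_channel(io_ch);
}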
@@ -198,6 +198,7 @@ struct spdk_nvmf_rdma_qpair {
 	/* Mgmt channel */
 	struct spdk_io_channel		*mgmt_channel;
 	struct spdk_nvmf_rdma_mgmt_channel *ch;
+	struct spdk_thread		*thread;
 };
 
 /* List of RDMA connections that have not yet received a CONNECT capsule */
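The new thread member records which SPDK thread obtained the qpair's mgmt channel. The condensed view below is editorial, not from the source; owner_check mirrors the comparison the patch adds to the poll path.

#include <stdbool.h>

#include "spdk/io_channel.h"

struct spdk_nvmf_rdma_mgmt_channel;		/* defined in rdma.c */

struct qpair_binding {				/* hypothetical condensed view */
	/* Channel obtained via spdk_get_io_channel(); usable on one thread. */
	struct spdk_io_channel *mgmt_channel;
	/* Per-thread context stored inside mgmt_channel. */
	struct spdk_nvmf_rdma_mgmt_channel *ch;
	/* The spdk_thread that owns mgmt_channel. */
	struct spdk_thread *thread;
};

static bool
owner_check(const struct qpair_binding *b)
{
	/* Channels may only be used on the thread that obtained them. */
	return b->thread == spdk_get_thread();
}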
@@ -278,6 +279,21 @@ spdk_nvmf_rdma_mgmt_channel_destroy(void *io_device, void *ctx_buf)
 	}
 }
 
+static int
+spdk_nvmf_rdma_qpair_allocate_channel(struct spdk_nvmf_rdma_qpair *rdma_qpair,
+				      struct spdk_nvmf_rdma_transport *rtransport)
+{
+	rdma_qpair->mgmt_channel = spdk_get_io_channel(rtransport);
+	if (!rdma_qpair->mgmt_channel) {
+		return -1;
+	}
+
+	rdma_qpair->thread = spdk_get_thread();
+	rdma_qpair->ch = spdk_io_channel_get_ctx(rdma_qpair->mgmt_channel);
+	assert(rdma_qpair->ch != NULL);
+	return 0;
+}
+
 static void
 spdk_nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rdma_qpair)
 {
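The new helper bundles three steps that were previously open-coded at the connect site: take a channel on the calling thread, record that thread as the qpair's owner, and cache the channel's per-thread context. A hypothetical caller sketch follows (use_qpair_on_this_thread is invented); each successful call holds a channel reference that must eventually be released with spdk_put_io_channel().

/* Hypothetical caller sketch; assumes the helper above is in scope. */
static int
use_qpair_on_this_thread(struct spdk_nvmf_rdma_qpair *rdma_qpair,
			 struct spdk_nvmf_rdma_transport *rtransport)
{
	if (spdk_nvmf_rdma_qpair_allocate_channel(rdma_qpair, rtransport)) {
		return -1;	/* no channel available on this thread */
	}

	/* rdma_qpair->ch is now valid, but only from rdma_qpair->thread. */

	/* Release the reference taken by the helper when done. */
	spdk_put_io_channel(rdma_qpair->mgmt_channel);
	rdma_qpair->mgmt_channel = NULL;
	return 0;
}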
@@ -683,14 +699,11 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
 	 * is received. */
 	TAILQ_INSERT_TAIL(&g_pending_conns, rdma_qpair, pending_link);
 
-	rdma_qpair->mgmt_channel = spdk_get_io_channel(rtransport);
-	if (!rdma_qpair->mgmt_channel) {
+	rc = spdk_nvmf_rdma_qpair_allocate_channel(rdma_qpair, rtransport);
+	if (rc) {
 		goto err2;
 	}
 
-	rdma_qpair->ch = spdk_io_channel_get_ctx(rdma_qpair->mgmt_channel);
-	assert(rdma_qpair->ch != NULL);
-
 	return 0;
 
 err2:
@@ -1428,6 +1441,8 @@ spdk_nvmf_rdma_accept(struct spdk_nvmf_transport *transport)
 			TAILQ_REMOVE(&g_pending_conns, rdma_qpair, pending_link);
 			spdk_nvmf_rdma_qpair_destroy(rdma_qpair);
 		} else if (rc > 0) {
+			spdk_put_io_channel(rdma_qpair->mgmt_channel);
+			rdma_qpair->mgmt_channel = NULL;
 			/* At least one request was processed which is assumed to be
 			 * a CONNECT. Remove this connection from our list. */
 			TAILQ_REMOVE(&g_pending_conns, rdma_qpair, pending_link);
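Putting the channel back on the accept path clears the acceptor thread's temporary binding once the CONNECT capsule has been handled, so the qpair can later be re-bound by whichever thread polls it next. This relies on I/O channels being reference counted per thread; the sketch below illustrates the assumed semantics (the refcount comments are my reading of the API, not taken from this commit).

#include <assert.h>

#include "spdk/io_channel.h"

struct spdk_nvmf_rdma_transport;		/* registered as an io_device */

static void
channel_refcount_sketch(struct spdk_nvmf_rdma_transport *rtransport)
{
	struct spdk_io_channel *a, *b;

	a = spdk_get_io_channel(rtransport);	/* creates or references */
	b = spdk_get_io_channel(rtransport);	/* same thread: same channel */
	assert(a == b);

	spdk_put_io_channel(b);			/* drops one reference */
	spdk_put_io_channel(a);			/* last put tears the channel down */
}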
@@ -1735,6 +1750,15 @@ spdk_nvmf_rdma_qpair_poll(struct spdk_nvmf_rdma_transport *rtransport,
 	bool error = false;
 	char buf[64];
 
+	/* Bind the qpair on first poll; afterwards only its owning thread may poll it */
+	if (rqpair->mgmt_channel != NULL) {
+		if (rqpair->thread != spdk_get_thread()) {
+			return 0;
+		}
+	} else if (spdk_nvmf_rdma_qpair_allocate_channel(rqpair, rtransport)) {
+		return -1;
+	}
+
 	/* Poll for completing operations. */
 	reaped = ibv_poll_cq(rqpair->cq, 32, wc);
 	if (reaped < 0) {
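With this gate, every reactor can call the poll routine for any qpair: the first thread to poll an unbound qpair becomes its owner, and every other thread returns 0 without touching the completion queue. A hypothetical poller loop follows, with qpair_poll standing in for the static spdk_nvmf_rdma_qpair_poll() above; all names here are invented for illustration.

struct spdk_nvmf_rdma_transport;
struct spdk_nvmf_rdma_qpair;

/* Stand-in prototype for the static spdk_nvmf_rdma_qpair_poll() above. */
int qpair_poll(struct spdk_nvmf_rdma_transport *rtransport,
	       struct spdk_nvmf_rdma_qpair *rqpair);

/* Hypothetical reactor loop: safe to run on every core. */
static void
poll_all(struct spdk_nvmf_rdma_transport *rtransport,
	 struct spdk_nvmf_rdma_qpair **qpairs, int num_qpairs)
{
	int i;

	for (i = 0; i < num_qpairs; i++) {
		/* Owned by another thread: returns 0 immediately.
		 * Unbound: binds the qpair to the calling thread first. */
		qpair_poll(rtransport, qpairs[i]);
	}
}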