nvmf/vfio-user: assign IO SQs that share a CQ to the same poll group

By default each SQ is assigned to a different poll group in a
round-robin fashion. When several SQs share one CQ, completions for
that CQ may then be posted from different threads, which is a race
condition. Fix this by assigning SQs that share a CQ to the same poll
group, the one that already owns the CQ.
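
To illustrate the race (a minimal sketch with hypothetical names, not
SPDK code): two poll-group threads that complete I/Os on SQs sharing a
CQ can read the same tail slot, overwrite each other's completion
entry, and lose one tail increment:

    #include <stdint.h>

    /* Hypothetical sketch, not SPDK code: the unsynchronized tail update is
     * where two threads posting completions to one CQ step on each other. */
    struct io_cq {
        uint32_t tail;  /* producer index into the completion ring */
        uint32_t size;  /* number of ring slots */
    };

    static void
    cq_post(struct io_cq *cq)
    {
        uint32_t slot = cq->tail;           /* threads A and B can both read   */
        /* ... both write their entry into ring[slot] ...                      */
        cq->tail = (slot + 1) % cq->size;   /* the same slot; one update lost  */
    }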

Also run the NVMe compliance tests on multiple cores (core mask 0x7
instead of 0x2) so that the shared IO CQ case is covered.

Change-Id: I9d7cc78aaedceed23986d9f89ed945e0eb337e09
Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11115
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Author:    Changpeng Liu <changpeng.liu@intel.com>
Date:      2022-01-17 20:40:02 +08:00
Committed: Jim Harris
parent     2af05cad3c
commit     a391e3ad01

2 changed files with 22 additions and 1 deletion

lib/nvmf/vfio_user.c

@@ -194,6 +194,7 @@ struct nvmf_vfio_user_sq {
 };
 
 struct nvmf_vfio_user_cq {
+	struct spdk_nvmf_transport_poll_group *group;
 	struct nvme_q cq;
 	enum nvmf_vfio_user_cq_state cq_state;
 	uint32_t cq_ref;
@@ -1053,6 +1054,7 @@ delete_sq_done(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvmf_vfio_user_sq *
 			unmap_q(vu_ctrlr, &vu_cq->cq);
 			vu_cq->cq.size = 0;
 			vu_cq->cq_state = VFIO_USER_CQ_DELETED;
+			vu_cq->group = NULL;
 		}
 	}
 }
@@ -1408,6 +1410,7 @@ handle_del_io_q(struct nvmf_vfio_user_ctrlr *ctrlr,
 		unmap_q(ctrlr, &vu_cq->cq);
 		vu_cq->cq.size = 0;
 		vu_cq->cq_state = VFIO_USER_CQ_DELETED;
+		vu_cq->group = NULL;
 	} else {
 		ctx = calloc(1, sizeof(*ctx));
 		if (!ctx) {
@@ -2579,8 +2582,14 @@ nvmf_vfio_user_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair)
 {
 	struct nvmf_vfio_user_transport *vu_transport;
 	struct nvmf_vfio_user_poll_group **vu_group;
+	struct nvmf_vfio_user_sq *vu_sq;
+	struct nvmf_vfio_user_cq *vu_cq;
 	struct spdk_nvmf_transport_poll_group *result;
 
+	vu_sq = SPDK_CONTAINEROF(qpair, struct nvmf_vfio_user_sq, qpair);
+	vu_cq = vu_sq->ctrlr->cqs[vu_sq->sq.cqid];
+	assert(vu_cq != NULL);
+
 	vu_transport = SPDK_CONTAINEROF(qpair->transport, struct nvmf_vfio_user_transport, transport);
 
 	pthread_mutex_lock(&vu_transport->pg_lock);
@@ -2589,6 +2598,14 @@ nvmf_vfio_user_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair)
 		return NULL;
 	}
 
+	/* If this is shared IO CQ case, just return the used CQ's poll group */
+	if (!nvmf_qpair_is_admin_queue(qpair)) {
+		if (vu_cq->group) {
+			pthread_mutex_unlock(&vu_transport->pg_lock);
+			return vu_cq->group;
+		}
+	}
+
 	vu_group = &vu_transport->next_pg;
 	assert(*vu_group != NULL);
 
@@ -2598,6 +2615,10 @@ nvmf_vfio_user_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair)
 		*vu_group = TAILQ_FIRST(&vu_transport->poll_groups);
 	}
 
+	if (vu_cq->group == NULL) {
+		vu_cq->group = result;
+	}
+
 	pthread_mutex_unlock(&vu_transport->pg_lock);
 	return result;
 }
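
For context (abridged from the same file; not part of this diff), the
generic nvmf layer reaches this function through the transport ops
table when it schedules a newly created qpair onto a poll group, so the
group recorded in vu_cq->group is where every later SQ on that CQ runs:

    const struct spdk_nvmf_transport_ops spdk_nvmf_transport_vfio_user = {
        .name = "VFIOUSER",
        /* ... */
        .get_optimal_poll_group = nvmf_vfio_user_get_optimal_poll_group,
        /* ... */
    };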

(NVMe compliance test setup script; exact path not preserved in the extraction)

@@ -15,7 +15,7 @@ export TEST_TRANSPORT=VFIOUSER
 
 rm -rf /var/run/vfio-user
 
 # Start the target
-"${NVMF_APP[@]}" -m 0x2 &
+"${NVMF_APP[@]}" -m 0x7 &
 nvmfpid=$!
 echo "Process pid: $nvmfpid"