rdma: limit the completion queue size based on the SRQ.

When we have a shared receive queue, the number of outstanding work
requests associated with a completion queue is deterministic: it is
bounded by the total number of RECVs posted to the SRQ. So we can set
the size of the completion queue once, when the poll group is created,
and never resize it.
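
As a rough illustration of this sizing rule, here is a minimal standalone
sketch. It is not SPDK code: the helper cq_size_for_poller and the
DEFAULT_CQ_SIZE value are hypothetical stand-ins for the logic and the
DEFAULT_NVMF_RDMA_CQ_SIZE constant used in the patch below.

/* Minimal sketch (assumptions noted above) showing why num_cqe can be
 * fixed up front when a shared receive queue caps the outstanding RECVs. */
#include <stdio.h>

#define DEFAULT_CQ_SIZE 4096	/* assumed stand-in for DEFAULT_NVMF_RDMA_CQ_SIZE */

/* num_cqe = num_recv + num_data_wr + num_send_wr, each bounded by the SRQ depth */
static int cq_size_for_poller(int max_srq_depth, int has_srq)
{
	if (has_srq) {
		return max_srq_depth * 3;
	}
	/* No SRQ: start with the default size and resize as qpairs are added. */
	return DEFAULT_CQ_SIZE;
}

int main(void)
{
	printf("srq depth 4096 -> num_cqe = %d\n", cq_size_for_poller(4096, 1));
	printf("no srq         -> num_cqe = %d\n", cq_size_for_poller(0, 0));
	return 0;
}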

Change-Id: I787e4c5bbd52ac8948a323d1301f926f887cd91c
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/447492
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>

@@ -826,7 +826,7 @@ spdk_nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)
 	rdma_destroy_qp(rqpair->cm_id);
 	rdma_destroy_id(rqpair->cm_id);
-	if (rqpair->poller) {
+	if (rqpair->poller != NULL && rqpair->srq == NULL) {
 		rqpair->poller->required_num_wr -= MAX_WR_PER_QP(rqpair->max_queue_depth);
 	}
 }
@@ -906,7 +906,7 @@ spdk_nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
 	ibv_init_attr.cap.max_send_sge = spdk_min(device->attr.max_sge, NVMF_DEFAULT_TX_SGE);
 	ibv_init_attr.cap.max_recv_sge = spdk_min(device->attr.max_sge, NVMF_DEFAULT_RX_SGE);
-	if (nvmf_rdma_resize_cq(rqpair, device) < 0) {
+	if (rqpair->srq == NULL && nvmf_rdma_resize_cq(rqpair, device) < 0) {
 		SPDK_ERRLOG("Failed to resize the completion queue. Cannot initialize qpair.\n");
 		goto error;
 	}
@@ -2598,6 +2598,7 @@ spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
 	struct spdk_nvmf_rdma_device *device;
 	struct ibv_srq_init_attr srq_init_attr;
 	struct spdk_nvmf_rdma_resource_opts opts;
+	int num_cqe;
 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
@@ -2624,15 +2625,6 @@ spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
 		TAILQ_INIT(&poller->qpairs);
-		poller->cq = ibv_create_cq(device->context, DEFAULT_NVMF_RDMA_CQ_SIZE, poller, NULL, 0);
-		if (!poller->cq) {
-			SPDK_ERRLOG("Unable to create completion queue\n");
-			spdk_nvmf_rdma_poll_group_destroy(&rgroup->group);
-			pthread_mutex_unlock(&rtransport->lock);
-			return NULL;
-		}
-		poller->num_cqe = DEFAULT_NVMF_RDMA_CQ_SIZE;
 		TAILQ_INSERT_TAIL(&rgroup->pollers, poller, link);
 		if (device->attr.max_srq != 0) {
 			poller->max_srq_depth = transport->opts.max_srq_depth;
@@ -2662,6 +2654,27 @@ spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
 				pthread_mutex_unlock(&rtransport->lock);
 			}
 		}
+		/*
+		 * When using an SRQ, we can size the completion queue once at startup.
+		 * The following formula represents the calculation:
+		 * num_cqe = num_recv + num_data_wr + num_send_wr,
+		 * where num_recv = num_data_wr = num_send_wr = poller->max_srq_depth.
+		 */
+		if (poller->srq) {
+			num_cqe = poller->max_srq_depth * 3;
+		} else {
+			num_cqe = DEFAULT_NVMF_RDMA_CQ_SIZE;
+		}
+		poller->cq = ibv_create_cq(device->context, num_cqe, poller, NULL, 0);
+		if (!poller->cq) {
+			SPDK_ERRLOG("Unable to create completion queue\n");
+			spdk_nvmf_rdma_poll_group_destroy(&rgroup->group);
+			pthread_mutex_unlock(&rtransport->lock);
+			return NULL;
+		}
+		poller->num_cqe = num_cqe;
 	}
 	pthread_mutex_unlock(&rtransport->lock);
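
Design note: assuming an SRQ depth of 4096 (the configured value comes from transport->opts.max_srq_depth), the formula above fixes the poller's CQ at 4096 * 3 = 12288 entries. Because that bound can never be exceeded, the per-qpair nvmf_rdma_resize_cq() call and the required_num_wr accounting are skipped whenever an SRQ is present, as the first two hunks show.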