rdma: add check for allocating too many SRQs.

Without this check, we could run into issues when running SPDK on an
arbitrarily large number of cores.

Change-Id: Ia7add027d7e6ef1ccb4a69ac328dbdf4f2751fd8
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/452250
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
commit 61948a1ca7
parent bbd13b48cb
Author: Seth Howell, 2019-04-26 11:31:44 -07:00 (committed by Jim Harris)

@@ -451,6 +451,8 @@ struct spdk_nvmf_rdma_device {
 	struct spdk_mem_map			*map;
 	struct ibv_pd				*pd;
 
+	int					num_srq;
+
 	TAILQ_ENTRY(spdk_nvmf_rdma_device)	link;
 };
 
@@ -2988,9 +2990,10 @@ spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
 		STAILQ_INIT(&poller->qpairs_pending_recv);
 
 		TAILQ_INSERT_TAIL(&rgroup->pollers, poller, link);
-		if (transport->opts.no_srq == false && device->attr.max_srq != 0) {
+		if (transport->opts.no_srq == false && device->num_srq < device->attr.max_srq) {
 			poller->max_srq_depth = transport->opts.max_srq_depth;
+			device->num_srq++;
 
 			memset(&srq_init_attr, 0, sizeof(struct ibv_srq_init_attr));
 			srq_init_attr.attr.max_wr = poller->max_srq_depth;
 			srq_init_attr.attr.max_sge = spdk_min(device->attr.max_sge, NVMF_DEFAULT_RX_SGE);
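
For context, below is a minimal standalone sketch (not the SPDK code itself) of the
bookkeeping this patch introduces: each device counts the SRQs it has handed out, a
poller only gets one while that count is below the device-reported limit, and any
poll group created on an "extra" core falls back to per-queue receives instead of
failing SRQ allocation. The fake_device, fake_poller, and poller_try_use_srq names
are illustrative stand-ins, not the real spdk_nvmf_rdma_* or ibverbs types.

/*
 * Simplified model of the device->num_srq < device->attr.max_srq check.
 * The types here are hypothetical stand-ins for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_device {
	int max_srq;   /* limit reported by the hardware (device->attr.max_srq) */
	int num_srq;   /* SRQs already allocated on this device (the new counter) */
};

struct fake_poller {
	bool uses_srq;
};

/* Returns true if the poller reserved an SRQ slot, false if it must fall back. */
static bool
poller_try_use_srq(struct fake_device *dev, struct fake_poller *poller, bool no_srq)
{
	if (!no_srq && dev->num_srq < dev->max_srq) {
		/* Reserve one SRQ slot before (conceptually) calling ibv_create_srq(). */
		dev->num_srq++;
		poller->uses_srq = true;
	} else {
		poller->uses_srq = false;
	}
	return poller->uses_srq;
}

int
main(void)
{
	struct fake_device dev = { .max_srq = 2, .num_srq = 0 };
	struct fake_poller pollers[4] = { { false } };
	int core;

	/* With 4 "cores" but only 2 SRQs available, the last two pollers fall back. */
	for (core = 0; core < 4; core++) {
		bool got_srq = poller_try_use_srq(&dev, &pollers[core], false);
		printf("core %d: %s\n", core,
		       got_srq ? "uses a shared receive queue" : "falls back to per-queue receives");
	}
	return 0;
}

Compiling and running this prints which simulated cores get an SRQ and which fall
back, mirroring the new device->num_srq < device->attr.max_srq condition in the
diff above.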