nvme: move num_entries to transport-specific qpairs

The spdk_nvme_qpair::num_entries value is never used in the common code,
so move it to the individual transport qpairs to make it clear that it
is a transport-specific implementation detail.

Change-Id: I5c8f0de4fcd808912ba6d248cf5cee816079fd32
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Author:  Daniel Verkamp <daniel.verkamp@intel.com>
Date:    2017-01-05 09:54:00 -07:00
Parent:  7ac9a4ecbb
Commit:  df8129fb39

6 changed files with 49 additions and 49 deletions
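
The diffs below all follow one pattern: each transport already embeds the generic struct spdk_nvme_qpair inside its own qpair structure, so per-transport state such as num_entries can live next to it and be recovered from a generic pointer with an accessor like nvme_pcie_qpair(). A minimal sketch of that pattern, with simplified fields rather than the verbatim SPDK definitions:

/* Sketch only: simplified versions of the structures touched by this commit. */
#include <stddef.h>
#include <stdint.h>

struct spdk_nvme_qpair {
	uint16_t id;                    /* shared, transport-agnostic state */
};

struct nvme_pcie_qpair {
	struct spdk_nvme_qpair qpair;   /* embedded generic qpair */
	uint16_t num_entries;           /* now transport-private */
};

/* Recover the transport qpair from a generic qpair pointer. */
static inline struct nvme_pcie_qpair *
nvme_pcie_qpair(struct spdk_nvme_qpair *qpair)
{
	return (struct nvme_pcie_qpair *)((uintptr_t)qpair -
					  offsetof(struct nvme_pcie_qpair, qpair));
}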


@@ -247,8 +247,6 @@ struct spdk_nvme_qpair {
 	uint16_t id;
-	uint16_t num_entries;
 	uint8_t qprio;
 	struct spdk_nvme_ctrlr *ctrlr;
@@ -531,7 +529,6 @@ int nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
 int nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap);
 void nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_register *cap);
 int nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
-		    uint16_t num_entries,
 		    struct spdk_nvme_ctrlr *ctrlr,
 		    enum spdk_nvme_qprio qprio);
 void nvme_qpair_enable(struct spdk_nvme_qpair *qpair);


@@ -157,6 +157,8 @@ struct nvme_pcie_qpair {
 	/* Array of trackers indexed by command ID. */
 	struct nvme_tracker *tr;
+	uint16_t num_entries;
+
 	uint16_t sq_tail;
 	uint16_t cq_head;
@@ -568,11 +570,12 @@ nvme_pcie_ctrlr_construct_admin_qpair(struct spdk_nvme_ctrlr *ctrlr)
 		return -ENOMEM;
 	}
+	pqpair->num_entries = NVME_ADMIN_ENTRIES;
 	ctrlr->adminq = &pqpair->qpair;
 	rc = nvme_qpair_init(ctrlr->adminq,
 			     0, /* qpair ID */
-			     NVME_ADMIN_ENTRIES,
 			     ctrlr,
 			     SPDK_NVME_QPRIO_URGENT);
 	if (rc != 0) {
@@ -747,8 +750,8 @@ nvme_pcie_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
 	aqa.raw = 0;
 	/* acqs and asqs are 0-based. */
-	aqa.bits.acqs = ctrlr->adminq->num_entries - 1;
-	aqa.bits.asqs = ctrlr->adminq->num_entries - 1;
+	aqa.bits.acqs = nvme_pcie_qpair(ctrlr->adminq)->num_entries - 1;
+	aqa.bits.asqs = nvme_pcie_qpair(ctrlr->adminq)->num_entries - 1;
 	if (nvme_pcie_ctrlr_set_aqa(pctrlr, &aqa)) {
 		SPDK_ERRLOG("set_aqa() failed\n");
@@ -801,9 +804,9 @@ nvme_pcie_qpair_reset(struct spdk_nvme_qpair *qpair)
 	pqpair->phase = 1;
 	memset(pqpair->cmd, 0,
-	       qpair->num_entries * sizeof(struct spdk_nvme_cmd));
+	       pqpair->num_entries * sizeof(struct spdk_nvme_cmd));
 	memset(pqpair->cpl, 0,
-	       qpair->num_entries * sizeof(struct spdk_nvme_cpl));
+	       pqpair->num_entries * sizeof(struct spdk_nvme_cpl));
 	return 0;
 }
@@ -829,7 +832,7 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair)
 		 * Note also that for a queue size of N, we can only have (N-1)
 		 * commands outstanding, hence the "-1" here.
 		 */
-		num_trackers = nvme_min(NVME_IO_TRACKERS, qpair->num_entries - 1);
+		num_trackers = nvme_min(NVME_IO_TRACKERS, pqpair->num_entries - 1);
 	}
 	assert(num_trackers != 0);
@@ -838,7 +841,7 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair)
 	/* cmd and cpl rings must be aligned on 4KB boundaries. */
 	if (ctrlr->opts.use_cmb_sqs) {
-		if (nvme_pcie_ctrlr_alloc_cmb(ctrlr, qpair->num_entries * sizeof(struct spdk_nvme_cmd),
+		if (nvme_pcie_ctrlr_alloc_cmb(ctrlr, pqpair->num_entries * sizeof(struct spdk_nvme_cmd),
					      0x1000, &offset) == 0) {
 			pqpair->cmd = pctrlr->cmb_bar_virt_addr + offset;
 			pqpair->cmd_bus_addr = pctrlr->cmb_bar_phys_addr + offset;
@@ -846,7 +849,7 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair)
 		}
 	}
 	if (pqpair->sq_in_cmb == false) {
-		pqpair->cmd = spdk_zmalloc(qpair->num_entries * sizeof(struct spdk_nvme_cmd),
+		pqpair->cmd = spdk_zmalloc(pqpair->num_entries * sizeof(struct spdk_nvme_cmd),
					   0x1000,
					   &pqpair->cmd_bus_addr);
 		if (pqpair->cmd == NULL) {
@@ -855,7 +858,7 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair)
 		}
 	}
-	pqpair->cpl = spdk_zmalloc(qpair->num_entries * sizeof(struct spdk_nvme_cpl),
+	pqpair->cpl = spdk_zmalloc(pqpair->num_entries * sizeof(struct spdk_nvme_cpl),
				   0x1000,
				   &pqpair->cpl_bus_addr);
 	if (pqpair->cpl == NULL) {
@@ -1014,7 +1017,7 @@ nvme_pcie_qpair_submit_tracker(struct spdk_nvme_qpair *qpair, struct nvme_tracke
 	/* Copy the command from the tracker to the submission queue. */
 	nvme_pcie_copy_command(&pqpair->cmd[pqpair->sq_tail], &req->cmd);
-	if (++pqpair->sq_tail == qpair->num_entries) {
+	if (++pqpair->sq_tail == pqpair->num_entries) {
 		pqpair->sq_tail = 0;
 	}
@@ -1258,7 +1261,7 @@ nvme_pcie_ctrlr_cmd_create_io_cq(struct spdk_nvme_ctrlr *ctrlr,
 	 * TODO: create a create io completion queue command data
 	 * structure.
 	 */
-	cmd->cdw10 = ((io_que->num_entries - 1) << 16) | io_que->id;
+	cmd->cdw10 = ((pqpair->num_entries - 1) << 16) | io_que->id;
 	/*
 	 * 0x2 = interrupts enabled
 	 * 0x1 = physically contiguous
@@ -1289,7 +1292,7 @@ nvme_pcie_ctrlr_cmd_create_io_sq(struct spdk_nvme_ctrlr *ctrlr,
 	 * TODO: create a create io submission queue command data
 	 * structure.
 	 */
-	cmd->cdw10 = ((io_que->num_entries - 1) << 16) | io_que->id;
+	cmd->cdw10 = ((pqpair->num_entries - 1) << 16) | io_que->id;
 	/* 0x1 = physically contiguous */
 	cmd->cdw11 = (io_que->id << 16) | (io_que->qprio << 1) | 0x1;
 	cmd->dptr.prp.prp1 = pqpair->cmd_bus_addr;
@@ -1399,9 +1402,11 @@ nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
 		return NULL;
 	}
+	pqpair->num_entries = ctrlr->opts.io_queue_size;
 	qpair = &pqpair->qpair;
-	rc = nvme_qpair_init(qpair, qid, ctrlr->opts.io_queue_size, ctrlr, qprio);
+	rc = nvme_qpair_init(qpair, qid, ctrlr, qprio);
 	if (rc != 0) {
 		nvme_pcie_qpair_destroy(qpair);
 		return NULL;
@@ -1857,14 +1862,14 @@ nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
 		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
 	}
-	if (max_completions == 0 || (max_completions > (qpair->num_entries - 1U))) {
+	if (max_completions == 0 || (max_completions > (pqpair->num_entries - 1U))) {
 		/*
 		 * max_completions == 0 means unlimited, but complete at most one
 		 * queue depth batch of I/O at a time so that the completion
 		 * queue doorbells don't wrap around.
 		 */
-		max_completions = qpair->num_entries - 1;
+		max_completions = pqpair->num_entries - 1;
 	}
 	while (1) {
@@ -1883,7 +1888,7 @@ nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
 			assert(0);
 		}
-		if (++pqpair->cq_head == qpair->num_entries) {
+		if (++pqpair->cq_head == pqpair->num_entries) {
 			pqpair->cq_head = 0;
 			pqpair->phase = !pqpair->phase;
 		}


@@ -341,14 +341,10 @@ spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
 int
 nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
-		uint16_t num_entries,
 		struct spdk_nvme_ctrlr *ctrlr,
 		enum spdk_nvme_qprio qprio)
 {
-	assert(num_entries != 0);
 	qpair->id = id;
-	qpair->num_entries = num_entries;
 	qpair->qprio = qprio;
 	qpair->ctrlr = ctrlr;


@@ -87,7 +87,9 @@ struct nvme_rdma_qpair {
 	struct spdk_nvme_rdma_req *rdma_reqs;
-	/* Parallel arrays of response buffers + response SGLs of size qpair.num_entries */
+	uint16_t num_entries;
+
+	/* Parallel arrays of response buffers + response SGLs of size num_entries */
 	struct ibv_sge *rsp_sgls;
 	struct spdk_nvme_cpl *rsps;
@@ -97,7 +99,7 @@ struct nvme_rdma_qpair {
 	struct ibv_mr *rsp_mr;
 	/*
-	 * Array of qpair.num_entries NVMe commands registered as RDMA message buffers.
+	 * Array of num_entries NVMe commands registered as RDMA message buffers.
 	 * Indexed by rdma_req->id.
 	 */
 	struct spdk_nvme_cmd *cmds;
@@ -203,8 +205,8 @@ nvme_rdma_qpair_init(struct nvme_rdma_qpair *rqpair)
 	memset(&attr, 0, sizeof(struct ibv_qp_init_attr));
 	attr.qp_type = IBV_QPT_RC;
-	attr.cap.max_send_wr = rqpair->qpair.num_entries; /* SEND operations */
-	attr.cap.max_recv_wr = rqpair->qpair.num_entries; /* RECV operations */
+	attr.cap.max_send_wr = rqpair->num_entries; /* SEND operations */
+	attr.cap.max_recv_wr = rqpair->num_entries; /* RECV operations */
 	attr.cap.max_send_sge = NVME_RDMA_DEFAULT_TX_SGE;
 	attr.cap.max_recv_sge = NVME_RDMA_DEFAULT_RX_SGE;
@@ -330,33 +332,33 @@ nvme_rdma_alloc_rsps(struct nvme_rdma_qpair *rqpair)
 	rqpair->rsps = NULL;
 	rqpair->rsp_recv_wrs = NULL;
-	rqpair->rsp_sgls = calloc(rqpair->qpair.num_entries, sizeof(*rqpair->rsp_sgls));
+	rqpair->rsp_sgls = calloc(rqpair->num_entries, sizeof(*rqpair->rsp_sgls));
 	if (!rqpair->rsp_sgls) {
 		SPDK_ERRLOG("Failed to allocate rsp_sgls\n");
 		goto fail;
 	}
-	rqpair->rsp_recv_wrs = calloc(rqpair->qpair.num_entries,
+	rqpair->rsp_recv_wrs = calloc(rqpair->num_entries,
				      sizeof(*rqpair->rsp_recv_wrs));
 	if (!rqpair->rsp_recv_wrs) {
 		SPDK_ERRLOG("Failed to allocate rsp_recv_wrs\n");
 		goto fail;
 	}
-	rqpair->rsps = calloc(rqpair->qpair.num_entries, sizeof(*rqpair->rsps));
+	rqpair->rsps = calloc(rqpair->num_entries, sizeof(*rqpair->rsps));
 	if (!rqpair->rsps) {
 		SPDK_ERRLOG("can not allocate rdma rsps\n");
 		goto fail;
 	}
 	rqpair->rsp_mr = rdma_reg_msgs(rqpair->cm_id, rqpair->rsps,
-				       rqpair->qpair.num_entries * sizeof(*rqpair->rsps));
+				       rqpair->num_entries * sizeof(*rqpair->rsps));
 	if (rqpair->rsp_mr == NULL) {
 		SPDK_ERRLOG("Unable to register rsp_mr\n");
 		goto fail;
 	}
-	for (i = 0; i < rqpair->qpair.num_entries; i++) {
+	for (i = 0; i < rqpair->num_entries; i++) {
 		struct ibv_sge *rsp_sgl = &rqpair->rsp_sgls[i];
 		rsp_sgl->addr = (uint64_t)&rqpair->rsps[i];
@@ -386,7 +388,7 @@ nvme_rdma_free_reqs(struct nvme_rdma_qpair *rqpair)
 		return;
 	}
-	for (i = 0; i < rqpair->qpair.num_entries; i++) {
+	for (i = 0; i < rqpair->num_entries; i++) {
 		rdma_req = &rqpair->rdma_reqs[i];
 		if (rdma_req->bb_mr && ibv_dereg_mr(rdma_req->bb_mr)) {
@@ -415,27 +417,27 @@ nvme_rdma_alloc_reqs(struct nvme_rdma_qpair *rqpair)
 {
 	int i;
-	rqpair->rdma_reqs = calloc(rqpair->qpair.num_entries, sizeof(struct spdk_nvme_rdma_req));
+	rqpair->rdma_reqs = calloc(rqpair->num_entries, sizeof(struct spdk_nvme_rdma_req));
 	if (rqpair->rdma_reqs == NULL) {
 		SPDK_ERRLOG("Failed to allocate rdma_reqs\n");
 		goto fail;
 	}
-	rqpair->cmds = calloc(rqpair->qpair.num_entries, sizeof(*rqpair->cmds));
+	rqpair->cmds = calloc(rqpair->num_entries, sizeof(*rqpair->cmds));
 	if (!rqpair->cmds) {
 		SPDK_ERRLOG("Failed to allocate RDMA cmds\n");
 		goto fail;
 	}
 	rqpair->cmd_mr = rdma_reg_msgs(rqpair->cm_id, rqpair->cmds,
-				       rqpair->qpair.num_entries * sizeof(*rqpair->cmds));
+				       rqpair->num_entries * sizeof(*rqpair->cmds));
 	if (!rqpair->cmd_mr) {
 		SPDK_ERRLOG("Unable to register cmd_mr\n");
 		goto fail;
 	}
 	STAILQ_INIT(&rqpair->free_reqs);
-	for (i = 0; i < rqpair->qpair.num_entries; i++) {
+	for (i = 0; i < rqpair->num_entries; i++) {
 		struct spdk_nvme_rdma_req *rdma_req;
 		struct spdk_nvme_cmd *cmd;
@@ -483,7 +485,7 @@ nvme_rdma_recv(struct nvme_rdma_qpair *rqpair, uint64_t rsp_idx)
 	struct spdk_nvme_cpl *rsp;
 	struct nvme_request *req;
-	assert(rsp_idx < rqpair->qpair.num_entries);
+	assert(rsp_idx < rqpair->num_entries);
 	rsp = &rqpair->rsps[rsp_idx];
 	rdma_req = &rqpair->rdma_reqs[rsp->cid];
@@ -560,11 +562,11 @@ nvme_rdma_connect(struct nvme_rdma_qpair *rqpair)
 		return ret;
 	}
-	param.responder_resources = nvme_min(rqpair->qpair.num_entries, attr.max_qp_rd_atom);
+	param.responder_resources = nvme_min(rqpair->num_entries, attr.max_qp_rd_atom);
 	request_data.qid = rqpair->qpair.id;
-	request_data.hrqsize = rqpair->qpair.num_entries;
-	request_data.hsqsize = rqpair->qpair.num_entries - 1;
+	request_data.hrqsize = rqpair->num_entries;
+	request_data.hsqsize = rqpair->num_entries - 1;
 	param.private_data = &request_data;
 	param.private_data_len = sizeof(request_data);
@@ -588,9 +590,9 @@ nvme_rdma_connect(struct nvme_rdma_qpair *rqpair)
 	}
 	SPDK_TRACELOG(SPDK_TRACE_NVME, "Requested queue depth %d. Actually got queue depth %d.\n",
-		      rqpair->qpair.num_entries, accept_data->crqsize);
+		      rqpair->num_entries, accept_data->crqsize);
-	rqpair->qpair.num_entries = nvme_min(rqpair->qpair.num_entries , accept_data->crqsize);
+	rqpair->num_entries = nvme_min(rqpair->num_entries , accept_data->crqsize);
 	rdma_ack_cm_event(event);
@@ -656,7 +658,7 @@ nvme_rdma_qpair_fabric_connect(struct nvme_rdma_qpair *rqpair)
 	cmd.opcode = SPDK_NVME_OPC_FABRIC;
 	cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
 	cmd.qid = rqpair->qpair.id;
-	cmd.sqsize = rqpair->qpair.num_entries - 1;
+	cmd.sqsize = rqpair->num_entries - 1;
 	cmd.kato = ctrlr->opts.keep_alive_timeout_ms;
 	if (nvme_qpair_is_admin_queue(&rqpair->qpair)) {
@@ -914,9 +916,11 @@ nvme_rdma_ctrlr_create_qpair(struct spdk_nvme_ctrlr *ctrlr,
 		return NULL;
 	}
+	rqpair->num_entries = qsize;
 	qpair = &rqpair->qpair;
-	rc = nvme_qpair_init(qpair, qid, qsize, ctrlr, qprio);
+	rc = nvme_qpair_init(qpair, qid, ctrlr, qprio);
 	if (rc != 0) {
 		return NULL;
 	}
@@ -1320,7 +1324,7 @@ nvme_rdma_qpair_process_completions(struct spdk_nvme_qpair *qpair,
 	uint32_t io_completed = 0;
 	rqpair = nvme_rdma_qpair(qpair);
-	size = qpair->num_entries - 1U;
+	size = rqpair->num_entries - 1U;
 	if (!max_completions || max_completions > size) {
 		max_completions = size;
 	}
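
In the RDMA connect path above, hrqsize is sent as the full entry count while hsqsize is sent as num_entries - 1, because the host submission queue size is a 0-based value in the NVMe over Fabrics RDMA private data; the host then clamps num_entries to the crqsize the target returns in its accept data. A sketch of that clamp under an assumed private-data layout (the real definitions live in the SPDK NVMe-oF headers):

/* Sketch only: assumed layout, not the SPDK definition. */
#include <stdint.h>

struct rdma_connect_private_data {
	uint16_t qid;
	uint16_t hrqsize;   /* host receive queue size, 1-based */
	uint16_t hsqsize;   /* host submission queue size, 0-based */
};

static uint16_t
clamp_queue_depth(uint16_t requested, uint16_t crqsize)
{
	/* Never keep more entries than the target advertised. */
	return requested < crqsize ? requested : crqsize;
}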


@@ -165,12 +165,10 @@ nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
 }
 int nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
-		    uint16_t num_entries,
 		    struct spdk_nvme_ctrlr *ctrlr,
 		    enum spdk_nvme_qprio qprio)
 {
 	qpair->id = id;
-	qpair->num_entries = num_entries;
 	qpair->qprio = qprio;
 	qpair->ctrlr = ctrlr;


@@ -223,7 +223,7 @@ prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
 	ctrlr->free_io_qids = NULL;
 	TAILQ_INIT(&ctrlr->active_io_qpairs);
 	TAILQ_INIT(&ctrlr->active_procs);
-	nvme_qpair_init(qpair, 1, 128, ctrlr, 0);
+	nvme_qpair_init(qpair, 1, ctrlr, 0);
 	ut_fail_vtophys = false;
 }