nvme: determine io_queue_size at startup
Add a transport callback to return the maximum queue size, and enforce
it in the generic nvme_ctrlr layer.

This allows the user to tell what io_queue_size was actually selected
by the transport via the ctrlr_opts returned during attach_cb.

Change-Id: I8a51332cc01c6655e2a3a171bb92877fe48ea267
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
parent f505f57b36
commit 988906135c
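For context, a minimal caller-side sketch of the behavior described in the commit message, assuming the spdk_nvme_probe()/probe_cb/attach_cb signatures of this generation of the SPDK API and omitting environment setup; the requested queue size of 4096 is an arbitrary example. The value requested in probe_cb is clamped by the library, and the value actually selected can be read from the opts passed to attach_cb.

#include <stdbool.h>
#include <stdio.h>

#include "spdk/nvme.h"

/* Ask for a large I/O queue; the library clamps this to CAP.MQES + 1 and
 * to the transport maximum before the controller is attached. */
static bool
probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	 struct spdk_nvme_ctrlr_opts *opts)
{
	opts->io_queue_size = 4096;	/* arbitrary, likely larger than the device allows */
	return true;
}

/* The opts handed to attach_cb reflect the values actually in effect,
 * so io_queue_size here is whatever the clamping selected. */
static void
attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	  struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
{
	printf("%s: io_queue_size selected = %u\n", trid->traddr, opts->io_queue_size);
}

int
main(void)
{
	/* Environment (hugepage/PCI) initialization is omitted here; a real
	 * application must set that up before probing. */
	if (spdk_nvme_probe(NULL, NULL, probe_cb, attach_cb, NULL) != 0) {
		fprintf(stderr, "spdk_nvme_probe() failed\n");
		return 1;
	}
	return 0;
}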
@@ -1307,9 +1307,14 @@ nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
 void
 nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_register *cap)
 {
+	uint32_t max_io_queue_size = nvme_transport_ctrlr_get_max_io_queue_size(ctrlr);
+
 	ctrlr->cap = *cap;
 
 	ctrlr->min_page_size = 1u << (12 + ctrlr->cap.bits.mpsmin);
+
+	ctrlr->opts.io_queue_size = nvme_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u);
+	ctrlr->opts.io_queue_size = nvme_min(ctrlr->opts.io_queue_size, max_io_queue_size);
 }
 
 void
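The hunk above carries the core of the change: the user's requested io_queue_size is clamped first to the device limit (CAP.MQES is a zero-based count, hence the + 1) and then to the transport's ceiling reported by the new callback. A self-contained sketch of that clamping order, with made-up example values and a local min helper standing in for nvme_min():

#include <stdint.h>
#include <stdio.h>

static uint32_t
min_u32(uint32_t a, uint32_t b)
{
	return a < b ? a : b;
}

int
main(void)
{
	uint32_t requested = 1024;	/* user's opts.io_queue_size (example) */
	uint32_t mqes = 255;		/* CAP.MQES: max queue entries, zero-based */
	uint32_t transport_max = 256;	/* e.g. a PCIe-style compile-time limit */

	/* Same order as nvme_ctrlr_init_cap(): device limit first, then transport. */
	uint32_t selected = min_u32(requested, mqes + 1);
	selected = min_u32(selected, transport_max);

	printf("selected io_queue_size = %u\n", selected);	/* prints 256 */
	return 0;
}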
@@ -577,6 +577,7 @@ void nvme_qpair_print_completion(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cpl *cpl);
 	int nvme_ ## name ## _ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value); \
 	int nvme_ ## name ## _ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value); \
 	uint32_t nvme_ ## name ## _ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr); \
+	uint32_t nvme_ ## name ## _ctrlr_get_max_io_queue_size(struct spdk_nvme_ctrlr *ctrlr); \
 	struct spdk_nvme_qpair *nvme_ ## name ## _ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid, enum spdk_nvme_qprio qprio); \
 	int nvme_ ## name ## _ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair); \
 	int nvme_ ## name ## _ctrlr_reinit_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair); \
@@ -359,6 +359,12 @@ nvme_pcie_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
 	return NVME_MAX_XFER_SIZE;
 }
 
+uint32_t
+nvme_pcie_ctrlr_get_max_io_queue_size(struct spdk_nvme_ctrlr *ctrlr)
+{
+	return NVME_IO_ENTRIES;
+}
+
 static void
 nvme_pcie_ctrlr_map_cmb(struct nvme_pcie_ctrlr *pctrlr)
 {
@@ -1321,7 +1327,6 @@ nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
 {
 	struct nvme_pcie_qpair *pqpair;
 	struct spdk_nvme_qpair *qpair;
-	uint32_t num_entries;
 	int rc;
 
 	assert(ctrlr != NULL);
@@ -1333,15 +1338,7 @@ nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
 
 	qpair = &pqpair->qpair;
 
-	/*
-	 * NVMe spec sets a hard limit of 64K max entries, but
-	 * devices may specify a smaller limit, so we need to check
-	 * the MQES field in the capabilities register.
-	 */
-	num_entries = nvme_min(NVME_IO_ENTRIES, ctrlr->cap.bits.mqes + 1);
-	num_entries = nvme_min(num_entries, ctrlr->opts.io_queue_size);
-
-	rc = nvme_qpair_construct(qpair, qid, num_entries, ctrlr, qprio);
+	rc = nvme_qpair_construct(qpair, qid, ctrlr->opts.io_queue_size, ctrlr, qprio);
 	if (rc != 0) {
 		nvme_pcie_qpair_destroy(qpair);
 		return NULL;
@@ -970,12 +970,9 @@ nvme_rdma_ctrlr_create_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
 {
	struct nvme_rdma_qpair *rqpair;
	struct spdk_nvme_qpair *qpair;
-	struct nvme_rdma_ctrlr *rctrlr;
	uint32_t num_entries;
	int rc;
 
-	rctrlr = nvme_rdma_ctrlr(ctrlr);
-
	rqpair = calloc(1, sizeof(struct nvme_rdma_qpair));
	if (!rqpair) {
		SPDK_ERRLOG("failed to get create rqpair\n");
@@ -989,9 +986,7 @@ nvme_rdma_ctrlr_create_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
 		num_entries = SPDK_NVMF_MIN_ADMIN_QUEUE_ENTRIES;
 		ctrlr->adminq = qpair;
 	} else {
-		num_entries = nvme_min(NVME_HOST_MAX_ENTRIES_PER_QUEUE,
-				       ctrlr->cap.bits.mqes + 1);
-		num_entries = nvme_min(num_entries, rctrlr->ctrlr.opts.io_queue_size);
+		num_entries = ctrlr->opts.io_queue_size;
 	}
 
 	rc = nvme_qpair_construct(qpair, qid, num_entries, ctrlr, qprio);
@@ -1510,3 +1505,9 @@ nvme_rdma_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
 	/* Todo, which should get from the NVMF target */
 	return NVME_RDMA_RW_BUFFER_SIZE;
 }
+
+uint32_t
+nvme_rdma_ctrlr_get_max_io_queue_size(struct spdk_nvme_ctrlr *ctrlr)
+{
+	return NVME_HOST_MAX_ENTRIES_PER_QUEUE;
+}
@@ -153,6 +153,12 @@ nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
 	NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_get_max_xfer_size, (ctrlr));
 }
 
+uint32_t
+nvme_transport_ctrlr_get_max_io_queue_size(struct spdk_nvme_ctrlr *ctrlr)
+{
+	NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_get_max_io_queue_size, (ctrlr));
+}
+
 struct spdk_nvme_qpair *
 nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
 				     enum spdk_nvme_qprio qprio)
@@ -123,6 +123,12 @@ nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
 	return UINT32_MAX;
 }
 
+uint32_t
+nvme_transport_ctrlr_get_max_io_queue_size(struct spdk_nvme_ctrlr *ctrlr)
+{
+	return SPDK_NVME_IO_QUEUE_MAX_ENTRIES;
+}
+
 struct spdk_nvme_qpair *
 nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
 				     enum spdk_nvme_qprio qprio)