nvme/pcie: reduce physically contiguous memory for CQ/SQ
The following patch made sure that the CQ/SQ are allocated in a physically contiguous manner:
(64db67) nvme/pcie: make sure sq and cq are physically contiguous

Using MAX_IO_QUEUE_ENTRIES is enough to make sure that neither queue spans multiple hugepages. However, that patch also made sure that a whole hugepage was occupied only by the queue, which unnecessarily increases memory consumption by up to two hugepages per qpair.

This patch changes the allocation so that each queue's alignment is limited to its own size instead of a full hugepage.

Changes in hugepages consumed when allocating an io_qpair in the hello_world application:

io_queue_size   Without patch   With patch
256             8MiB            0MiB
1024            12MiB           4MiB
4096            24MiB           16MiB

Note: 0MiB means no new hugepages were required; the qpair fits into previously allocated hugepages (see all steps before the io_qpair allocation in hello_world).

An interesting result of this patch: because alignment up to the hugepage size was previously required, DPDK could end up reserving two 2MiB hugepages just to account for its internal malloc trailing element. See alloc_sz in try_expand_heap_primary() within malloc_heap.c.

This patch not only reduces the overall memory reserved for the queues, it also reduces how much the heap grows on the DPDK side.

Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Change-Id: I75bf86e93674b4822d8204df3fb99458dec61e9c
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/2244
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
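To make the table above concrete, here is a minimal standalone sketch (illustration only, not part of the patch) that derives the per-queue alignment the same way the patched code does. It assumes 64-byte submission queue entries (sizeof(struct spdk_nvme_cmd)), 16-byte completion queue entries (sizeof(struct spdk_nvme_cpl)) and a 4KiB system page; the align32pow2() helper mimics the round-up-to-power-of-two behaviour of spdk_align32pow2():

/*
 * Illustration only (not part of the patch): compute the new queue
 * alignment for the io_queue_size values shown in the table above.
 * Assumptions: 64-byte SQ entries, 16-byte CQ entries, 4KiB pages.
 */
#include <stdint.h>
#include <stdio.h>

/* Mimics spdk_align32pow2(): round x up to the next power of two. */
static uint32_t
align32pow2(uint32_t x)
{
	uint32_t p = 1;

	while (p < x) {
		p <<= 1;
	}
	return p;
}

/* New rule: align each queue to max(its rounded-up size, page size). */
static uint32_t
queue_align(uint32_t queue_len, uint32_t page_align)
{
	uint32_t a = align32pow2(queue_len);

	return a > page_align ? a : page_align;	/* spdk_max() equivalent */
}

int
main(void)
{
	const uint32_t page_align = 4096;	/* sysconf(_SC_PAGESIZE) */
	const uint32_t entries[] = {256, 1024, 4096};

	for (int i = 0; i < 3; i++) {
		uint32_t sq_len = entries[i] * 64;	/* struct spdk_nvme_cmd */
		uint32_t cq_len = entries[i] * 16;	/* struct spdk_nvme_cpl */

		printf("entries=%u: SQ %u bytes aligned to %u, CQ %u bytes aligned to %u\n",
		       entries[i], sq_len, queue_align(sq_len, page_align),
		       cq_len, queue_align(cq_len, page_align));
	}
	return 0;
}

With the old code both rings were aligned (and effectively padded out) to the 2MiB hugepage size regardless of io_queue_size; with this change a 256-entry SQ/CQ pair needs only 16KiB and 4KiB alignment respectively, which is why small queues no longer force new hugepage allocations.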
commit d3cf561199 (parent 2623c4c4d8)
@@ -1032,7 +1032,8 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair,
 	uint16_t	i;
 	volatile uint32_t	*doorbell_base;
 	uint16_t	num_trackers;
-	size_t	page_align = VALUE_2MB;
+	size_t	page_align = sysconf(_SC_PAGESIZE);
+	size_t	queue_align, queue_len;
 	uint32_t flags = SPDK_MALLOC_DMA;
 	uint64_t	sq_paddr = 0;
 	uint64_t	cq_paddr = 0;
@@ -1070,7 +1071,7 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair,
 	/* cmd and cpl rings must be aligned on page size boundaries. */
 	if (ctrlr->opts.use_cmb_sqs) {
 		pqpair->cmd = nvme_pcie_ctrlr_alloc_cmb(ctrlr, pqpair->num_entries * sizeof(struct spdk_nvme_cmd),
-							sysconf(_SC_PAGESIZE), &pqpair->cmd_bus_addr);
+							page_align, &pqpair->cmd_bus_addr);
 		if (pqpair->cmd != NULL) {
 			pqpair->sq_in_cmb = true;
 		}
@@ -1083,9 +1084,9 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair,
 		/* To ensure physical address contiguity we make each ring occupy
 		 * a single hugepage only. See MAX_IO_QUEUE_ENTRIES.
 		 */
-		pqpair->cmd = spdk_zmalloc(pqpair->num_entries * sizeof(struct spdk_nvme_cmd),
-					   page_align, NULL,
-					   SPDK_ENV_SOCKET_ID_ANY, flags);
+		queue_len = pqpair->num_entries * sizeof(struct spdk_nvme_cmd);
+		queue_align = spdk_max(spdk_align32pow2(queue_len), page_align);
+		pqpair->cmd = spdk_zmalloc(queue_len, queue_align, NULL, SPDK_ENV_SOCKET_ID_ANY, flags);
 		if (pqpair->cmd == NULL) {
 			SPDK_ERRLOG("alloc qpair_cmd failed\n");
 			return -ENOMEM;
@@ -1106,9 +1107,9 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair,
 	if (pqpair->cq_vaddr) {
 		pqpair->cpl = pqpair->cq_vaddr;
 	} else {
-		pqpair->cpl = spdk_zmalloc(pqpair->num_entries * sizeof(struct spdk_nvme_cpl),
-					   page_align, NULL,
-					   SPDK_ENV_SOCKET_ID_ANY, flags);
+		queue_len = pqpair->num_entries * sizeof(struct spdk_nvme_cpl);
+		queue_align = spdk_max(spdk_align32pow2(queue_len), page_align);
+		pqpair->cpl = spdk_zmalloc(queue_len, queue_align, NULL, SPDK_ENV_SOCKET_ID_ANY, flags);
 		if (pqpair->cpl == NULL) {
 			SPDK_ERRLOG("alloc qpair_cpl failed\n");
 			return -ENOMEM;
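The reduced alignment is still safe with respect to physical contiguity: the existing comment notes that MAX_IO_QUEUE_ENTRIES keeps each ring within a single hugepage, and a buffer aligned to a power of two at least as large as its length can never straddle a 2MiB hugepage boundary. A small sanity-check sketch of that argument (illustration only, assuming 2MiB hugepages and using the largest io_queue_size from the table above as the example):

/*
 * Illustration only: a queue aligned to a power of two >= its length
 * cannot cross a 2MiB hugepage boundary, so physical contiguity of the
 * ring is preserved even without 2MiB alignment.
 */
#include <assert.h>
#include <stdint.h>

#define HUGEPAGE_SIZE (2u * 1024 * 1024)

/* True if [addr, addr + len) lies within a single 2MiB hugepage. */
static int
within_one_hugepage(uintptr_t addr, uint32_t len)
{
	return addr / HUGEPAGE_SIZE == (addr + len - 1) / HUGEPAGE_SIZE;
}

int
main(void)
{
	/* Example: 4096-entry SQ -> 256KiB ring, aligned to 256KiB. */
	const uint32_t queue_len = 4096 * 64;
	const uint32_t queue_align = 256 * 1024;

	/* Check every possible aligned placement within a hugepage. */
	for (uintptr_t off = 0; off < HUGEPAGE_SIZE; off += queue_align) {
		assert(within_one_hugepage(off, queue_len));
	}
	return 0;
}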