nvme: Use sgls, if available, even for contiguous memory
The hardware SGL format can describe large contiguous buffers using just a single element, so it's more efficient than a PRP list even for a single memory segment. Always use the SGL format. Change-Id: I9c62582829f0d64dcd1babdbc48930ddb4d9e626 Signed-off-by: Ben Walker <benjamin.walker@intel.com> Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/475542 Tested-by: SPDK CI Jenkins <sys_sgci@intel.com> Community-CI: SPDK CI Jenkins <sys_sgci@intel.com> Reviewed-by: Jim Harris <james.r.harris@intel.com> Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
This commit is contained in:
parent
25baf7148d
commit
bed4cdf6c7
@ -1806,6 +1806,78 @@ nvme_pcie_qpair_build_contig_request(struct spdk_nvme_qpair *qpair, struct nvme_
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build an SGL describing a physically contiguous payload buffer.
|
||||
*
|
||||
* This is more efficient than using PRP because large buffers can be
|
||||
* described this way.
|
||||
*/
|
||||
static int
|
||||
nvme_pcie_qpair_build_contig_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req,
|
||||
struct nvme_tracker *tr)
|
||||
{
|
||||
void *virt_addr;
|
||||
uint64_t phys_addr, mapping_length;
|
||||
uint32_t length;
|
||||
struct spdk_nvme_sgl_descriptor *sgl;
|
||||
uint32_t nseg = 0;
|
||||
|
||||
assert(req->payload_size != 0);
|
||||
assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
|
||||
|
||||
sgl = tr->u.sgl;
|
||||
req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
|
||||
req->cmd.dptr.sgl1.unkeyed.subtype = 0;
|
||||
|
||||
length = req->payload_size;
|
||||
virt_addr = req->payload.contig_or_cb_arg + req->payload_offset;
|
||||
|
||||
while (length > 0) {
|
||||
if (nseg >= NVME_MAX_SGL_DESCRIPTORS) {
|
||||
nvme_pcie_fail_request_bad_vtophys(qpair, tr);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
phys_addr = spdk_vtophys(virt_addr, &mapping_length);
|
||||
if (phys_addr == SPDK_VTOPHYS_ERROR) {
|
||||
nvme_pcie_fail_request_bad_vtophys(qpair, tr);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
mapping_length = spdk_min(length, mapping_length);
|
||||
|
||||
length -= mapping_length;
|
||||
virt_addr += mapping_length;
|
||||
|
||||
sgl->unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
|
||||
sgl->unkeyed.length = mapping_length;
|
||||
sgl->address = phys_addr;
|
||||
sgl->unkeyed.subtype = 0;
|
||||
|
||||
sgl++;
|
||||
nseg++;
|
||||
}
|
||||
|
||||
if (nseg == 1) {
|
||||
/*
|
||||
* The whole transfer can be described by a single SGL descriptor.
|
||||
* Use the special case described by the spec where SGL1's type is Data Block.
|
||||
* This means the SGL in the tracker is not used at all, so copy the first (and only)
|
||||
* SGL element into SGL1.
|
||||
*/
|
||||
req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
|
||||
req->cmd.dptr.sgl1.address = tr->u.sgl[0].address;
|
||||
req->cmd.dptr.sgl1.unkeyed.length = tr->u.sgl[0].unkeyed.length;
|
||||
} else {
|
||||
/* For now we can only support 1 SGL segment in NVMe controller */
|
||||
req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
|
||||
req->cmd.dptr.sgl1.address = tr->prp_sgl_bus_addr;
|
||||
req->cmd.dptr.sgl1.unkeyed.length = nseg * sizeof(struct spdk_nvme_sgl_descriptor);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build SGL list describing scattered payload buffer.
|
||||
*/
|
||||
@ -1992,7 +2064,11 @@ nvme_pcie_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_reques
|
||||
/* Null payload - leave PRP fields untouched */
|
||||
rc = 0;
|
||||
} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG) {
|
||||
rc = nvme_pcie_qpair_build_contig_request(qpair, req, tr);
|
||||
if (ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
|
||||
rc = nvme_pcie_qpair_build_contig_hw_sgl_request(qpair, req, tr);
|
||||
} else {
|
||||
rc = nvme_pcie_qpair_build_contig_request(qpair, req, tr);
|
||||
}
|
||||
} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL) {
|
||||
if (ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
|
||||
rc = nvme_pcie_qpair_build_hw_sgl_request(qpair, req, tr);
|
||||
|
@ -34,6 +34,8 @@
|
||||
#include "spdk/stdinc.h"
|
||||
|
||||
#include "spdk_cunit.h"
|
||||
|
||||
#define UNIT_TEST_NO_VTOPHYS
|
||||
#include "common/lib/test_env.c"
|
||||
|
||||
#include "nvme/nvme_pcie.c"
|
||||
@ -90,6 +92,21 @@ spdk_pci_enumerate(struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, voi
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Mapped length reported back through the *size out-parameter of the
 * spdk_vtophys() test double below.  Tests set this to control how much of a
 * buffer appears physically contiguous per translation. */
static uint64_t g_vtophys_size = 0;

DEFINE_RETURN_MOCK(spdk_vtophys, uint64_t);
/**
 * Test double for spdk_vtophys().
 *
 * Reports g_vtophys_size as the mapped length (when size is non-NULL).  If a
 * mock return value has been armed via MOCK_SET, HANDLE_RETURN_MOCK returns
 * it; otherwise the virtual address is identity-mapped to a "physical" one.
 */
uint64_t
spdk_vtophys(void *buf, uint64_t *size)
{
	if (size) {
		*size = g_vtophys_size;
	}

	HANDLE_RETURN_MOCK(spdk_vtophys);

	return (uintptr_t)buf;
}
|
||||
|
||||
DEFINE_STUB(spdk_pci_device_get_addr, struct spdk_pci_addr, (struct spdk_pci_device *dev), {});
|
||||
DEFINE_STUB(nvme_ctrlr_add_process, int, (struct spdk_nvme_ctrlr *ctrlr, void *devhandle), 0);
|
||||
DEFINE_STUB(nvme_ctrlr_probe, int, (const struct spdk_nvme_transport_id *trid,
|
||||
@ -101,6 +118,13 @@ DEFINE_STUB(spdk_nvme_get_ctrlr_by_trid_unsafe, struct spdk_nvme_ctrlr *,
|
||||
(const struct spdk_nvme_transport_id *trid), NULL);
|
||||
DEFINE_STUB(spdk_nvme_ctrlr_get_regs_csts, union spdk_nvme_csts_register,
|
||||
(struct spdk_nvme_ctrlr *ctrlr), {});
|
||||
DEFINE_STUB(spdk_nvme_ctrlr_get_process, struct spdk_nvme_ctrlr_process *,
|
||||
(struct spdk_nvme_ctrlr *ctrlr, pid_t pid), NULL);
|
||||
DEFINE_STUB(nvme_completion_is_retry, bool, (const struct spdk_nvme_cpl *cpl), false);
|
||||
DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
|
||||
struct spdk_nvme_cmd *cmd));
|
||||
DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
|
||||
struct spdk_nvme_cpl *cpl));
|
||||
|
||||
static void
|
||||
prp_list_prep(struct nvme_tracker *tr, struct nvme_request *req, uint32_t *prp_index)
|
||||
@ -349,6 +373,77 @@ static void test_shadow_doorbell_update(void)
|
||||
CU_ASSERT(ret == true);
|
||||
}
|
||||
|
||||
static void
|
||||
test_build_contig_hw_sgl_request(void)
|
||||
{
|
||||
struct spdk_nvme_qpair qpair = {};
|
||||
struct nvme_request req = {};
|
||||
struct nvme_tracker tr = {};
|
||||
int rc;
|
||||
|
||||
/* Test 1: Payload covered by a single mapping */
|
||||
req.payload_size = 100;
|
||||
req.payload = NVME_PAYLOAD_CONTIG(0, 0);
|
||||
g_vtophys_size = 100;
|
||||
MOCK_SET(spdk_vtophys, 0xDEADBEEF);
|
||||
|
||||
rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
|
||||
CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
|
||||
CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);
|
||||
|
||||
MOCK_CLEAR(spdk_vtophys);
|
||||
g_vtophys_size = 0;
|
||||
memset(&qpair, 0, sizeof(qpair));
|
||||
memset(&req, 0, sizeof(req));
|
||||
memset(&tr, 0, sizeof(tr));
|
||||
|
||||
/* Test 2: Payload covered by a single mapping, but request is at an offset */
|
||||
req.payload_size = 100;
|
||||
req.payload_offset = 50;
|
||||
req.payload = NVME_PAYLOAD_CONTIG(0, 0);
|
||||
g_vtophys_size = 1000;
|
||||
MOCK_SET(spdk_vtophys, 0xDEADBEEF);
|
||||
|
||||
rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
|
||||
CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
|
||||
CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);
|
||||
|
||||
MOCK_CLEAR(spdk_vtophys);
|
||||
g_vtophys_size = 0;
|
||||
memset(&qpair, 0, sizeof(qpair));
|
||||
memset(&req, 0, sizeof(req));
|
||||
memset(&tr, 0, sizeof(tr));
|
||||
|
||||
/* Test 3: Payload spans two mappings */
|
||||
req.payload_size = 100;
|
||||
req.payload = NVME_PAYLOAD_CONTIG(0, 0);
|
||||
g_vtophys_size = 60;
|
||||
tr.prp_sgl_bus_addr = 0xFF0FF;
|
||||
MOCK_SET(spdk_vtophys, 0xDEADBEEF);
|
||||
|
||||
rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
|
||||
CU_ASSERT(req.cmd.dptr.sgl1.address == tr.prp_sgl_bus_addr);
|
||||
CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 2 * sizeof(struct spdk_nvme_sgl_descriptor));
|
||||
CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
|
||||
CU_ASSERT(tr.u.sgl[0].unkeyed.length = 60);
|
||||
CU_ASSERT(tr.u.sgl[0].address = 0xDEADBEEF);
|
||||
CU_ASSERT(tr.u.sgl[1].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
|
||||
CU_ASSERT(tr.u.sgl[1].unkeyed.length = 40);
|
||||
CU_ASSERT(tr.u.sgl[1].address = 0xDEADBEEF);
|
||||
|
||||
MOCK_CLEAR(spdk_vtophys);
|
||||
g_vtophys_size = 0;
|
||||
memset(&qpair, 0, sizeof(qpair));
|
||||
memset(&req, 0, sizeof(req));
|
||||
memset(&tr, 0, sizeof(tr));
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
CU_pSuite suite = NULL;
|
||||
@ -364,10 +459,10 @@ int main(int argc, char **argv)
|
||||
return CU_get_error();
|
||||
}
|
||||
|
||||
if (CU_add_test(suite, "prp_list_append", test_prp_list_append) == NULL
|
||||
|| CU_add_test(suite, "nvme_pcie_hotplug_monitor", test_nvme_pcie_hotplug_monitor) == NULL
|
||||
|| CU_add_test(suite, "shadow_doorbell_update",
|
||||
test_shadow_doorbell_update) == NULL) {
|
||||
if (CU_add_test(suite, "prp_list_append", test_prp_list_append) == NULL ||
|
||||
CU_add_test(suite, "nvme_pcie_hotplug_monitor", test_nvme_pcie_hotplug_monitor) == NULL ||
|
||||
CU_add_test(suite, "shadow_doorbell_update", test_shadow_doorbell_update) == NULL ||
|
||||
CU_add_test(suite, "build_contig_hw_sgl_request", test_build_contig_hw_sgl_request) == NULL) {
|
||||
CU_cleanup_registry();
|
||||
return CU_get_error();
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user