nvme/pcie: use nvme_pcie_vtophys in the submit request function

Pass the controller parameter to some internal functions so that the
virtual-to-physical translation can be done based on the transport type.

Change-Id: I3ca4fa162ec9305f62b295ba21f7474c21edfe52
Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8031
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Changpeng Liu 2021-06-03 22:11:46 +08:00 committed by Tomasz Zawadzki
parent 621d9d3f63
commit d4844d5b4e
2 changed files with 55 additions and 28 deletions
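
For context, the nvme_pcie_vtophys() helper used throughout this change is what makes the
translation transport-aware: PCIe controllers need a real physical address from spdk_vtophys(),
while the other transports that share this code path can use the buffer address directly. A
minimal sketch of such a helper is shown below; it is an illustration based on that assumption,
not necessarily the exact SPDK implementation.

static inline uint64_t
nvme_pcie_vtophys(struct spdk_nvme_ctrlr *ctrlr, const void *buf, uint64_t *size)
{
	if (spdk_likely(ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE)) {
		/* Real PCIe controllers need a virtual-to-physical translation. */
		return spdk_vtophys(buf, size);
	}

	/* Assume an identity mapping for the non-PCIe transports that reuse
	 * this code path, so the virtual address is returned unchanged.
	 */
	return (uint64_t)(uintptr_t)buf;
}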


@@ -1063,7 +1063,8 @@ nvme_pcie_fail_request_bad_vtophys(struct spdk_nvme_qpair *qpair, struct nvme_tr
* *prp_index will be updated to account for the number of PRP entries used.
*/
static inline int
-nvme_pcie_prp_list_append(struct nvme_tracker *tr, uint32_t *prp_index, void *virt_addr, size_t len,
+nvme_pcie_prp_list_append(struct spdk_nvme_ctrlr *ctrlr, struct nvme_tracker *tr,
+uint32_t *prp_index, void *virt_addr, size_t len,
uint32_t page_size)
{
struct spdk_nvme_cmd *cmd = &tr->req->cmd;
@@ -1092,7 +1093,7 @@ nvme_pcie_prp_list_append(struct nvme_tracker *tr, uint32_t *prp_index, void *vi
return -EFAULT;
}
-phys_addr = spdk_vtophys(virt_addr, NULL);
+phys_addr = nvme_pcie_vtophys(ctrlr, virt_addr, NULL);
if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR)) {
SPDK_ERRLOG("vtophys(%p) failed\n", virt_addr);
return -EFAULT;
@@ -1153,7 +1154,8 @@ nvme_pcie_qpair_build_contig_request(struct spdk_nvme_qpair *qpair, struct nvme_
uint32_t prp_index = 0;
int rc;
-rc = nvme_pcie_prp_list_append(tr, &prp_index, req->payload.contig_or_cb_arg + req->payload_offset,
+rc = nvme_pcie_prp_list_append(qpair->ctrlr, tr, &prp_index,
+req->payload.contig_or_cb_arg + req->payload_offset,
req->payload_size, qpair->ctrlr->page_size);
if (rc) {
nvme_pcie_fail_request_bad_vtophys(qpair, tr);
@@ -1201,7 +1203,7 @@ nvme_pcie_qpair_build_contig_hw_sgl_request(struct spdk_nvme_qpair *qpair, struc
}
mapping_length = length;
-phys_addr = spdk_vtophys(virt_addr, &mapping_length);
+phys_addr = nvme_pcie_vtophys(qpair->ctrlr, virt_addr, &mapping_length);
if (phys_addr == SPDK_VTOPHYS_ERROR) {
nvme_pcie_fail_request_bad_vtophys(qpair, tr);
return -EFAULT;
@@ -1325,7 +1327,7 @@ nvme_pcie_qpair_build_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_
}
mapping_length = remaining_user_sge_len;
-phys_addr = spdk_vtophys(virt_addr, &mapping_length);
+phys_addr = nvme_pcie_vtophys(qpair->ctrlr, virt_addr, &mapping_length);
if (phys_addr == SPDK_VTOPHYS_ERROR) {
goto exit;
}
@@ -1417,7 +1419,7 @@ nvme_pcie_qpair_build_prps_sgl_request(struct spdk_nvme_qpair *qpair, struct nvm
assert((length == remaining_transfer_len) ||
_is_page_aligned((uintptr_t)virt_addr + length, page_size));
-rc = nvme_pcie_prp_list_append(tr, &prp_index, virt_addr, length, page_size);
+rc = nvme_pcie_prp_list_append(qpair->ctrlr, tr, &prp_index, virt_addr, length, page_size);
if (rc) {
nvme_pcie_fail_request_bad_vtophys(qpair, tr);
return rc;
@@ -1464,7 +1466,7 @@ nvme_pcie_qpair_build_metadata(struct spdk_nvme_qpair *qpair, struct nvme_tracke
if (sgl_supported && dword_aligned) {
assert(req->cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_SGL;
-tr->meta_sgl.address = spdk_vtophys(md_payload, NULL);
+tr->meta_sgl.address = nvme_pcie_vtophys(qpair->ctrlr, md_payload, NULL);
if (tr->meta_sgl.address == SPDK_VTOPHYS_ERROR) {
goto exit;
}
@@ -1473,7 +1475,7 @@ nvme_pcie_qpair_build_metadata(struct spdk_nvme_qpair *qpair, struct nvme_tracke
tr->meta_sgl.unkeyed.subtype = 0;
req->cmd.mptr = tr->prp_sgl_bus_addr - sizeof(struct spdk_nvme_sgl_descriptor);
} else {
-req->cmd.mptr = spdk_vtophys(md_payload, NULL);
+req->cmd.mptr = nvme_pcie_vtophys(qpair->ctrlr, md_payload, NULL);
if (req->cmd.mptr == SPDK_VTOPHYS_ERROR) {
goto exit;
}


@@ -170,47 +170,54 @@ test_prp_list_append(void)
{
struct nvme_request req;
struct nvme_tracker tr;
+struct spdk_nvme_ctrlr ctrlr = {};
uint32_t prp_index;
+ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
/* Non-DWORD-aligned buffer (invalid) */
prp_list_prep(&tr, &req, &prp_index);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100001, 0x1000, 0x1000) == -EFAULT);
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100001, 0x1000,
+0x1000) == -EFAULT);
/* 512-byte buffer, 4K aligned */
prp_list_prep(&tr, &req, &prp_index);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x200, 0x1000) == 0);
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x200, 0x1000) == 0);
CU_ASSERT(prp_index == 1);
CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
/* 512-byte buffer, non-4K-aligned */
prp_list_prep(&tr, &req, &prp_index);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x108000, 0x200, 0x1000) == 0);
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x108000, 0x200, 0x1000) == 0);
CU_ASSERT(prp_index == 1);
CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x108000);
/* 4K buffer, 4K aligned */
prp_list_prep(&tr, &req, &prp_index);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == 0);
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
+0x1000) == 0);
CU_ASSERT(prp_index == 1);
CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
/* 4K buffer, non-4K aligned */
prp_list_prep(&tr, &req, &prp_index);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
+0x1000) == 0);
CU_ASSERT(prp_index == 2);
CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);
/* 8K buffer, 4K aligned */
prp_list_prep(&tr, &req, &prp_index);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x2000, 0x1000) == 0);
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x2000,
+0x1000) == 0);
CU_ASSERT(prp_index == 2);
CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);
/* 8K buffer, non-4K aligned */
prp_list_prep(&tr, &req, &prp_index);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x2000, 0x1000) == 0);
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x2000,
+0x1000) == 0);
CU_ASSERT(prp_index == 3);
CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
@@ -219,7 +226,8 @@ test_prp_list_append(void)
/* 12K buffer, 4K aligned */
prp_list_prep(&tr, &req, &prp_index);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x3000, 0x1000) == 0);
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x3000,
+0x1000) == 0);
CU_ASSERT(prp_index == 3);
CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
@@ -228,7 +236,8 @@ test_prp_list_append(void)
/* 12K buffer, non-4K aligned */
prp_list_prep(&tr, &req, &prp_index);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x3000, 0x1000) == 0);
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x3000,
+0x1000) == 0);
CU_ASSERT(prp_index == 4);
CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
@@ -238,18 +247,22 @@ test_prp_list_append(void)
/* Two 4K buffers, both 4K aligned */
prp_list_prep(&tr, &req, &prp_index);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == 0);
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
+0x1000) == 0);
CU_ASSERT(prp_index == 1);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900000, 0x1000, 0x1000) == 0);
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900000, 0x1000,
+0x1000) == 0);
CU_ASSERT(prp_index == 2);
CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x900000);
/* Two 4K buffers, first non-4K aligned, second 4K aligned */
prp_list_prep(&tr, &req, &prp_index);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
+0x1000) == 0);
CU_ASSERT(prp_index == 2);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900000, 0x1000, 0x1000) == 0);
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900000, 0x1000,
+0x1000) == 0);
CU_ASSERT(prp_index == 3);
CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
@@ -258,37 +271,40 @@ test_prp_list_append(void)
/* Two 4K buffers, both non-4K aligned (invalid) */
prp_list_prep(&tr, &req, &prp_index);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
+0x1000) == 0);
CU_ASSERT(prp_index == 2);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900800, 0x1000, 0x1000) == -EFAULT);
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900800, 0x1000,
+0x1000) == -EFAULT);
CU_ASSERT(prp_index == 2);
/* 4K buffer, 4K aligned, but vtophys fails */
MOCK_SET(spdk_vtophys, SPDK_VTOPHYS_ERROR);
prp_list_prep(&tr, &req, &prp_index);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == -EFAULT);
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
+0x1000) == -EFAULT);
MOCK_CLEAR(spdk_vtophys);
/* Largest aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
prp_list_prep(&tr, &req, &prp_index);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000,
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000,
(NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == 0);
CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);
/* Largest non-4K-aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
prp_list_prep(&tr, &req, &prp_index);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800,
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800,
NVME_MAX_PRP_LIST_ENTRIES * 0x1000, 0x1000) == 0);
CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);
/* Buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
prp_list_prep(&tr, &req, &prp_index);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000,
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000,
(NVME_MAX_PRP_LIST_ENTRIES + 2) * 0x1000, 0x1000) == -EFAULT);
/* Non-4K-aligned buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
prp_list_prep(&tr, &req, &prp_index);
-CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800,
+CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800,
(NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == -EFAULT);
}
@@ -429,8 +445,11 @@ test_build_contig_hw_sgl_request(void)
struct spdk_nvme_qpair qpair = {};
struct nvme_request req = {};
struct nvme_tracker tr = {};
+struct spdk_nvme_ctrlr ctrlr = {};
int rc;
+ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+qpair.ctrlr = &ctrlr;
/* Test 1: Payload covered by a single mapping */
req.payload_size = 100;
req.payload = NVME_PAYLOAD_CONTIG(0, 0);
@@ -450,6 +469,7 @@ test_build_contig_hw_sgl_request(void)
memset(&tr, 0, sizeof(tr));
/* Test 2: Payload covered by a single mapping, but request is at an offset */
+qpair.ctrlr = &ctrlr;
req.payload_size = 100;
req.payload_offset = 50;
req.payload = NVME_PAYLOAD_CONTIG(0, 0);
@@ -469,6 +489,7 @@ test_build_contig_hw_sgl_request(void)
memset(&tr, 0, sizeof(tr));
/* Test 3: Payload spans two mappings */
+qpair.ctrlr = &ctrlr;
req.payload_size = 100;
req.payload = NVME_PAYLOAD_CONTIG(0, 0);
g_vtophys_size = 60;
@@ -503,6 +524,7 @@ test_nvme_pcie_qpair_build_metadata(void)
struct spdk_nvme_ctrlr ctrlr = {};
int rc;
+ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
tr.req = &req;
qpair.ctrlr = &ctrlr;
@@ -603,8 +625,11 @@ test_nvme_pcie_qpair_build_hw_sgl_request(void)
struct nvme_request req = {};
struct nvme_tracker tr = {};
struct nvme_pcie_ut_bdev_io bio = {};
+struct spdk_nvme_ctrlr ctrlr = {};
int rc;
+ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+qpair.ctrlr = &ctrlr;
req.payload.contig_or_cb_arg = &bio;
req.payload.reset_sgl_fn = nvme_pcie_ut_reset_sgl;
req.payload.next_sge_fn = nvme_pcie_ut_next_sge;