nvme: decouple NVMe memory page size from PAGE_SIZE
PAGE_SIZE is the host memory page size, which is irrelevant for the NVMe driver; what we actually care about is the NVMe controller's memory page size, CC.MPS.

This patch cleans up the uses of PAGE_SIZE in the NVMe driver; the behavior is still the same in all cases today, since normal NVMe controllers report a minimum page size of 4096.

Change-Id: I56fce2770862329a9ce25370722f44269234ed46
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-on: https://review.gerrithub.io/374371
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
parent a996b228d2
commit 2eec131e4d
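The key relationship this patch relies on: the controller's memory page size is 2^(12 + CC.MPS), and CAP.MPSMIN reports the smallest exponent the controller supports, so the driver derives page_size from CAP.MPSMIN and programs CC.MPS = log2(page_size) - 12. Below is a minimal standalone sketch of that encoding (illustrative only, not part of the patch; the u32log2() helper is a stand-in for SPDK's spdk_u32log2()):

/* Illustrative sketch of the CC.MPS <-> page size encoding; not SPDK code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for spdk_u32log2(), valid for the power-of-two sizes used here. */
static uint32_t u32log2(uint32_t x)
{
	uint32_t result = 0;

	while (x >>= 1) {
		result++;
	}
	return result;
}

int main(void)
{
	uint32_t mpsmin = 0;                          /* CAP.MPSMIN of a typical controller */
	uint32_t min_page_size = 1u << (12 + mpsmin); /* 4096 bytes when MPSMIN == 0 */
	uint32_t page_size = min_page_size;           /* the patch always selects the minimum for now */
	uint32_t mps = u32log2(page_size) - 12;       /* value programmed into CC.MPS */

	assert(mps == mpsmin);
	printf("page_size = %u bytes, CC.MPS = %u\n", page_size, mps);
	return 0;
}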
@@ -502,7 +502,7 @@ nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
 	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
 
 	/* Page size is 2 ^ (12 + mps). */
-	cc.bits.mps = spdk_u32log2(PAGE_SIZE) - 12;
+	cc.bits.mps = spdk_u32log2(ctrlr->page_size) - 12;
 
 	switch (ctrlr->opts.arb_mechanism) {
 	case SPDK_NVME_CC_AMS_RR:
@@ -1433,6 +1433,9 @@ nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_reg
 
 	ctrlr->min_page_size = 1u << (12 + ctrlr->cap.bits.mpsmin);
 
+	/* For now, always select page_size == min_page_size. */
+	ctrlr->page_size = ctrlr->min_page_size;
+
 	ctrlr->opts.io_queue_size = spdk_max(ctrlr->opts.io_queue_size, SPDK_NVME_IO_QUEUE_MIN_ENTRIES);
 	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u);
 	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, max_io_queue_size);
@@ -415,6 +415,9 @@ struct spdk_nvme_ctrlr {
 	/** minimum page size supported by this controller in bytes */
 	uint32_t min_page_size;
 
+	/** selected memory page size for this controller in bytes */
+	uint32_t page_size;
+
 	uint32_t num_aers;
 	struct nvme_async_event_request aer[NVME_MAX_ASYNC_EVENTS];
 	spdk_nvme_aer_cb aer_cb_fn;
@@ -633,9 +636,9 @@ void nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr);
 int nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr);
 
 static inline bool
-_is_page_aligned(uint64_t address)
+_is_page_aligned(uint64_t address, uint64_t page_size)
 {
-	return (address & (PAGE_SIZE - 1)) == 0;
+	return (address & (page_size - 1)) == 0;
 }
 
 #endif /* __NVME_INTERNAL_H__ */
@@ -221,6 +221,7 @@ _nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
 	uint32_t req_current_length = 0;
 	uint32_t child_length = 0;
 	uint32_t sge_length;
+	uint32_t page_size = qpair->ctrlr->page_size;
 	uintptr_t address;
 
 	args = &req->payload.u.sgl;
@@ -239,7 +240,7 @@ _nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
 		 * The start of the SGE is invalid if the start address is not page aligned,
 		 * unless it is the first SGE in the child request.
 		 */
-		start_valid = child_length == 0 || _is_page_aligned(address);
+		start_valid = child_length == 0 || _is_page_aligned(address, page_size);
 
 		/* Boolean for whether this is the last SGE in the parent request. */
 		last_sge = (req_current_length + sge_length == req->payload_size);
@@ -248,7 +249,7 @@ _nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
 		 * The end of the SGE is invalid if the end address is not page aligned,
 		 * unless it is the last SGE in the parent request.
 		 */
-		end_valid = last_sge || _is_page_aligned(address + sge_length);
+		end_valid = last_sge || _is_page_aligned(address + sge_length, page_size);
 
 		/*
 		 * This child request equals the parent request, meaning that no splitting
@@ -276,7 +277,7 @@ _nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
 			 * request for what we have so far, and then start a new child request for
 			 * the next SGE.
 			 */
-			start_valid = _is_page_aligned(address);
+			start_valid = _is_page_aligned(address, page_size);
 		}
 
 		if (start_valid && end_valid && !last_sge) {
@@ -60,15 +60,6 @@
 
 #define NVME_MAX_PRP_LIST_ENTRIES (506)
 
-/*
- * For commands requiring more than 2 PRP entries, one PRP will be
- * embedded in the command (prp1), and the rest of the PRP entries
- * will be in a list pointed to by the command (prp2). This means
- * that real max number of PRP entries we support is 506+1, which
- * results in a max xfer size of 506*PAGE_SIZE.
- */
-#define NVME_MAX_XFER_SIZE NVME_MAX_PRP_LIST_ENTRIES * PAGE_SIZE
-
 struct nvme_pcie_enum_ctx {
 	spdk_nvme_probe_cb probe_cb;
 	void *cb_ctx;
@@ -397,7 +388,14 @@ nvme_pcie_ctrlr_get_cmbsz(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_cmbsz_
 uint32_t
 nvme_pcie_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
 {
-	return NVME_MAX_XFER_SIZE;
+	/*
+	 * For commands requiring more than 2 PRP entries, one PRP will be
+	 * embedded in the command (prp1), and the rest of the PRP entries
+	 * will be in a list pointed to by the command (prp2). This means
+	 * that real max number of PRP entries we support is 506+1, which
+	 * results in a max xfer size of 506*ctrlr->page_size.
+	 */
+	return NVME_MAX_PRP_LIST_ENTRIES * ctrlr->page_size;
 }
 
 uint32_t
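To make the limit in the hunk above concrete (a hypothetical standalone check, not part of the patch, assuming the common 4096-byte controller page size): 506 PRP list entries describe 506 * 4096 = 2,072,576 bytes, just under 2 MiB.

/* Illustrative arithmetic for the max transfer size above; not SPDK code. */
#include <stdint.h>
#include <stdio.h>

#define NVME_MAX_PRP_LIST_ENTRIES 506u

int main(void)
{
	uint32_t page_size = 4096;   /* assumed ctrlr->page_size */
	uint32_t max_xfer_size = NVME_MAX_PRP_LIST_ENTRIES * page_size;

	printf("max transfer size = %u bytes\n", max_xfer_size);   /* prints 2072576 */
	return 0;
}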
@@ -1524,9 +1522,11 @@ nvme_pcie_fail_request_bad_vtophys(struct spdk_nvme_qpair *qpair, struct nvme_tr
  * *prp_index will be updated to account for the number of PRP entries used.
  */
 static int
-nvme_pcie_prp_list_append(struct nvme_tracker *tr, uint32_t *prp_index, void *virt_addr, size_t len)
+nvme_pcie_prp_list_append(struct nvme_tracker *tr, uint32_t *prp_index, void *virt_addr, size_t len,
+			  uint32_t page_size)
 {
 	struct spdk_nvme_cmd *cmd = &tr->req->cmd;
+	uintptr_t page_mask = page_size - 1;
 	uint64_t phys_addr;
 	uint32_t i;
 
@@ -1560,9 +1560,9 @@ nvme_pcie_prp_list_append(struct nvme_tracker *tr, uint32_t *prp_index, void *vi
 		if (i == 0) {
 			SPDK_TRACELOG(SPDK_TRACE_NVME, "prp1 = %p\n", (void *)phys_addr);
 			cmd->dptr.prp.prp1 = phys_addr;
-			seg_len = PAGE_SIZE - ((uintptr_t)virt_addr & (PAGE_SIZE - 1));
+			seg_len = page_size - ((uintptr_t)virt_addr & page_mask);
 		} else {
-			if ((phys_addr & (PAGE_SIZE - 1)) != 0) {
+			if ((phys_addr & page_mask) != 0) {
 				SPDK_TRACELOG(SPDK_TRACE_NVME, "PRP %u not page aligned (%p)\n",
 					      i, virt_addr);
 				return -EINVAL;
@@ -1570,7 +1570,7 @@ nvme_pcie_prp_list_append(struct nvme_tracker *tr, uint32_t *prp_index, void *vi
 
 			SPDK_TRACELOG(SPDK_TRACE_NVME, "prp[%u] = %p\n", i - 1, (void *)phys_addr);
 			tr->u.prp[i - 1] = phys_addr;
-			seg_len = PAGE_SIZE;
+			seg_len = page_size;
 		}
 
 		seg_len = spdk_min(seg_len, len);
@@ -1605,7 +1605,7 @@ nvme_pcie_qpair_build_contig_request(struct spdk_nvme_qpair *qpair, struct nvme_
 	int rc;
 
 	rc = nvme_pcie_prp_list_append(tr, &prp_index, req->payload.u.contig + req->payload_offset,
-				       req->payload_size);
+				       req->payload_size, qpair->ctrlr->page_size);
 	if (rc) {
 		nvme_pcie_fail_request_bad_vtophys(qpair, tr);
 		return rc;
@@ -1704,6 +1704,7 @@ nvme_pcie_qpair_build_prps_sgl_request(struct spdk_nvme_qpair *qpair, struct nvm
 	void *virt_addr;
 	uint32_t remaining_transfer_len, length;
 	uint32_t prp_index = 0;
+	uint32_t page_size = qpair->ctrlr->page_size;
 
 	/*
 	 * Build scattered payloads.
@@ -1729,9 +1730,10 @@ nvme_pcie_qpair_build_prps_sgl_request(struct spdk_nvme_qpair *qpair, struct nvm
 		 *
 		 * All SGEs except last must end on a page boundary.
 		 */
-		assert((length == remaining_transfer_len) || _is_page_aligned((uintptr_t)virt_addr + length));
+		assert((length == remaining_transfer_len) ||
+		       _is_page_aligned((uintptr_t)virt_addr + length, page_size));
 
-		rc = nvme_pcie_prp_list_append(tr, &prp_index, virt_addr, length);
+		rc = nvme_pcie_prp_list_append(tr, &prp_index, virt_addr, length, page_size);
 		if (rc) {
 			nvme_pcie_fail_request_bad_vtophys(qpair, tr);
 			return rc;
@@ -202,6 +202,8 @@ prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
 	 * so that we test the SGL splitting path.
 	 */
 	ctrlr->flags = 0;
+	ctrlr->min_page_size = 4096;
+	ctrlr->page_size = 4096;
 	memset(ns, 0, sizeof(*ns));
 	ns->ctrlr = ctrlr;
 	ns->sector_size = sector_size;
@@ -215,6 +217,7 @@ prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
 	ns->sectors_per_stripe = stripe_size / ns->extended_lba_size;
 
 	memset(qpair, 0, sizeof(*qpair));
+	qpair->ctrlr = ctrlr;
 	qpair->req_buf = calloc(num_requests, sizeof(struct nvme_request));
 	SPDK_CU_ASSERT_FATAL(qpair->req_buf != NULL);
 
@@ -426,7 +426,7 @@ test_sgl_req(void)
 	payload.u.sgl.cb_arg = &io_req;
 
 	prepare_submit_request_test(&qpair, &ctrlr);
-	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
+	req = nvme_allocate_request(&payload, 0x1000, NULL, &io_req);
 	SPDK_CU_ASSERT_FATAL(req != NULL);
 	req->cmd.opc = SPDK_NVME_OPC_WRITE;
 	req->cmd.cdw10 = 10000;
@@ -438,7 +438,7 @@ test_sgl_req(void)
 	cleanup_submit_request_test(&qpair);
 
 	prepare_submit_request_test(&qpair, &ctrlr);
-	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
+	req = nvme_allocate_request(&payload, 0x1000, NULL, &io_req);
 	SPDK_CU_ASSERT_FATAL(req != NULL);
 	req->cmd.opc = SPDK_NVME_OPC_WRITE;
 	req->cmd.cdw10 = 10000;
@@ -453,7 +453,7 @@ test_sgl_req(void)
 	fail_next_sge = false;
 
 	prepare_submit_request_test(&qpair, &ctrlr);
-	req = nvme_allocate_request(&payload, 2 * PAGE_SIZE, NULL, &io_req);
+	req = nvme_allocate_request(&payload, 2 * 0x1000, NULL, &io_req);
 	SPDK_CU_ASSERT_FATAL(req != NULL);
 	req->cmd.opc = SPDK_NVME_OPC_WRITE;
 	req->cmd.cdw10 = 10000;
@@ -465,7 +465,7 @@ test_sgl_req(void)
 	cleanup_submit_request_test(&qpair);
 
 	prepare_submit_request_test(&qpair, &ctrlr);
-	req = nvme_allocate_request(&payload, (NVME_MAX_PRP_LIST_ENTRIES + 1) * PAGE_SIZE, NULL, &io_req);
+	req = nvme_allocate_request(&payload, (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, NULL, &io_req);
 	SPDK_CU_ASSERT_FATAL(req != NULL);
 	req->cmd.opc = SPDK_NVME_OPC_WRITE;
 	req->cmd.cdw10 = 10000;
@@ -478,7 +478,7 @@ test_sgl_req(void)
 	sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
 	if (sgl_tr != NULL) {
 		for (i = 0; i < NVME_MAX_PRP_LIST_ENTRIES; i++) {
-			CU_ASSERT(sgl_tr->u.prp[i] == (PAGE_SIZE * (i + 1)));
+			CU_ASSERT(sgl_tr->u.prp[i] == (0x1000 * (i + 1)));
 		}
 
 		TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
@@ -504,7 +504,7 @@ test_hw_sgl_req(void)
 	payload.u.sgl.cb_arg = &io_req;
 
 	prepare_submit_request_test(&qpair, &ctrlr);
-	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
+	req = nvme_allocate_request(&payload, 0x1000, NULL, &io_req);
 	SPDK_CU_ASSERT_FATAL(req != NULL);
 	req->cmd.opc = SPDK_NVME_OPC_WRITE;
 	req->cmd.cdw10 = 10000;
@@ -526,7 +526,7 @@ test_hw_sgl_req(void)
 	nvme_free_request(req);
 
 	prepare_submit_request_test(&qpair, &ctrlr);
-	req = nvme_allocate_request(&payload, NVME_MAX_SGL_DESCRIPTORS * PAGE_SIZE, NULL, &io_req);
+	req = nvme_allocate_request(&payload, NVME_MAX_SGL_DESCRIPTORS * 0x1000, NULL, &io_req);
 	SPDK_CU_ASSERT_FATAL(req != NULL);
 	req->cmd.opc = SPDK_NVME_OPC_WRITE;
 	req->cmd.cdw10 = 10000;
@@ -661,43 +661,43 @@ test_prp_list_append(void)
 
 	/* Non-DWORD-aligned buffer (invalid) */
 	prp_list_prep(&tr, &req, &prp_index);
-	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100001, 0x1000) == -EINVAL);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100001, 0x1000, 0x1000) == -EINVAL);
 
 	/* 512-byte buffer, 4K aligned */
 	prp_list_prep(&tr, &req, &prp_index);
-	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x200) == 0);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x200, 0x1000) == 0);
 	CU_ASSERT(prp_index == 1);
 	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
 
 	/* 512-byte buffer, non-4K-aligned */
 	prp_list_prep(&tr, &req, &prp_index);
-	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x108000, 0x200) == 0);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x108000, 0x200, 0x1000) == 0);
 	CU_ASSERT(prp_index == 1);
 	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x108000);
 
 	/* 4K buffer, 4K aligned */
 	prp_list_prep(&tr, &req, &prp_index);
-	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000) == 0);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == 0);
 	CU_ASSERT(prp_index == 1);
 	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
 
 	/* 4K buffer, non-4K aligned */
 	prp_list_prep(&tr, &req, &prp_index);
-	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000) == 0);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
 	CU_ASSERT(prp_index == 2);
 	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
 	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);
 
 	/* 8K buffer, 4K aligned */
 	prp_list_prep(&tr, &req, &prp_index);
-	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x2000) == 0);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x2000, 0x1000) == 0);
 	CU_ASSERT(prp_index == 2);
 	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
 	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);
 
 	/* 8K buffer, non-4K aligned */
 	prp_list_prep(&tr, &req, &prp_index);
-	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x2000) == 0);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x2000, 0x1000) == 0);
 	CU_ASSERT(prp_index == 3);
 	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
 	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
@@ -706,7 +706,7 @@ test_prp_list_append(void)
 
 	/* 12K buffer, 4K aligned */
 	prp_list_prep(&tr, &req, &prp_index);
-	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x3000) == 0);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x3000, 0x1000) == 0);
 	CU_ASSERT(prp_index == 3);
 	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
 	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
@@ -715,7 +715,7 @@ test_prp_list_append(void)
 
 	/* 12K buffer, non-4K aligned */
 	prp_list_prep(&tr, &req, &prp_index);
-	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x3000) == 0);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x3000, 0x1000) == 0);
 	CU_ASSERT(prp_index == 4);
 	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
 	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
@@ -725,18 +725,18 @@ test_prp_list_append(void)
 
 	/* Two 4K buffers, both 4K aligned */
 	prp_list_prep(&tr, &req, &prp_index);
-	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000) == 0);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == 0);
 	CU_ASSERT(prp_index == 1);
-	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900000, 0x1000) == 0);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900000, 0x1000, 0x1000) == 0);
 	CU_ASSERT(prp_index == 2);
 	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
 	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x900000);
 
 	/* Two 4K buffers, first non-4K aligned, second 4K aligned */
 	prp_list_prep(&tr, &req, &prp_index);
-	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000) == 0);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
 	CU_ASSERT(prp_index == 2);
-	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900000, 0x1000) == 0);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900000, 0x1000, 0x1000) == 0);
 	CU_ASSERT(prp_index == 3);
 	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
 	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
@@ -745,38 +745,38 @@ test_prp_list_append(void)
 
 	/* Two 4K buffers, both non-4K aligned (invalid) */
 	prp_list_prep(&tr, &req, &prp_index);
-	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000) == 0);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
 	CU_ASSERT(prp_index == 2);
-	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900800, 0x1000) == -EINVAL);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900800, 0x1000, 0x1000) == -EINVAL);
	CU_ASSERT(prp_index == 2);
 
 	/* 4K buffer, 4K aligned, but vtophys fails */
 	ut_fail_vtophys = true;
 	prp_list_prep(&tr, &req, &prp_index);
-	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000) == -EINVAL);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == -EINVAL);
 	ut_fail_vtophys = false;
 
 	/* Largest aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
 	prp_list_prep(&tr, &req, &prp_index);
 	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000,
-					    (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000) == 0);
+					    (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == 0);
 	CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);
 
 	/* Largest non-4K-aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
 	prp_list_prep(&tr, &req, &prp_index);
 	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800,
-					    NVME_MAX_PRP_LIST_ENTRIES * 0x1000) == 0);
+					    NVME_MAX_PRP_LIST_ENTRIES * 0x1000, 0x1000) == 0);
 	CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);
 
 	/* Buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
 	prp_list_prep(&tr, &req, &prp_index);
 	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000,
-					    (NVME_MAX_PRP_LIST_ENTRIES + 2) * 0x1000) == -EINVAL);
+					    (NVME_MAX_PRP_LIST_ENTRIES + 2) * 0x1000, 0x1000) == -EINVAL);
 
 	/* Non-4K-aligned buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
 	prp_list_prep(&tr, &req, &prp_index);
 	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800,
-					    (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000) == -EINVAL);
+					    (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == -EINVAL);
 }
 
 int main(int argc, char **argv)