nvme: fix sgl processing for single sge payloads > 4KB

For the nvme readv/writev APIs, the PRP checking logic was
incorrectly rejecting single SGE payloads larger than 4KB.
This patch fixes the PRP checking logic and adds a test case
covering this scenario.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I6357d620599666046d2cb74d7923dac1f75418c5
Author: Jim Harris
Date:   2016-10-12 14:51:20 -07:00
Parent: 68cdd81a83
Commit: de16fcca32
3 changed files with 33 additions and 21 deletions
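
What the fix enforces can be stated compactly: the first PRP entry may begin at any dword-aligned offset within a page, but every subsequent PRP entry must be page-aligned, so only the last SGE of a payload is allowed to end off a page boundary. The sketch below restates that rule outside the driver; the helper name, the PAGE_SIZE constant, and the parameter list are assumptions for illustration, not code from the patch.

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

/*
 * Illustrative restatement of the sge-vs-PRP compatibility rule the
 * patch adds (not the SPDK function itself).
 */
static bool
sge_is_prp_compatible(uint64_t phys_addr, uint32_t length,
		      uint32_t remaining_transfer_len)
{
	/* PRP entries must be dword aligned. */
	if (phys_addr & 0x3) {
		return false;
	}

	/*
	 * Only the final sge of the payload may end mid-page, because
	 * every PRP entry after PRP1 must start on a page boundary.
	 */
	if (length < remaining_transfer_len &&
	    ((phys_addr + length) & (PAGE_SIZE - 1)) != 0) {
		return false;
	}

	return true;
}

int main(void)
{
	/* A 64KB sge starting 64 bytes into a page, with nothing following it. */
	return sge_is_prp_compatible(0x200040, 0x10000, 0x10000) ? 0 : 1;
}

Under this rule a single 64KB SGE that starts 64 bytes into a page is perfectly valid (the main() above exits 0), which is exactly the case the old code rejected.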

@@ -827,6 +827,13 @@ _nvme_qpair_build_prps_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_re
 			return -1;
 		}
+		/* Confirm that this sge is prp compatible. */
+		if (phys_addr & 0x3 ||
+		    (length < remaining_transfer_len && ((phys_addr + length) & (PAGE_SIZE - 1)))) {
+			_nvme_fail_request_bad_vtophys(qpair, tr);
+			return -1;
+		}
 		data_transferred = nvme_min(remaining_transfer_len, length);
 		nseg = data_transferred >> nvme_u32log2(PAGE_SIZE);
@@ -839,6 +846,7 @@ _nvme_qpair_build_prps_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_re
 		if (total_nseg == 0) {
 			req->cmd.psdt = SPDK_NVME_PSDT_PRP;
 			req->cmd.dptr.prp.prp1 = phys_addr;
+			phys_addr -= unaligned;
 		}
 		total_nseg += nseg;
@@ -847,7 +855,7 @@ _nvme_qpair_build_prps_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_re
 		if (total_nseg == 2) {
 			if (sge_count == 1)
-				tr->req->cmd.dptr.prp.prp2 = phys_addr + PAGE_SIZE - unaligned;
+				tr->req->cmd.dptr.prp.prp2 = phys_addr + PAGE_SIZE;
 			else if (sge_count == 2)
 				tr->req->cmd.dptr.prp.prp2 = phys_addr;
 			/* save prp2 value */
@@ -862,20 +870,12 @@ _nvme_qpair_build_prps_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_re
 			while (cur_nseg < nseg) {
 				if (prp2) {
 					tr->u.prp[0] = prp2;
-					tr->u.prp[last_nseg + 1] = phys_addr + cur_nseg * PAGE_SIZE - unaligned;
+					tr->u.prp[last_nseg + 1] = phys_addr + cur_nseg * PAGE_SIZE;
 				} else
-					tr->u.prp[last_nseg] = phys_addr + cur_nseg * PAGE_SIZE - unaligned;
+					tr->u.prp[last_nseg] = phys_addr + cur_nseg * PAGE_SIZE;
 				last_nseg++;
 				cur_nseg++;
-				/* physical address and length check */
-				if (remaining_transfer_len || (!remaining_transfer_len && (cur_nseg < nseg))) {
-					if ((length & (PAGE_SIZE - 1)) || unaligned) {
-						_nvme_fail_request_bad_vtophys(qpair, tr);
-						return -1;
-					}
-				}
 			}
 		}
 	}
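
To see what the new `phys_addr -= unaligned` adjustment buys, here is the PRP arithmetic for the misaligned single-SGE case the new test exercises. The addresses and the small program around them are hypothetical; only the layout rule (PRP1 keeps the byte offset, every later entry is taken from the page-aligned base) reflects the change above.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	uint64_t phys_addr = 0x200040;                    /* dword aligned, 64 bytes into a page */
	uint32_t length    = 0x10000;                     /* 64KB single sge */
	uint32_t unaligned = phys_addr & (PAGE_SIZE - 1); /* 64 */
	uint64_t base      = phys_addr - unaligned;       /* 0x200000, page aligned */
	uint32_t first     = PAGE_SIZE - unaligned;       /* bytes described by PRP1 */
	uint32_t entries   = (length - first + PAGE_SIZE - 1) / PAGE_SIZE; /* 16 list entries */

	/* PRP1 is the only entry allowed to carry an offset. */
	printf("prp1         = 0x%" PRIx64 "\n", phys_addr);

	/* With more than two pages of data, PRP2 points at this list. */
	for (uint32_t i = 0; i < entries; i++) {
		printf("prp_list[%2u] = 0x%" PRIx64 "\n", i,
		       base + (uint64_t)(i + 1) * PAGE_SIZE);
	}
	return 0;
}

Note that the pre-fix code produced the same addresses by subtracting `unaligned` at each use; the functional change is that the overly strict per-page check at the bottom of the loop, which rejected any misaligned SGE outright, is replaced by the up-front compatibility test.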

@@ -76,6 +76,7 @@ struct io_request {
 	uint32_t current_iov_bytes_left;
 	struct sgl_element iovs[MAX_IOVS];
 	uint32_t nseg;
+	uint32_t misalign;
 };
 static void nvme_request_reset_sgl(void *cb_arg, uint32_t sgl_offset)
@@ -216,6 +217,22 @@ static void build_io_request_6(struct io_request *req)
 	req->iovs[1].len = 0x1000;
 }
+static void build_io_request_7(struct io_request *req)
+{
+	uint8_t *base;
+	req->nseg = 1;
+	/*
+	 * Create a 64KB sge, but ensure it is *not* aligned on a 4KB
+	 * boundary. This is valid for single element buffers with PRP.
+	 */
+	base = spdk_zmalloc(0x11000, 0x1000, NULL);
+	req->misalign = 64;
+	req->iovs[0].base = base + req->misalign;
+	req->iovs[0].len = 0x10000;
+}
 typedef void (*nvme_build_io_req_fn_t)(struct io_request *req);
 static void
@@ -228,7 +245,7 @@ free_req(struct io_request *req)
 	}
 	for (i = 0; i < req->nseg; i++) {
-		spdk_free(req->iovs[i].base);
+		spdk_free(req->iovs[i].base - req->misalign);
 	}
 	spdk_free(req);
@@ -432,7 +449,8 @@ int main(int argc, char **argv)
 		    || TEST(build_io_request_3)
 		    || TEST(build_io_request_4)
 		    || TEST(build_io_request_5)
-		    || TEST(build_io_request_6)) {
+		    || TEST(build_io_request_6)
+		    || TEST(build_io_request_7)) {
 #undef TEST
 			rc = 1;
 			printf("%s: failed sgl tests\n", iter->name);
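
The new `misalign` field exists purely so the test can free what it actually allocated: the buffer handed to the driver starts 64 bytes past the start of the allocation, so the free path has to walk the pointer back. Below is a minimal sketch of that bookkeeping, using standard C11 aligned_alloc/free in place of spdk_zmalloc/spdk_free and hypothetical helper names.

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

struct sge_buf {
	uint8_t *base;     /* pointer handed to the I/O path */
	size_t   len;
	size_t   misalign; /* offset back to the real allocation */
};

/* Allocate len bytes that deliberately do NOT start on a 4KB boundary. */
static int
sge_buf_alloc_misaligned(struct sge_buf *b, size_t len, size_t misalign)
{
	/* One extra page leaves room for the offset; aligned_alloc (C11)
	 * requires the size to be a multiple of the alignment, so callers
	 * are expected to pass page-multiple lengths here. */
	uint8_t *raw = aligned_alloc(0x1000, len + 0x1000);

	if (raw == NULL) {
		return -1;
	}
	b->misalign = misalign;
	b->base = raw + misalign;
	b->len = len;
	return 0;
}

static void
sge_buf_free(struct sge_buf *b)
{
	/* Free the pointer that was actually returned by the allocator. */
	free(b->base - b->misalign);
}

This is the same pattern as the `spdk_free(req->iovs[i].base - req->misalign)` change in free_req() above.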

@@ -340,15 +340,9 @@ test_sgl_req(void)
 	req->cmd.cdw12 = 7 | 0;
 	req->payload_offset = 1;
-	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);
-	CU_ASSERT(req->cmd.psdt == SPDK_NVME_PSDT_PRP);
-	CU_ASSERT(req->cmd.dptr.prp.prp1 == 7);
-	CU_ASSERT(req->cmd.dptr.prp.prp2 == 4096);
-	sgl_tr = LIST_FIRST(&qpair.outstanding_tr);
-	LIST_REMOVE(sgl_tr, list);
+	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
+	CU_ASSERT(qpair.sq_tail == 0);
 	cleanup_submit_request_test(&qpair);
 	nvme_free_request(req);
 	prepare_submit_request_test(&qpair, &ctrlr, &regs);
 	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
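
The flipped expectation in this unit test follows from the new dword-alignment requirement: the stubbed payload begins at physical address 7 (the removed assertion expected prp1 == 7), and 7 is not 4-byte aligned, so the request must now be failed and the submission queue tail must stay at 0. A throwaway illustration, with the second address borrowed from the hypothetical example earlier:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Start address the removed assertion expected as prp1. */
	uint64_t rejected = 0x7;
	/* Dword-aligned but page-misaligned start, as in build_io_request_7. */
	uint64_t accepted = 0x200040;

	printf("0x%" PRIx64 " -> %s\n", rejected,
	       (rejected & 0x3) ? "fail request" : "build PRPs");
	printf("0x%" PRIx64 " -> %s\n", accepted,
	       (accepted & 0x3) ? "fail request" : "build PRPs");
	return 0;
}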