vhost-nvme: use the new vhost API for memory address translation

Change-Id: If8367f8d0bd5870c50f0f60df47c204a0c67ea1b
Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-on: https://review.gerrithub.io/408723
Reviewed-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Changpeng Liu, 2018-04-18 22:01:44 -04:00 (committed by Daniel Verkamp)
parent 364e0fa94d
commit 8fdc3c0424
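
The change replaces every direct rte_vhost_gpa_to_vva() translation against the cached rte_vhost_memory table with the vhost layer's spdk_vhost_gpa_to_vva(), which also receives the number of bytes the caller intends to touch. A minimal sketch of the call pattern follows; the exact return type and length semantics of spdk_vhost_gpa_to_vva() are inferred from its usage in this diff, and the wrapper name nvme_gpa_to_vva() is purely illustrative.

/* Sketch only: prototype and semantics assumed from the call sites below. */
static void *
nvme_gpa_to_vva(struct spdk_vhost_nvme_dev *nvme, uint64_t gpa, uint64_t len)
{
	/* Old API: rte_vhost_gpa_to_vva(nvme->mem, gpa) translated a bare
	 * address against a cached rte_vhost_memory table and never knew how
	 * many bytes the caller would access. */

	/* New API: the vhost device performs the lookup and is told the length
	 * of the region, so an unmapped or too-short range comes back as 0. */
	return (void *)(uintptr_t)spdk_vhost_gpa_to_vva(&nvme->vdev, gpa, len);
}

This is why each call site below now computes a meaningful length (residue_len, nents * sizeof(*prp_list), queue sizes in bytes, a page-sized doorbell buffer) instead of passing only an address, and why the cached rte_vhost_memory pointer can be dropped from struct spdk_vhost_nvme_dev.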

@@ -124,8 +124,6 @@ struct spdk_vhost_nvme_dev {
uint32_t num_sqs;
uint32_t num_cqs;
struct rte_vhost_memory *mem;
uint32_t num_ns;
struct spdk_vhost_nvme_ns ns[MAX_NAMESPACE];
@@ -213,23 +211,23 @@ spdk_nvme_map_prps(struct spdk_vhost_nvme_dev *nvme, struct spdk_nvme_cmd *cmd,
uint64_t prp1, prp2;
uintptr_t vva;
uint32_t i;
uint32_t residue_len, mps = 4096;
uint32_t residue_len, nents, mps = 4096;
uint64_t *prp_list;
prp1 = cmd->dptr.prp.prp1;
prp2 = cmd->dptr.prp.prp2;
vva = (uintptr_t)rte_vhost_gpa_to_vva(nvme->mem, prp1);
/* PRP1 may start with an unaligned page address */
residue_len = mps - (prp1 % mps);
residue_len = spdk_min(len, residue_len);
vva = (uintptr_t)spdk_vhost_gpa_to_vva(&nvme->vdev, prp1, residue_len);
if (spdk_unlikely(vva == 0)) {
SPDK_ERRLOG("GPA to VVA failed\n");
return -1;
}
task->iovs[0].iov_base = (void *)vva;
/* PRP1 may start with an unaligned page address */
residue_len = mps - (prp1 % mps);
residue_len = spdk_min(len, residue_len);
task->iovs[0].iov_len = residue_len;
len -= residue_len;
if (len) {
@@ -237,7 +235,7 @@ spdk_nvme_map_prps(struct spdk_vhost_nvme_dev *nvme, struct spdk_nvme_cmd *cmd,
/* 2 PRP used */
task->iovcnt = 2;
assert(prp2 != 0);
vva = (uintptr_t)rte_vhost_gpa_to_vva(nvme->mem, prp2);
vva = (uintptr_t)spdk_vhost_gpa_to_vva(&nvme->vdev, prp2, len);
if (spdk_unlikely(vva == 0)) {
return -1;
}
@@ -246,7 +244,8 @@ spdk_nvme_map_prps(struct spdk_vhost_nvme_dev *nvme, struct spdk_nvme_cmd *cmd,
} else {
/* PRP list used */
assert(prp2 != 0);
vva = (uintptr_t)rte_vhost_gpa_to_vva(nvme->mem, prp2);
nents = (len + mps - 1) / mps;
vva = (uintptr_t)spdk_vhost_gpa_to_vva(&nvme->vdev, prp2, nents * sizeof(*prp_list));
if (spdk_unlikely(vva == 0)) {
return -1;
}
@@ -254,7 +253,7 @@ spdk_nvme_map_prps(struct spdk_vhost_nvme_dev *nvme, struct spdk_nvme_cmd *cmd,
i = 0;
while (len != 0) {
residue_len = spdk_min(len, mps);
vva = (uintptr_t)rte_vhost_gpa_to_vva(nvme->mem, prp_list[i]);
vva = (uintptr_t)spdk_vhost_gpa_to_vva(&nvme->vdev, prp_list[i], residue_len);
if (spdk_unlikely(vva == 0)) {
return -1;
}
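
As a worked example of the lengths now handed to the translation calls in spdk_nvme_map_prps() (all values below are illustrative, not taken from the diff): a 16384-byte transfer whose PRP1 points 512 bytes into a 4096-byte page.

uint32_t mps = 4096, len = 16384;
uint64_t prp1 = 0x10000200;                    /* hypothetical GPA, 512 bytes into a page */
uint32_t residue_len = mps - (prp1 % mps);     /* 3584 bytes mapped through PRP1 */
uint32_t remaining = len - residue_len;        /* 12800 bytes left to map */
uint32_t nents = (remaining + mps - 1) / mps;  /* 4 PRP-list entries */
/* The PRP list itself is translated with nents * sizeof(uint64_t) = 32 bytes,
 * and each list entry with spdk_min(remaining, mps): 4096, 4096, 4096, 512. */
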
@@ -554,8 +553,8 @@ vhost_nvme_doorbell_buffer_config(struct spdk_vhost_nvme_dev *nvme,
return -1;
}
/* Guest Physical Address to Host Virtual Address */
nvme->dbbuf_dbs = (void *)(uintptr_t)rte_vhost_gpa_to_vva(nvme->mem, dbs_dma_addr);
nvme->dbbuf_eis = (void *)(uintptr_t)rte_vhost_gpa_to_vva(nvme->mem, eis_dma_addr);
nvme->dbbuf_dbs = (void *)(uintptr_t)spdk_vhost_gpa_to_vva(&nvme->vdev, dbs_dma_addr, 4096);
nvme->dbbuf_eis = (void *)(uintptr_t)spdk_vhost_gpa_to_vva(&nvme->vdev, eis_dma_addr, 4096);
if (!nvme->dbbuf_dbs || !nvme->dbbuf_eis) {
return -1;
}
@@ -574,6 +573,7 @@ vhost_nvme_create_io_sq(struct spdk_vhost_nvme_dev *nvme,
{
uint16_t qid, qsize, cqid;
uint64_t dma_addr;
uint64_t requested_len;
struct spdk_vhost_nvme_sq *sq;
/* physically contiguous */
@@ -597,7 +597,8 @@ vhost_nvme_create_io_sq(struct spdk_vhost_nvme_dev *nvme,
sq->cqid = cqid;
sq->size = qsize + 1;
sq->sq_head = sq->sq_tail = 0;
sq->sq_cmd = (void *)(uintptr_t)rte_vhost_gpa_to_vva(nvme->mem, dma_addr);
requested_len = sizeof(struct spdk_nvme_cmd) * sq->size;
sq->sq_cmd = (void *)(uintptr_t)spdk_vhost_gpa_to_vva(&nvme->vdev, dma_addr, requested_len);
if (!sq->sq_cmd) {
return -1;
}
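
The requested_len for the submission queue ring follows from the 64-byte NVMe submission queue entry (struct spdk_nvme_cmd). A quick illustrative calculation with a hypothetical queue size:

/* qsize in the Create I/O SQ command is 0-based, so qsize = 127 gives
 * sq->size = 128 slots and a ring of 128 * 64 = 8192 contiguous bytes. */
uint64_t requested_len = (127 + 1) * sizeof(struct spdk_nvme_cmd);
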
@@ -646,6 +647,7 @@ vhost_nvme_create_io_cq(struct spdk_vhost_nvme_dev *nvme,
uint16_t qsize, qid;
uint64_t dma_addr;
struct spdk_vhost_nvme_cq *cq;
uint64_t requested_len;
/* physically contiguous */
assert(cmd->cdw11 & 0x1);
@@ -668,7 +670,8 @@ vhost_nvme_create_io_cq(struct spdk_vhost_nvme_dev *nvme,
cq->virq = -1;
cq->cq_head = 0;
cq->last_signaled_cq_head = 0;
cq->cq_cqe = (void *)(uintptr_t)rte_vhost_gpa_to_vva(nvme->mem, dma_addr);
requested_len = sizeof(struct spdk_nvme_cpl) * cq->size;
cq->cq_cqe = (void *)(uintptr_t)spdk_vhost_gpa_to_vva(&nvme->vdev, dma_addr, requested_len);
if (!cq->cq_cqe) {
return -1;
}
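
Likewise for the completion queue ring: an NVMe completion entry (struct spdk_nvme_cpl) is 16 bytes, so the buffer translated above must cover cq->size * 16 contiguous bytes. Again with a hypothetical 128-slot queue:

uint64_t requested_len = 128 * sizeof(struct spdk_nvme_cpl);   /* 128 * 16 = 2048 bytes */
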
@@ -885,8 +888,6 @@ spdk_vhost_nvme_start_device(struct spdk_vhost_dev *vdev, void *event_ctx)
return -1;
}
nvme->mem = vdev->mem;
if (alloc_task_pool(nvme)) {
return -1;
}