vhost/nvme: ensure the completion queue is not full before posting CQEs

Change-Id: Iadab3e8f1613a5e13e5a2712f30b714929637a30
Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-on: https://review.gerrithub.io/408775
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Changpeng Liu, 2018-04-24 00:32:05 -04:00 (committed by Daniel Verkamp)
parent ebedb9fd25
commit cb094a03f0

@@ -71,7 +71,8 @@ struct spdk_vhost_nvme_cq {
 	bool valid;
 	volatile struct spdk_nvme_cpl *cq_cqe;
 	uint16_t cq_head;
-	uint16_t last_signaled_cq_head;
+	uint16_t guest_signaled_cq_head;
+	STAILQ_HEAD(, spdk_vhost_nvme_task) cq_full_waited_tasks;
 	bool irq_enabled;
 	int virq;
 };
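
The renamed field makes its role explicit: guest_signaled_cq_head caches the CQ
head doorbell value most recently written by the guest into the shadow doorbell
buffer, while cq_full_waited_tasks collects completions that cannot be posted
until the guest consumes more entries. As a rough sketch of the queueing half,
using toy types rather than the SPDK ones, BSD sys/queue.h STAILQs park and
retire entries in FIFO order:

    #include <stdio.h>
    #include <sys/queue.h>

    /* Toy stand-in for spdk_vhost_nvme_task; illustrative only. */
    struct task {
        int id;
        STAILQ_ENTRY(task) stailq;  /* linkage field, as in the real struct */
    };
    STAILQ_HEAD(task_list, task);

    int main(void)
    {
        struct task_list waited = STAILQ_HEAD_INITIALIZER(waited);
        struct task t1 = { .id = 1 }, t2 = { .id = 2 };

        /* Park completions that cannot be posted yet... */
        STAILQ_INSERT_TAIL(&waited, &t1, stailq);
        STAILQ_INSERT_TAIL(&waited, &t2, stailq);

        /* ...and retire them in FIFO order once the CQ has room again. */
        while (!STAILQ_EMPTY(&waited)) {
            struct task *t = STAILQ_FIRST(&waited);
            STAILQ_REMOVE_HEAD(&waited, stailq);
            printf("retiring task %d\n", t->id);
        }
        return 0;
    }
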
@@ -108,6 +109,8 @@ struct spdk_vhost_nvme_task {
 	/* parent pointer. */
 	struct spdk_vhost_nvme_task *parent;
 	bool success;
+	uint8_t sct;
+	uint8_t sc;
 	uint32_t num_children;
 	STAILQ_ENTRY(spdk_vhost_nvme_task) stailq;
 };
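
The two new task fields stash the NVMe status so it survives until the
completion can actually be posted. For reference from the NVMe spec (not this
patch): SCT is the Status Code Type selecting a code space and SC the Status
Code within it; in the 16-bit CQE status word SC occupies bits 8:1, SCT bits
11:9, and DNR (do not retry), which the error path below sets, is bit 15. A
small sketch of that packing, with hypothetical variable names:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t sct = 0x0;  /* generic command status */
        uint8_t sc  = 0x0;  /* successful completion */

        /* Phase tag (bit 0) omitted; DNR is bit 15. */
        uint16_t status = (uint16_t)(((sct & 0x7) << 9) | (sc << 1));
        uint16_t with_dnr = (uint16_t)(status | (1u << 15));

        printf("status=0x%04x, with DNR=0x%04x\n", status, with_dnr);
        return 0;
    }
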
@@ -178,6 +181,12 @@ nvme_inc_cq_head(struct spdk_vhost_nvme_cq *cq)
 	}
 }
 
+static bool
+nvme_cq_is_full(struct spdk_vhost_nvme_cq *cq)
+{
+	return ((cq->cq_head + 1) % cq->size == cq->guest_signaled_cq_head);
+}
+
 static void
 nvme_inc_sq_head(struct spdk_vhost_nvme_sq *sq)
 {
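
This is the classic one-slot-open ring check: cq_head is the device-side
producer index (the next slot to be written) and may chase the guest's consumer
head but never catch it, so a CQ of size N holds at most N - 1 unconsumed
entries. A standalone sketch of the same arithmetic, with made-up names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool ring_is_full(uint16_t head, uint16_t guest_head, uint16_t size)
    {
        return (uint16_t)((head + 1) % size) == guest_head;
    }

    int main(void)
    {
        uint16_t size = 4, guest_head = 0, head = 0;

        /* The producer can post size - 1 entries before the ring is full. */
        while (!ring_is_full(head, guest_head, size)) {
            printf("post CQE in slot %u\n", head);
            head = (uint16_t)((head + 1) % size);
        }
        printf("full at head=%u, guest_head=%u\n", head, guest_head);
        return 0;
    }
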
@@ -285,13 +294,8 @@ blk_request_complete_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg
 	struct spdk_vhost_nvme_cq *cq;
 	struct spdk_vhost_nvme_sq *sq;
 	struct spdk_nvme_cmd *cmd = &task->cmd;
-	uint32_t cq_head;
 	int sc, sct;
 
-	if (spdk_likely(bdev_io)) {
-		spdk_bdev_free_io(bdev_io);
-	}
-
 	cqid = task->cqid;
 	cq = spdk_vhost_nvme_get_cq_from_qid(nvme, cqid);
 	sq = spdk_vhost_nvme_get_sq_from_qid(nvme, task->sqid);
@@ -300,15 +304,31 @@ blk_request_complete_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg
 		return;
 	}
 
+	task->success = success;
+	if (spdk_unlikely(!success && bdev_io)) {
+		spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc);
+		task->sct = sct;
+		task->sc = sc;
+	}
+
+	if (spdk_likely(bdev_io)) {
+		spdk_bdev_free_io(bdev_io);
+	}
+
+	cq->guest_signaled_cq_head = nvme->dbbuf_dbs[cq_offset(cqid, 1)];
+	if (spdk_unlikely(nvme_cq_is_full(cq))) {
+		STAILQ_INSERT_TAIL(&cq->cq_full_waited_tasks, task, stailq);
+		return;
+	}
+
 	cqe.sqid = task->sqid;
 	cqe.sqhd = sq->sq_head;
 	cqe.cid = cmd->cid;
 	cqe.status.sct = 0;
 	cqe.status.sc = 0;
 	if (spdk_unlikely(!success)) {
-		spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc);
-		cqe.status.sct = sct;
-		cqe.status.sc = sc;
+		cqe.status.sct = task->sct;
+		cqe.status.sc = task->sc;
 		cqe.status.dnr = 1;
 		SPDK_ERRLOG("I/O error, sector %u\n", cmd->cdw10);
 	}
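
The reordering here matters: spdk_bdev_io_get_nvme_status() has to run before
spdk_bdev_free_io(), and once a task is parked it will be replayed later with
bdev_io == NULL, so everything needed to build the CQE must travel on the task
itself. A condensed sketch of the capture-then-post-or-park flow, using toy
types rather than the SPDK API:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_task {
        bool success;
        uint8_t sct, sc;   /* status captured before the bdev I/O is freed */
    };

    struct toy_cq {
        uint16_t head, guest_head, size;
    };

    static bool toy_cq_is_full(const struct toy_cq *cq)
    {
        return (uint16_t)((cq->head + 1) % cq->size) == cq->guest_head;
    }

    /* Returns false when the caller must park the task and retry later; by
     * that point the original bdev_io may be gone, hence the copied status. */
    static bool toy_try_complete(struct toy_cq *cq, const struct toy_task *task)
    {
        if (toy_cq_is_full(cq)) {
            return false;
        }
        (void)task;   /* a real CQE would be built from task->sct / task->sc */
        cq->head = (uint16_t)((cq->head + 1) % cq->size);
        return true;
    }

    int main(void)
    {
        struct toy_cq cq = { .head = 2, .guest_head = 3, .size = 4 };
        struct toy_task task = { .success = false, .sct = 0x0, .sc = 0x2 };

        printf(toy_try_complete(&cq, &task) ? "posted\n" : "parked for retry\n");
        return 0;
    }
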
@@ -319,15 +339,10 @@ blk_request_complete_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg
 	nvme_inc_cq_head(cq);
 
 	/* completion */
-	cq_head = nvme->dbbuf_dbs[cq_offset(cqid, 1)];
-	if (cq_head != cq->last_signaled_cq_head) {
-		cq->last_signaled_cq_head = (uint16_t)cq_head;
-		/* MMIO Controll */
-		nvme->dbbuf_eis[cq_offset(cqid, 1)] = (uint32_t)(cq_head - 1);
-	}
+	/* MMIO Controll */
+	nvme->dbbuf_eis[cq_offset(cqid, 1)] = (uint32_t)(cq->guest_signaled_cq_head - 1);
 
-	if (cq->irq_enabled && (cq->cq_head != cq_head)) {
+	if (cq->irq_enabled && (cq->cq_head != cq->guest_signaled_cq_head)) {
 		eventfd_write(cq->virq, (eventfd_t)1);
 	}
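
With the bookkeeping now based on guest_signaled_cq_head, the interrupt path
itself is unchanged: when interrupts are enabled and the guest still has
unconsumed entries, the per-CQ eventfd (cq->virq, set up through vhost
messages) is written, and on a typical KVM setup the irqfd machinery turns that
write into a guest interrupt. A minimal standalone eventfd round trip,
independent of any vhost wiring:

    #include <stdio.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    int main(void)
    {
        eventfd_t val;
        int efd = eventfd(0, 0);   /* stands in for cq->virq */

        if (efd < 0) {
            perror("eventfd");
            return 1;
        }

        /* Producer side: the same call the completion path uses. */
        eventfd_write(efd, (eventfd_t)1);

        /* Consumer side: in vhost this end belongs to the hypervisor. */
        eventfd_read(efd, &val);
        printf("eventfd counter drained: %llu\n", (unsigned long long)val);

        close(efd);
        return 0;
    }
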
@@ -485,6 +500,7 @@ nvme_worker(void *arg)
 {
 	struct spdk_vhost_nvme_dev *nvme = (struct spdk_vhost_nvme_dev *)arg;
 	struct spdk_vhost_nvme_sq *sq;
+	struct spdk_vhost_nvme_cq *cq;
 	struct spdk_vhost_nvme_task *task;
 	uint32_t qid, dbbuf_sq;
 	int ret;
@@ -501,13 +517,23 @@ nvme_worker(void *arg)
 		return -1;
 	}
 
 	/* Submission Queue */
 	for (qid = 1; qid <= MAX_IO_QUEUES; qid++) {
 
 		sq = spdk_vhost_nvme_get_sq_from_qid(nvme, qid);
 		if (!sq->valid) {
 			continue;
 		}
+		cq = spdk_vhost_nvme_get_cq_from_qid(nvme, sq->cqid);
+		if (spdk_unlikely(!cq)) {
+			return -1;
+		}
+		cq->guest_signaled_cq_head = nvme->dbbuf_dbs[cq_offset(sq->cqid, 1)];
+		if (spdk_unlikely(!STAILQ_EMPTY(&cq->cq_full_waited_tasks) && !nvme_cq_is_full(cq))) {
+			task = STAILQ_FIRST(&cq->cq_full_waited_tasks);
+			STAILQ_REMOVE_HEAD(&cq->cq_full_waited_tasks, stailq);
+			blk_request_complete_cb(NULL, task->success, task);
+		}
 
 		dbbuf_sq = nvme->dbbuf_dbs[sq_offset(qid, 1)];
 		sq->sq_tail = (uint16_t)dbbuf_sq;
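
Note that the poller retires at most one parked task per pass over each
submission queue, keeping a single poll iteration bounded; the rest drain on
subsequent passes. Passing NULL as bdev_io is safe because the completion
callback above now guards every bdev_io dereference. For contrast, a drain-all
variant is sketched below with toy types; it is an illustration, not a drop-in
change:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/queue.h>

    struct ptask {
        int id;
        STAILQ_ENTRY(ptask) link;
    };
    STAILQ_HEAD(plist, ptask);

    static bool cq_full(uint16_t head, uint16_t guest_head, uint16_t size)
    {
        return (uint16_t)((head + 1) % size) == guest_head;
    }

    int main(void)
    {
        struct plist waited = STAILQ_HEAD_INITIALIZER(waited);
        struct ptask t[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
        uint16_t head = 3, guest_head = 0, size = 4;   /* ring starts full */
        int i;

        for (i = 0; i < 3; i++) {
            STAILQ_INSERT_TAIL(&waited, &t[i], link);
        }

        guest_head = 2;   /* guest consumed two entries, rang the doorbell */

        /* Drain every parked completion that now fits. */
        while (!STAILQ_EMPTY(&waited) && !cq_full(head, guest_head, size)) {
            struct ptask *p = STAILQ_FIRST(&waited);
            STAILQ_REMOVE_HEAD(&waited, link);
            printf("posted parked completion %d in slot %u\n", p->id, head);
            head = (uint16_t)((head + 1) % size);
        }
        printf("%s tasks remain parked\n",
               STAILQ_EMPTY(&waited) ? "no" : "some");
        return 0;
    }
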
@@ -689,7 +715,7 @@ vhost_nvme_create_io_cq(struct spdk_vhost_nvme_dev *nvme,
 	/* Setup virq through vhost messages */
 	cq->virq = -1;
 	cq->cq_head = 0;
-	cq->last_signaled_cq_head = 0;
+	cq->guest_signaled_cq_head = 0;
 	requested_len = sizeof(struct spdk_nvme_cpl) * cq->size;
 	cq->cq_cqe = spdk_vhost_gpa_to_vva(&nvme->vdev, dma_addr, requested_len);
 	if (!cq->cq_cqe) {
@@ -697,6 +723,7 @@ vhost_nvme_create_io_cq(struct spdk_vhost_nvme_dev *nvme,
 	}
 
 	nvme->num_cqs++;
 	cq->valid = true;
+	STAILQ_INIT(&cq->cq_full_waited_tasks);
 	cpl->status.sc = 0;
 	cpl->status.sct = 0;
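
One ordering note: the waited-task list head lives inside the per-CQ structure
that is only set up when the guest creates the I/O completion queue, so it
takes the run-time STAILQ_INIT here; inserting into an uninitialized STAILQ
would write through a garbage tail pointer the first time
blk_request_complete_cb parks a task.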