diff --git a/lib/vhost/vhost_nvme.c b/lib/vhost/vhost_nvme.c
index 9681121b62..dd3447298c 100644
--- a/lib/vhost/vhost_nvme.c
+++ b/lib/vhost/vhost_nvme.c
@@ -72,6 +72,7 @@ struct spdk_vhost_nvme_cq {
 	volatile struct spdk_nvme_cpl *cq_cqe;
 	uint16_t cq_head;
 	uint16_t guest_signaled_cq_head;
+	uint32_t need_signaled_cnt;
 	STAILQ_HEAD(, spdk_vhost_nvme_task) cq_full_waited_tasks;
 	bool irq_enabled;
 	int virq;
@@ -284,6 +285,28 @@ spdk_nvme_map_prps(struct spdk_vhost_nvme_dev *nvme, struct spdk_nvme_cmd *cmd,
 	return 0;
 }
 
+static void
+spdk_nvme_cq_signal_fd(struct spdk_vhost_nvme_dev *nvme)
+{
+	struct spdk_vhost_nvme_cq *cq;
+	uint32_t qid, cq_head;
+
+	assert(nvme != NULL);
+
+	for (qid = 1; qid <= MAX_IO_QUEUES; qid++) {
+		cq = spdk_vhost_nvme_get_cq_from_qid(nvme, qid);
+		if (!cq || !cq->valid) {
+			continue;
+		}
+
+		cq_head = nvme->dbbuf_dbs[cq_offset(qid, 1)];
+		if (cq->irq_enabled && cq->need_signaled_cnt && (cq->cq_head != cq_head)) {
+			eventfd_write(cq->virq, (eventfd_t)1);
+			cq->need_signaled_cnt = 0;
+		}
+	}
+}
+
 static void
 blk_request_complete_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
 {
@@ -338,14 +361,11 @@ blk_request_complete_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg
 
 	cq->cq_cqe[cq->cq_head].status.p = cq->phase;
 	nvme_inc_cq_head(cq);
+	cq->need_signaled_cnt++;
 
 	/* MMIO Controll */
 	nvme->dbbuf_eis[cq_offset(cqid, 1)] = (uint32_t)(cq->guest_signaled_cq_head - 1);
 
-	if (cq->irq_enabled && (cq->cq_head != cq->guest_signaled_cq_head)) {
-		eventfd_write(cq->virq, (eventfd_t)1);
-	}
-
 	STAILQ_INSERT_TAIL(&nvme->free_tasks, task, stailq);
 }
 
@@ -570,6 +590,9 @@ nvme_worker(void *arg)
 		}
 	}
 
+	/* Completion Queue */
+	spdk_nvme_cq_signal_fd(nvme);
+
 	return count;
 }
 
@@ -716,6 +739,7 @@ vhost_nvme_create_io_cq(struct spdk_vhost_nvme_dev *nvme,
 	cq->virq = -1;
 	cq->cq_head = 0;
 	cq->guest_signaled_cq_head = 0;
+	cq->need_signaled_cnt = 0;
 	requested_len = sizeof(struct spdk_nvme_cpl) * cq->size;
 	cq->cq_cqe = spdk_vhost_gpa_to_vva(&nvme->vdev, dma_addr, requested_len);
 	if (!cq->cq_cqe) {
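
Taken together, the hunks above change *when* the guest gets interrupted rather than *what* gets completed: blk_request_complete_cb() now only counts posted completions in need_signaled_cnt, and the poller fires each CQ's eventfd at most once per poll cycle via spdk_nvme_cq_signal_fd(), consulting the guest's shadow doorbell (dbbuf_dbs) so queues whose completions the guest has already consumed are skipped. Below is a minimal standalone sketch of that interrupt-coalescing pattern, with hypothetical names (cq_state, complete_io, flush_irq) that are not from the patch; it omits the shadow-doorbell check for brevity:

```c
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>

struct cq_state {
	int      virq;                /* eventfd backing the queue's interrupt vector */
	bool     irq_enabled;
	uint32_t pending_completions; /* completions posted since the last signal */
};

/* Hot path: post a completion but defer the guest notification. */
static void
complete_io(struct cq_state *cq)
{
	/* ... write the CQE and advance the CQ head here ... */
	cq->pending_completions++;
}

/* Poller tail: at most one signal per queue per poll cycle. */
static void
flush_irq(struct cq_state *cq)
{
	if (cq->irq_enabled && cq->pending_completions) {
		eventfd_write(cq->virq, (eventfd_t)1);
		cq->pending_completions = 0;
	}
}

int
main(void)
{
	struct cq_state cq = {
		.virq = eventfd(0, 0),
		.irq_enabled = true,
		.pending_completions = 0,
	};

	/* Simulate one poll cycle that completes three I/Os. */
	for (int i = 0; i < 3; i++) {
		complete_io(&cq);
	}
	flush_irq(&cq);

	/* A single signal covered all three completions. */
	eventfd_t value;
	eventfd_read(cq.virq, &value);
	printf("signals delivered: %" PRIu64 "\n", (uint64_t)value); /* prints 1 */
	return 0;
}
```

The payoff is that one eventfd_write() covers every completion posted during the cycle; each write is a syscall, and when the eventfd is wired up as an irqfd each signal typically becomes a guest interrupt, so batching the signals cuts per-I/O overhead under load.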