vhost/nvme: move completion irq signal into IO poller context

Previously, the completion queue entry was posted and the irq event
was signaled from the same IO completion callback. For performance,
move the irq signaling routine into the IO poller context; batching
the signal there also makes it possible to implement an interrupt
coalescing feature in the future.
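
As a possible follow-up, a coalescing policy could key off the new
need_signaled_cnt counter. The sketch below is purely illustrative and
uses hypothetical names (cq_coalesce_state, nvme_cq_coalesce_check, the
threshold and max_delay_tsc tunables); none of it is part of this
change or of SPDK today:

    /*
     * Hypothetical sketch only: one way an interrupt coalescing policy
     * could later sit on top of need_signaled_cnt. The struct fields
     * and nvme_cq_coalesce_check() are illustrative names, not
     * existing SPDK APIs.
     */
    #include <stdbool.h>
    #include <stdint.h>

    struct cq_coalesce_state {
        uint32_t need_signaled_cnt; /* completions posted since last irq */
        uint32_t threshold;         /* fire the irq after this many completions */
        uint64_t last_signal_tsc;   /* timestamp of the last irq */
        uint64_t max_delay_tsc;     /* upper bound on completion latency */
    };

    /*
     * Called from the IO poller for each valid CQ: signal only when
     * enough completions have accumulated, or when the oldest
     * unsignaled completion has already waited long enough.
     */
    static bool
    nvme_cq_coalesce_check(struct cq_coalesce_state *st, uint64_t now_tsc)
    {
        if (st->need_signaled_cnt == 0) {
            return false;
        }
        if (st->need_signaled_cnt >= st->threshold ||
            now_tsc - st->last_signal_tsc >= st->max_delay_tsc) {
            st->need_signaled_cnt = 0;
            st->last_signal_tsc = now_tsc;
            return true; /* caller then does eventfd_write(cq->virq, 1) */
        }
        return false;
    }

With such a policy the poller would remain the only place that writes
the eventfd, but no more often than the threshold/timeout pair allows.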

Change-Id: Ic20b50af47b73ffcb91938802e18b316c07a4d11
Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-on: https://review.gerrithub.io/408943
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Changpeng Liu authored 2018-04-25 05:25:48 -04:00; committed by Daniel Verkamp
parent cb094a03f0
commit 2cd9f3959d


@@ -72,6 +72,7 @@ struct spdk_vhost_nvme_cq {
 	volatile struct spdk_nvme_cpl *cq_cqe;
 	uint16_t cq_head;
 	uint16_t guest_signaled_cq_head;
+	uint32_t need_signaled_cnt;
 	STAILQ_HEAD(, spdk_vhost_nvme_task) cq_full_waited_tasks;
 	bool irq_enabled;
 	int virq;
@@ -284,6 +285,28 @@ spdk_nvme_map_prps(struct spdk_vhost_nvme_dev *nvme, struct spdk_nvme_cmd *cmd,
 	return 0;
 }
 
+static void
+spdk_nvme_cq_signal_fd(struct spdk_vhost_nvme_dev *nvme)
+{
+	struct spdk_vhost_nvme_cq *cq;
+	uint32_t qid, cq_head;
+
+	assert(nvme != NULL);
+
+	for (qid = 1; qid <= MAX_IO_QUEUES; qid++) {
+		cq = spdk_vhost_nvme_get_cq_from_qid(nvme, qid);
+		if (!cq || !cq->valid) {
+			continue;
+		}
+
+		cq_head = nvme->dbbuf_dbs[cq_offset(qid, 1)];
+		if (cq->irq_enabled && cq->need_signaled_cnt && (cq->cq_head != cq_head)) {
+			eventfd_write(cq->virq, (eventfd_t)1);
+			cq->need_signaled_cnt = 0;
+		}
+	}
+}
+
 static void
 blk_request_complete_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
 {
@@ -338,14 +361,11 @@ blk_request_complete_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg
 	cq->cq_cqe[cq->cq_head].status.p = cq->phase;
 	nvme_inc_cq_head(cq);
+	cq->need_signaled_cnt++;
 
 	/* MMIO Controll */
 	nvme->dbbuf_eis[cq_offset(cqid, 1)] = (uint32_t)(cq->guest_signaled_cq_head - 1);
 
-	if (cq->irq_enabled && (cq->cq_head != cq->guest_signaled_cq_head)) {
-		eventfd_write(cq->virq, (eventfd_t)1);
-	}
-
 	STAILQ_INSERT_TAIL(&nvme->free_tasks, task, stailq);
 }
@@ -570,6 +590,9 @@ nvme_worker(void *arg)
 		}
 	}
 
+	/* Completion Queue */
+	spdk_nvme_cq_signal_fd(nvme);
+
 	return count;
 }
@@ -716,6 +739,7 @@ vhost_nvme_create_io_cq(struct spdk_vhost_nvme_dev *nvme,
 	cq->virq = -1;
 	cq->cq_head = 0;
 	cq->guest_signaled_cq_head = 0;
+	cq->need_signaled_cnt = 0;
 	requested_len = sizeof(struct spdk_nvme_cpl) * cq->size;
 	cq->cq_cqe = spdk_vhost_gpa_to_vva(&nvme->vdev, dma_addr, requested_len);
 	if (!cq->cq_cqe) {