bdev/virtio: support event index
This fixes intermittent failures with QEMU's virtio-scsi-pci device. Apparently, QEMU occasionally enters a broken state in which it never clears the NO_NOTIFY flag. The flag should be set only while the virtqueues are actually being processed, but QEMU leaves it set all the time. As a result we never signal our I/O submissions: SPDK assumes the device is currently processing notifications and will pick up our I/O automatically, but in reality it won't, and we end up with a deadlock.

I believe the kernel virtio driver doesn't hit this issue because of the event index feature: if it is negotiated, the NO_NOTIFY flag is not used at all. Initial tests show that the issue is indeed gone with this patch.

Event index is not particularly useful for SPDK virtio, which uses a poll-mode device, but it doesn't do any harm either. It could be further optimized in the future, but macro-benchmarks show no performance difference compared with the legacy notification handling, so there's no hurry.

Change-Id: I46e8ab0da170780fcdc0dd239208670ee5ed6357
Signed-off-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/415591
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
parent 587002f511
commit 03b12a98b2
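For context, here is a minimal sketch of the notification rule that VIRTIO_RING_F_EVENT_IDX enables, based on the virtio specification rather than on this patch; the helper name and parameter names below are illustrative, not part of SPDK:

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical helper mirroring the check virtqueue_req_flush() performs in
 * this patch.  With VIRTIO_RING_F_EVENT_IDX negotiated, the driver ignores the
 * NO_NOTIFY flag entirely and instead notifies the device only when the newly
 * published avail index crosses the avail-event index the device advertised.
 * All index arithmetic is modulo 2^16, matching the on-ring counters.
 */
static bool
driver_should_notify(uint16_t device_avail_event, /* read back from the used ring */
		     uint16_t new_avail_idx,      /* avail->idx just published */
		     uint16_t reqs_just_added)    /* entries added by this flush */
{
	uint16_t old_avail_idx = new_avail_idx - reqs_just_added;

	/* Same window test as the spec's vring_need_event(): notify iff
	 * device_avail_event lies in the half-open range (old, new]. */
	return (uint16_t)(new_avail_idx - device_avail_event - 1) <
	       (uint16_t)(new_avail_idx - old_avail_idx);
}

Because the decision no longer depends on NO_NOTIFY at all, a device that leaves that flag set cannot suppress the doorbell: the driver still notifies whenever the device has fallen behind the avail index it asked to be woken at.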
@@ -169,6 +169,7 @@ struct virtqueue {
 
 	uint16_t req_start;
 	uint16_t req_end;
+	uint16_t reqs_finished;
 
 	struct vq_desc_extra vq_descx[0];
 };

@@ -79,7 +79,8 @@ struct bdev_virtio_blk_io_channel {
 	(1ULL << VIRTIO_BLK_F_BLK_SIZE | \
 	 1ULL << VIRTIO_BLK_F_TOPOLOGY | \
 	 1ULL << VIRTIO_BLK_F_MQ | \
-	 1ULL << VIRTIO_BLK_F_RO)
+	 1ULL << VIRTIO_BLK_F_RO | \
+	 1ULL << VIRTIO_RING_F_EVENT_IDX)
 
 static int bdev_virtio_initialize(void);
 static int bdev_virtio_blk_get_ctx_size(void);

@@ -192,7 +192,8 @@ static bool g_bdev_virtio_finish = false;
 /* Features desired/implemented by this driver. */
 #define VIRTIO_SCSI_DEV_SUPPORTED_FEATURES \
 	(1ULL << VIRTIO_SCSI_F_INOUT | \
-	 1ULL << VIRTIO_SCSI_F_HOTPLUG)
+	 1ULL << VIRTIO_SCSI_F_HOTPLUG | \
+	 1ULL << VIRTIO_RING_F_EVENT_IDX)
 
 static void virtio_scsi_dev_unregister_cb(void *io_device);
 static void virtio_scsi_dev_remove(struct virtio_scsi_dev *svdev,

@@ -96,12 +96,21 @@ virtio_init_vring(struct virtqueue *vq)
 	vq->vq_free_cnt = vq->vq_nentries;
 	vq->req_start = VQ_RING_DESC_CHAIN_END;
 	vq->req_end = VQ_RING_DESC_CHAIN_END;
+	vq->reqs_finished = 0;
 	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
 
 	vring_desc_init(vr->desc, size);
 
-	/* Tell the backend not to interrupt us. */
-	vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+	/* Tell the backend not to interrupt us.
+	 * If F_EVENT_IDX is negotiated, we will always set incredibly high
+	 * used event idx, so that we will practically never receive an
+	 * interrupt. See virtqueue_req_flush()
+	 */
+	if (vq->vdev->negotiated_features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
+		vring_used_event(&vq->vq_ring) = UINT16_MAX;
+	} else {
+		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+	}
 }
 
 static int

@@ -437,6 +446,7 @@ finish_req(struct virtqueue *vq)
 	vq->req_end = VQ_RING_DESC_CHAIN_END;
 	virtio_wmb();
 	vq->vq_ring.avail->idx = vq->vq_avail_idx;
+	vq->reqs_finished++;
 }
 
 int

@@ -463,6 +473,8 @@ virtqueue_req_start(struct virtqueue *vq, void *cookie, int iovcnt)
 void
 virtqueue_req_flush(struct virtqueue *vq)
 {
+	uint16_t reqs_finished;
+
 	if (vq->req_end == VQ_RING_DESC_CHAIN_END) {
 		/* no non-empty requests have been started */
 		return;

@@ -471,10 +483,26 @@ virtqueue_req_flush(struct virtqueue *vq)
 	finish_req(vq);
 	virtio_mb();
 
-	if (spdk_unlikely(!(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY))) {
-		virtio_dev_backend_ops(vq->vdev)->notify_queue(vq->vdev, vq);
-		SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_DEV, "Notified backend after xmit\n");
-	}
+	reqs_finished = vq->reqs_finished;
+	vq->reqs_finished = 0;
+
+	if (vq->vdev->negotiated_features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
+		/* Set used event idx to a value the device will never reach.
+		 * This effectively disables interrupts.
+		 */
+		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx - vq->vq_nentries - 1;
+
+		if (!vring_need_event(vring_avail_event(&vq->vq_ring),
+				      vq->vq_avail_idx,
+				      vq->vq_avail_idx - reqs_finished)) {
+			return;
+		}
+	} else if (vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) {
+		return;
+	}
+
+	virtio_dev_backend_ops(vq->vdev)->notify_queue(vq->vdev, vq);
+	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_DEV, "Notified backend after xmit\n");
 }
 
 void

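The vring_need_event() call in the last hunk is not defined in this diff; it is the standard helper from the virtio ring ABI (see the virtio spec and linux/virtio_ring.h), reproduced below for reference under the assumption that SPDK's copy follows the same definition:

#include <stdint.h>

/* Returns non-zero when the other side asked to be woken up: event_idx lies in
 * the half-open window (old_idx, new_idx], with arithmetic modulo 2^16. */
static inline int
vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old_idx)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old_idx);
}

The device applies the same test to decide whether to interrupt the driver; since virtio_init_vring() parks the used event idx at UINT16_MAX and virtqueue_req_flush() moves it to vq_used_cons_idx - vq_nentries - 1, a value outside any window the device can reach before SPDK polls again, interrupts toward the poll-mode driver are effectively suppressed.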