Modify virtqueue helpers added in r253440 to allow queuing.

The original virtqueue design allows queued and out-of-order processing, but
the helpers added in r253440 assume only direct, blocking, in-order processing.
That may be fine for network devices and the like, but it is a serious
limitation for storage devices.
Alexander Motin 2015-03-15 11:37:07 +00:00
parent e509c88862
commit fdb7e97f87
5 changed files with 24 additions and 39 deletions
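
For illustration only (this sketch is not part of the commit): a minimal notify handler using the changed helpers, assuming bhyve's virtio.h declarations as modified below. vq_getchain() now reports the head descriptor index of each chain through *pidx, and vq_relchain() takes that index back, so chains can be completed and returned to the guest out of order. The names my_softc, my_submit_io, my_notify and MY_MAXSEGS are hypothetical.

#include <assert.h>
#include <sys/uio.h>
/* struct vqueue_info and the vq_* helpers come from bhyve's virtio.h. */

#define MY_MAXSEGS	8	/* hypothetical per-request segment limit */

static void
my_notify(void *vsc, struct vqueue_info *vq)
{
	struct my_softc *sc = vsc;		/* hypothetical device softc */
	struct iovec iov[MY_MAXSEGS];
	uint16_t idx;
	int n;

	while (vq_has_descs(vq)) {
		/* Fetch the next available chain and remember its head index. */
		n = vq_getchain(vq, &idx, iov, MY_MAXSEGS, NULL);
		assert(n >= 1 && n <= MY_MAXSEGS);

		/*
		 * Hand the request to a backend (hypothetical helper).  When it
		 * completes, possibly out of order with other requests, the
		 * completion path returns the chain and raises an interrupt
		 * if needed:
		 *
		 *	vq_relchain(vq, idx, iolen);
		 *	vq_endchains(vq, 0);
		 */
		my_submit_io(sc, vq, idx, iov, n);
	}
}

With the pre-r253440-style helpers this pattern was impossible, since the old vq_relchain() re-read the avail ring itself and therefore had to be called in the same order as vq_getchain().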

View File

@@ -170,9 +170,9 @@ pci_vtblk_proc(struct pci_vtblk_softc *sc, struct vqueue_info *vq)
 int writeop, type;
 off_t offset;
 struct iovec iov[VTBLK_MAXSEGS + 2];
-uint16_t flags[VTBLK_MAXSEGS + 2];
+uint16_t idx, flags[VTBLK_MAXSEGS + 2];
-n = vq_getchain(vq, iov, VTBLK_MAXSEGS + 2, flags);
+n = vq_getchain(vq, &idx, iov, VTBLK_MAXSEGS + 2, flags);
 /*
  * The first descriptor will be the read-only fixed header,
@@ -258,7 +258,7 @@ pci_vtblk_proc(struct pci_vtblk_softc *sc, struct vqueue_info *vq)
  * Return the descriptor back to the host.
  * We wrote 1 byte (our status) to host.
  */
-vq_relchain(vq, 1);
+vq_relchain(vq, idx, 1);
 }
 static void
@@ -266,7 +266,6 @@ pci_vtblk_notify(void *vsc, struct vqueue_info *vq)
 {
 struct pci_vtblk_softc *sc = vsc;
-vq_startchains(vq);
 while (vq_has_descs(vq))
 pci_vtblk_proc(sc, vq);
 vq_endchains(vq, 1); /* Generate interrupt if appropriate. */

View File

@@ -288,6 +288,7 @@ pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
 struct vqueue_info *vq;
 void *vrx;
 int len, n;
+uint16_t idx;
 /*
  * Should never be called without a valid tap fd
@@ -310,7 +311,6 @@ pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
  * Check for available rx buffers
  */
 vq = &sc->vsc_queues[VTNET_RXQ];
-vq_startchains(vq);
 if (!vq_has_descs(vq)) {
 /*
  * Drop the packet and try later. Interrupt on
@@ -325,7 +325,7 @@ pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
 /*
  * Get descriptor chain.
  */
-n = vq_getchain(vq, iov, VTNET_MAXSEGS, NULL);
+n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL);
 assert(n >= 1 && n <= VTNET_MAXSEGS);
 /*
@@ -362,7 +362,7 @@ pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
 /*
  * Release this chain and handle more chains.
  */
-vq_relchain(vq, len + sc->rx_vhdrlen);
+vq_relchain(vq, idx, len + sc->rx_vhdrlen);
 } while (vq_has_descs(vq));
 /* Interrupt if needed, including for NOTIFY_ON_EMPTY. */
@@ -401,13 +401,14 @@ pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq)
 struct iovec iov[VTNET_MAXSEGS + 1];
 int i, n;
 int plen, tlen;
+uint16_t idx;
 /*
  * Obtain chain of descriptors. The first one is
  * really the header descriptor, so we need to sum
  * up two lengths: packet length and transfer length.
  */
-n = vq_getchain(vq, iov, VTNET_MAXSEGS, NULL);
+n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL);
 assert(n >= 1 && n <= VTNET_MAXSEGS);
 plen = 0;
 tlen = iov[0].iov_len;
@@ -420,7 +421,7 @@ pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq)
 pci_vtnet_tap_tx(sc, &iov[1], n - 1, plen);
 /* chain is processed, release it and set tlen */
-vq_relchain(vq, tlen);
+vq_relchain(vq, idx, tlen);
 }
 static void
@@ -479,7 +480,6 @@ pci_vtnet_tx_thread(void *param)
 sc->tx_in_progress = 1;
 pthread_mutex_unlock(&sc->tx_mtx);
-vq_startchains(vq);
 do {
 /*
  * Run through entries, placing them into

View File

@@ -103,18 +103,17 @@ pci_vtrnd_notify(void *vsc, struct vqueue_info *vq)
 struct iovec iov;
 struct pci_vtrnd_softc *sc;
 int len;
+uint16_t idx;
 sc = vsc;
-vq_startchains(vq);
 if (sc->vrsc_fd < 0) {
 vq_endchains(vq, 0);
 return;
 }
 while (vq_has_descs(vq)) {
-vq_getchain(vq, &iov, 1, NULL);
+vq_getchain(vq, &idx, &iov, 1, NULL);
 len = read(sc->vrsc_fd, iov.iov_base, iov.iov_len);
@@ -126,7 +125,7 @@ pci_vtrnd_notify(void *vsc, struct vqueue_info *vq)
 /*
  * Release this chain and handle more
  */
-vq_relchain(vq, len);
+vq_relchain(vq, idx, len);
 }
 vq_endchains(vq, 1); /* Generate interrupt if appropriate. */
 }

View File

@@ -97,6 +97,7 @@ vi_reset_dev(struct virtio_softc *vs)
 for (vq = vs->vs_queues, i = 0; i < nvq; vq++, i++) {
 vq->vq_flags = 0;
 vq->vq_last_avail = 0;
+vq->vq_save_used = 0;
 vq->vq_pfn = 0;
 vq->vq_msix_idx = VIRTIO_MSI_NO_VECTOR;
 }
@@ -188,6 +189,7 @@ vi_vq_init(struct virtio_softc *vs, uint32_t pfn)
 /* Mark queue as allocated, and start at 0 when we use it. */
 vq->vq_flags = VQ_ALLOC;
 vq->vq_last_avail = 0;
+vq->vq_save_used = 0;
 }
 /*
@@ -247,12 +249,12 @@ _vq_record(int i, volatile struct virtio_desc *vd, struct vmctx *ctx,
  * that vq_has_descs() does one).
  */
 int
-vq_getchain(struct vqueue_info *vq,
+vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
 struct iovec *iov, int n_iov, uint16_t *flags)
 {
 int i;
 u_int ndesc, n_indir;
-u_int idx, head, next;
+u_int idx, next;
 volatile struct virtio_desc *vdir, *vindir, *vp;
 struct vmctx *ctx;
 struct virtio_softc *vs;
@@ -295,8 +297,8 @@ vq_getchain(struct vqueue_info *vq,
  * index, but we just abort if the count gets excessive.
  */
 ctx = vs->vs_pi->pi_vmctx;
-head = vq->vq_avail->va_ring[idx & (vq->vq_qsize - 1)];
-next = head;
+*pidx = next = vq->vq_avail->va_ring[idx & (vq->vq_qsize - 1)];
+vq->vq_last_avail++;
 for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->vd_next) {
 if (next >= vq->vq_qsize) {
 fprintf(stderr,
@@ -377,9 +379,9 @@ vq_getchain(struct vqueue_info *vq,
  * and used its positive return value.)
  */
 void
-vq_relchain(struct vqueue_info *vq, uint32_t iolen)
+vq_relchain(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
 {
-uint16_t head, uidx, mask;
+uint16_t uidx, mask;
 volatile struct vring_used *vuh;
 volatile struct virtio_used *vue;
@@ -395,11 +397,10 @@ vq_relchain(struct vqueue_info *vq, uint32_t iolen)
  */
 mask = vq->vq_qsize - 1;
 vuh = vq->vq_used;
-head = vq->vq_avail->va_ring[vq->vq_last_avail++ & mask];
 uidx = vuh->vu_idx;
 vue = &vuh->vu_ring[uidx++ & mask];
-vue->vu_idx = head; /* ie, vue->id = head */
+vue->vu_idx = idx;
 vue->vu_tlen = iolen;
 vuh->vu_idx = uidx;
 }
@@ -436,8 +437,8 @@ vq_endchains(struct vqueue_info *vq, int used_all_avail)
  * entire avail was processed, we need to interrupt always.
  */
 vs = vq->vq_vs;
-new_idx = vq->vq_used->vu_idx;
 old_idx = vq->vq_save_used;
+vq->vq_save_used = new_idx = vq->vq_used->vu_idx;
 if (used_all_avail &&
 (vs->vs_negotiated_caps & VIRTIO_F_NOTIFY_ON_EMPTY))
 intr = 1;

View File

@@ -424,20 +424,6 @@ vq_has_descs(struct vqueue_info *vq)
 vq->vq_avail->va_idx);
 }
-/*
- * Called by virtio driver as it starts processing chains. Each
- * completed chain (obtained from vq_getchain()) is released by
- * calling vq_relchain(), then when all are done, vq_endchains()
- * can tell if / how-many chains were processed and know whether
- * and how to generate an interrupt.
- */
-static inline void
-vq_startchains(struct vqueue_info *vq)
-{
-vq->vq_save_used = vq->vq_used->vu_idx;
-}
 /*
  * Deliver an interrupt to guest on the given virtual queue
  * (if possible, or a generic MSI interrupt if not using MSI-X).
@@ -465,9 +451,9 @@ int vi_intr_init(struct virtio_softc *vs, int barnum, int use_msix);
 void vi_reset_dev(struct virtio_softc *);
 void vi_set_io_bar(struct virtio_softc *, int);
-int vq_getchain(struct vqueue_info *vq,
+int vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
 struct iovec *iov, int n_iov, uint16_t *flags);
-void vq_relchain(struct vqueue_info *vq, uint32_t iolen);
+void vq_relchain(struct vqueue_info *vq, uint16_t idx, uint32_t iolen);
 void vq_endchains(struct vqueue_info *vq, int used_all_avail);
 uint64_t vi_pci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,