bhyve: virtio: share definitions with sys/dev/virtio

Duplicated definitions in usr.sbin/bhyve/virtio.h are dropped; the
definitions provided by sys/dev/virtio are used instead.

This reduces code duplication.

Sponsored by:	The FreeBSD Foundation
Reviewed by:	grehan
Approved by:	philip (mentor)
Differential Revision:	https://reviews.freebsd.org/D29084
Ka Ho Ng, 2021-03-16 19:27:38 +08:00 (committed by Ka Ho Ng)
commit 54ac6f721e, parent 15b82e00a1
10 changed files with 105 additions and 211 deletions
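
For illustration, the net effect (a sketch, not part of the diff; the
_Static_assert below is a hypothetical check): bhyve's device models now
take the virtio device IDs and ring structures from the kernel's headers
rather than from private copies.

#include <dev/virtio/virtio.h>		/* VIRTIO_ID_* via virtio_ids.h */
#include <dev/virtio/virtio_ring.h>	/* struct vring_desc, vring_avail, vring_used */

_Static_assert(VIRTIO_ID_NETWORK == 1 && VIRTIO_ID_BLOCK == 2 &&
    VIRTIO_ID_CONSOLE == 3 && VIRTIO_ID_ENTROPY == 4 &&
    VIRTIO_ID_SCSI == 8 && VIRTIO_ID_9P == 9,
    "shared IDs carry the same values as the removed VIRTIO_TYPE_* macros");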

sys/dev/virtio/virtio.h

@@ -35,6 +35,8 @@
 #include <dev/virtio/virtio_ids.h>
 #include <dev/virtio/virtio_config.h>
 
+#ifdef _KERNEL
+
 struct sbuf;
 struct vq_alloc_info;
@@ -187,4 +189,6 @@ virtio_simple_probe(device_t dev, const struct virtio_pnp_match *match)
 	return (BUS_PROBE_DEFAULT);
 }
 
+#endif /* _KERNEL */
+
 #endif /* _VIRTIO_H_ */

sys/dev/virtio/virtio_endian.h

@@ -32,6 +32,9 @@
 #define _VIRTIO_ENDIAN_H_
 
 #include <sys/endian.h>
+#ifndef _KERNEL
+#include <stdbool.h>
+#endif /* _KERNEL */
 
 /*
  * VirtIO V1 (modern) uses little endian, while legacy VirtIO uses the guest's
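
Both kernel headers are thereby made safe for userland consumers:
kernel-only declarations in <dev/virtio/virtio.h> sit behind the new
#ifdef _KERNEL, and <dev/virtio/virtio_endian.h> pulls in <stdbool.h>
for its bool parameters when built outside the kernel. A minimal sketch
of a userland translation unit (hypothetical, compiled without -D_KERNEL;
virtio_gtoh16() is assumed to keep its (bool, uint16_t) signature):

#include <stdint.h>
#include <dev/virtio/virtio.h>		/* IDs and config constants only */
#include <dev/virtio/virtio_endian.h>	/* virtio_gtoh16() and friends */

uint16_t
read_guest16(bool modern, uint16_t v)
{
	/* legacy virtio is guest-endian; virtio V1 is little-endian */
	return (virtio_gtoh16(modern, v));
}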

usr.sbin/bhyve/pci_virtio_9p.c

@@ -325,7 +325,7 @@ pci_vt9p_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
 	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_9P);
 	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
 	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
-	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_9P);
+	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_9P);
 	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);
 
 	if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix()))

usr.sbin/bhyve/pci_virtio_block.c

@@ -533,7 +533,7 @@ pci_vtblk_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
 	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_BLOCK);
 	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
 	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
-	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_BLOCK);
+	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_BLOCK);
 	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);
 
 	if (vi_intr_init(&sc->vbsc_vs, 1, fbsdrun_virtio_msix())) {

usr.sbin/bhyve/pci_virtio_console.c

@@ -644,7 +644,7 @@ pci_vtcon_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
 	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_CONSOLE);
 	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
 	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_SIMPLECOMM);
-	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_CONSOLE);
+	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_CONSOLE);
 	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);
 
 	if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix()))

usr.sbin/bhyve/pci_virtio_net.c

@@ -670,7 +670,7 @@ pci_vtnet_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
 	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
 	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
 	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
-	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_NET);
+	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_NETWORK);
 	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);
 
 	/* Link is up if we managed to open backend device. */

usr.sbin/bhyve/pci_virtio_rnd.c

@@ -190,7 +190,7 @@ pci_vtrnd_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
 	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_RANDOM);
 	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
 	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_CRYPTO);
-	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_ENTROPY);
+	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_ENTROPY);
 	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);
 
 	if (vi_intr_init(&sc->vrsc_vs, 1, fbsdrun_virtio_msix()))

usr.sbin/bhyve/pci_virtio_scsi.c

@@ -720,7 +720,7 @@ pci_vtscsi_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
 	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_SCSI);
 	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
 	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
-	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_SCSI);
+	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_SCSI);
 	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);
 
 	if (vi_intr_init(&sc->vss_vs, 1, fbsdrun_virtio_msix()))
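
All six device models change the same way. The subsystem device ID is how
a legacy virtio-PCI guest learns the virtio device type, so it must carry
the canonical ID. A sketch of the identity scheme these init routines
implement (comments are illustrative, not from the diff):

	/* legacy virtio-over-PCI identity, as programmed above */
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);	/* 0x1af4 */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);	/* transitional PCI device ID */
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_NETWORK); /* virtio device type (1) */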

usr.sbin/bhyve/virtio.c

@@ -36,6 +36,8 @@ __FBSDID("$FreeBSD$");
 #include <machine/atomic.h>
 #include <machine/vmm_snapshot.h>
 
+#include <dev/virtio/pci/virtio_pci_legacy_var.h>
+
 #include <stdio.h>
 #include <stdint.h>
 #include <pthread.h>
@@ -127,10 +129,10 @@ vi_set_io_bar(struct virtio_softc *vs, int barnum)
 	size_t size;
 
 	/*
-	 * ??? should we use CFG0 if MSI-X is disabled?
+	 * ??? should we use VIRTIO_PCI_CONFIG_OFF(0) if MSI-X is disabled?
	 * Existing code did not...
 	 */
-	size = VTCFG_R_CFG1 + vs->vs_vc->vc_cfgsize;
+	size = VIRTIO_PCI_CONFIG_OFF(1) + vs->vs_vc->vc_cfgsize;
 	pci_emul_alloc_bar(vs->vs_pi, barnum, PCIBAR_IO, size);
 }
@@ -182,12 +184,12 @@ vi_vq_init(struct virtio_softc *vs, uint32_t pfn)
 	vq = &vs->vs_queues[vs->vs_curq];
 	vq->vq_pfn = pfn;
 	phys = (uint64_t)pfn << VRING_PFN;
-	size = vring_size(vq->vq_qsize);
+	size = vring_size_aligned(vq->vq_qsize);
 	base = paddr_guest2host(vs->vs_pi->pi_vmctx, phys, size);
 
 	/* First page(s) are descriptors... */
-	vq->vq_desc = (struct virtio_desc *)base;
-	base += vq->vq_qsize * sizeof(struct virtio_desc);
+	vq->vq_desc = (struct vring_desc *)base;
+	base += vq->vq_qsize * sizeof(struct vring_desc);
 
 	/* ... immediately followed by "avail" ring (entirely uint16_t's) */
 	vq->vq_avail = (struct vring_avail *)base;
@@ -211,15 +213,15 @@ vi_vq_init(struct virtio_softc *vs, uint32_t pfn)
  * descriptor.
  */
 static inline void
-_vq_record(int i, volatile struct virtio_desc *vd, struct vmctx *ctx,
+_vq_record(int i, volatile struct vring_desc *vd, struct vmctx *ctx,
 	   struct iovec *iov, int n_iov, uint16_t *flags) {
 
 	if (i >= n_iov)
 		return;
-	iov[i].iov_base = paddr_guest2host(ctx, vd->vd_addr, vd->vd_len);
-	iov[i].iov_len = vd->vd_len;
+	iov[i].iov_base = paddr_guest2host(ctx, vd->addr, vd->len);
+	iov[i].iov_len = vd->len;
 	if (flags != NULL)
-		flags[i] = vd->vd_flags;
+		flags[i] = vd->flags;
 }
 
 #define VQ_MAX_DESCRIPTORS 512	/* see below */
@@ -236,7 +238,7 @@ _vq_record(int i, volatile struct virtio_desc *vd, struct vmctx *ctx,
  * i.e., we do not count the indirect descriptors, only the "real"
  * ones.
  *
- * Basically, this vets the vd_flags and vd_next field of each
+ * Basically, this vets the "flags" and "next" field of each
  * descriptor and tells you how many are involved.  Since some may
  * be indirect, this also needs the vmctx (in the pci_devinst
  * at vs->vs_pi) so that it can find indirect descriptors.
@@ -253,7 +255,7 @@ _vq_record(int i, volatile struct virtio_desc *vd, struct vmctx *ctx,
  *
  * If you want to verify the WRITE flag on each descriptor, pass a
  * non-NULL "flags" pointer to an array of "uint16_t" of the same size
- * as n_iov and we'll copy each vd_flags field after unwinding any
+ * as n_iov and we'll copy each "flags" field after unwinding any
  * indirects.
  *
  * If some descriptor(s) are invalid, this prints a diagnostic message
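
For context, a minimal caller of this API (an illustrative sketch modeled
on bhyve's simpler device models; example_notify and the iolen placeholder
are hypothetical):

static void
example_notify(void *sc, struct vqueue_info *vq)
{
	struct iovec iov[8];
	uint16_t idx, flags[8];
	int n;

	while (vq_has_descs(vq)) {
		n = vq_getchain(vq, &idx, iov, 8, flags);
		if (n <= 0)
			break;
		/* ... fill iov[0..n-1], honoring VRING_DESC_F_WRITE ... */
		vq_relchain(vq, idx, 0 /* bytes written back */);
	}
	vq_endchains(vq, 1);	/* interrupt if the guest asked for one */
}
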
@@ -269,7 +271,7 @@ vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
 	int i;
 	u_int ndesc, n_indir;
 	u_int idx, next;
-	volatile struct virtio_desc *vdir, *vindir, *vp;
+	volatile struct vring_desc *vdir, *vindir, *vp;
 	struct vmctx *ctx;
 	struct virtio_softc *vs;
 	const char *name;
@@ -279,11 +281,11 @@ vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
 	/*
 	 * Note: it's the responsibility of the guest not to
-	 * update vq->vq_avail->va_idx until all of the descriptors
+	 * update vq->vq_avail->idx until all of the descriptors
 	 * the guest has written are valid (including all their
-	 * vd_next fields and vd_flags).
+	 * "next" fields and "flags").
 	 *
-	 * Compute (va_idx - last_avail) in integers mod 2**16.  This is
+	 * Compute (vq_avail->idx - last_avail) in integers mod 2**16.  This is
 	 * the number of descriptors the device has made available
 	 * since the last time we updated vq->vq_last_avail.
 	 *
@@ -291,7 +293,7 @@ vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
 	 * then trim off excess bits.
 	 */
 	idx = vq->vq_last_avail;
-	ndesc = (uint16_t)((u_int)vq->vq_avail->va_idx - idx);
+	ndesc = (uint16_t)((u_int)vq->vq_avail->idx - idx);
 	if (ndesc == 0)
 		return (0);
 	if (ndesc > vq->vq_qsize) {
@@ -311,9 +313,9 @@ vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
 	 * index, but we just abort if the count gets excessive.
 	 */
 	ctx = vs->vs_pi->pi_vmctx;
-	*pidx = next = vq->vq_avail->va_ring[idx & (vq->vq_qsize - 1)];
+	*pidx = next = vq->vq_avail->ring[idx & (vq->vq_qsize - 1)];
 	vq->vq_last_avail++;
-	for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->vd_next) {
+	for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->next) {
 		if (next >= vq->vq_qsize) {
 			EPRINTLN(
 			    "%s: descriptor index %u out of range, "
@@ -322,7 +324,7 @@ vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
 			return (-1);
 		}
 		vdir = &vq->vq_desc[next];
-		if ((vdir->vd_flags & VRING_DESC_F_INDIRECT) == 0) {
+		if ((vdir->flags & VRING_DESC_F_INDIRECT) == 0) {
 			_vq_record(i, vdir, ctx, iov, n_iov, flags);
 			i++;
 		} else if ((vs->vs_vc->vc_hv_caps &
@@ -333,16 +335,16 @@ vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
 			    name);
 			return (-1);
 		} else {
-			n_indir = vdir->vd_len / 16;
-			if ((vdir->vd_len & 0xf) || n_indir == 0) {
+			n_indir = vdir->len / 16;
+			if ((vdir->len & 0xf) || n_indir == 0) {
 				EPRINTLN(
 				    "%s: invalid indir len 0x%x, "
 				    "driver confused?",
-				    name, (u_int)vdir->vd_len);
+				    name, (u_int)vdir->len);
 				return (-1);
 			}
 			vindir = paddr_guest2host(ctx,
-			    vdir->vd_addr, vdir->vd_len);
+			    vdir->addr, vdir->len);
 			/*
 			 * Indirects start at the 0th, then follow
 			 * their own embedded "next"s until those run
@@ -353,7 +355,7 @@ vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
 			next = 0;
 			for (;;) {
 				vp = &vindir[next];
-				if (vp->vd_flags & VRING_DESC_F_INDIRECT) {
+				if (vp->flags & VRING_DESC_F_INDIRECT) {
 					EPRINTLN(
 					    "%s: indirect desc has INDIR flag,"
 					    " driver confused?",
@@ -363,9 +365,9 @@ vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
 				_vq_record(i, vp, ctx, iov, n_iov, flags);
 				if (++i > VQ_MAX_DESCRIPTORS)
 					goto loopy;
-				if ((vp->vd_flags & VRING_DESC_F_NEXT) == 0)
+				if ((vp->flags & VRING_DESC_F_NEXT) == 0)
 					break;
-				next = vp->vd_next;
+				next = vp->next;
 				if (next >= n_indir) {
 					EPRINTLN(
 					    "%s: invalid next %u > %u, "
@@ -375,7 +377,7 @@ vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
 				}
 			}
 		}
-		if ((vdir->vd_flags & VRING_DESC_F_NEXT) == 0)
+		if ((vdir->flags & VRING_DESC_F_NEXT) == 0)
 			return (i);
 	}
 
 loopy:
@@ -402,7 +404,7 @@ void
 vq_relchain_prepare(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
 {
 	volatile struct vring_used *vuh;
-	volatile struct virtio_used *vue;
+	volatile struct vring_used_elem *vue;
 	uint16_t mask;
 
 	/*
@@ -410,16 +412,13 @@ vq_relchain_prepare(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
 	 * - mask is N-1 where N is a power of 2 so computes x % N
 	 * - vuh points to the "used" data shared with guest
 	 * - vue points to the "used" ring entry we want to update
-	 *
-	 * (I apologize for the two fields named vu_idx; the
-	 * virtio spec calls the one that vue points to, "id"...)
 	 */
 	mask = vq->vq_qsize - 1;
 	vuh = vq->vq_used;
-	vue = &vuh->vu_ring[vq->vq_next_used++ & mask];
-	vue->vu_idx = idx;
-	vue->vu_tlen = iolen;
+	vue = &vuh->ring[vq->vq_next_used++ & mask];
+	vue->id = idx;
+	vue->len = iolen;
 }
 
 void
@@ -431,7 +430,7 @@ vq_relchain_publish(struct vqueue_info *vq)
 	 * (and even on x86 to act as a compiler barrier).
 	 */
 	atomic_thread_fence_rel();
-	vq->vq_used->vu_idx = vq->vq_next_used;
+	vq->vq_used->idx = vq->vq_next_used;
 }
 
 /*
@@ -481,12 +480,12 @@ vq_endchains(struct vqueue_info *vq, int used_all_avail)
 	 */
 	vs = vq->vq_vs;
 	old_idx = vq->vq_save_used;
-	vq->vq_save_used = new_idx = vq->vq_used->vu_idx;
+	vq->vq_save_used = new_idx = vq->vq_used->idx;
 
 	/*
-	 * Use full memory barrier between vu_idx store from preceding
+	 * Use full memory barrier between "idx" store from preceding
 	 * vq_relchain() call and the loads from VQ_USED_EVENT_IDX() or
-	 * va_flags below.
+	 * "flags" field below.
 	 */
 	atomic_thread_fence_seq_cst();
 	if (used_all_avail &&
@@ -502,7 +501,7 @@ vq_endchains(struct vqueue_info *vq, int used_all_avail)
 		    (uint16_t)(new_idx - old_idx);
 	} else {
 		intr = new_idx != old_idx &&
-		    !(vq->vq_avail->va_flags & VRING_AVAIL_F_NO_INTERRUPT);
+		    !(vq->vq_avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
 	}
 	if (intr)
 		vq_interrupt(vs, vq);
@@ -515,16 +514,16 @@ static struct config_reg {
 	uint8_t		cr_ro;		/* true => reg is read only */
 	const char	*cr_name;	/* name of reg */
 } config_regs[] = {
-	{ VTCFG_R_HOSTCAP,	4, 1, "HOSTCAP" },
-	{ VTCFG_R_GUESTCAP,	4, 0, "GUESTCAP" },
-	{ VTCFG_R_PFN,		4, 0, "PFN" },
-	{ VTCFG_R_QNUM,		2, 1, "QNUM" },
-	{ VTCFG_R_QSEL,		2, 0, "QSEL" },
-	{ VTCFG_R_QNOTIFY,	2, 0, "QNOTIFY" },
-	{ VTCFG_R_STATUS,	1, 0, "STATUS" },
-	{ VTCFG_R_ISR,		1, 0, "ISR" },
-	{ VTCFG_R_CFGVEC,	2, 0, "CFGVEC" },
-	{ VTCFG_R_QVEC,		2, 0, "QVEC" },
+	{ VIRTIO_PCI_HOST_FEATURES,	4, 1, "HOST_FEATURES" },
+	{ VIRTIO_PCI_GUEST_FEATURES,	4, 0, "GUEST_FEATURES" },
+	{ VIRTIO_PCI_QUEUE_PFN,		4, 0, "QUEUE_PFN" },
+	{ VIRTIO_PCI_QUEUE_NUM,		2, 1, "QUEUE_NUM" },
+	{ VIRTIO_PCI_QUEUE_SEL,		2, 0, "QUEUE_SEL" },
+	{ VIRTIO_PCI_QUEUE_NOTIFY,	2, 0, "QUEUE_NOTIFY" },
+	{ VIRTIO_PCI_STATUS,		1, 0, "STATUS" },
+	{ VIRTIO_PCI_ISR,		1, 0, "ISR" },
+	{ VIRTIO_MSI_CONFIG_VECTOR,	2, 0, "CONFIG_VECTOR" },
+	{ VIRTIO_MSI_QUEUE_VECTOR,	2, 0, "QUEUE_VECTOR" },
 };
 
 static inline struct config_reg *
@@ -586,10 +585,7 @@ vi_pci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
 	if (size != 1 && size != 2 && size != 4)
 		goto bad;
 
-	if (pci_msix_enabled(pi))
-		virtio_config_size = VTCFG_R_CFG1;
-	else
-		virtio_config_size = VTCFG_R_CFG0;
+	virtio_config_size = VIRTIO_PCI_CONFIG_OFF(pci_msix_enabled(pi));
 
 	if (offset >= virtio_config_size) {
 		/*
@@ -623,39 +619,39 @@
 	}
 
 	switch (offset) {
-	case VTCFG_R_HOSTCAP:
+	case VIRTIO_PCI_HOST_FEATURES:
 		value = vc->vc_hv_caps;
 		break;
-	case VTCFG_R_GUESTCAP:
+	case VIRTIO_PCI_GUEST_FEATURES:
 		value = vs->vs_negotiated_caps;
 		break;
-	case VTCFG_R_PFN:
+	case VIRTIO_PCI_QUEUE_PFN:
 		if (vs->vs_curq < vc->vc_nvq)
 			value = vs->vs_queues[vs->vs_curq].vq_pfn;
 		break;
-	case VTCFG_R_QNUM:
+	case VIRTIO_PCI_QUEUE_NUM:
 		value = vs->vs_curq < vc->vc_nvq ?
 		    vs->vs_queues[vs->vs_curq].vq_qsize : 0;
 		break;
-	case VTCFG_R_QSEL:
+	case VIRTIO_PCI_QUEUE_SEL:
 		value = vs->vs_curq;
 		break;
-	case VTCFG_R_QNOTIFY:
+	case VIRTIO_PCI_QUEUE_NOTIFY:
 		value = 0;	/* XXX */
 		break;
-	case VTCFG_R_STATUS:
+	case VIRTIO_PCI_STATUS:
 		value = vs->vs_status;
 		break;
-	case VTCFG_R_ISR:
+	case VIRTIO_PCI_ISR:
 		value = vs->vs_isr;
 		vs->vs_isr = 0;	/* a read clears this flag */
 		if (value)
 			pci_lintr_deassert(pi);
 		break;
-	case VTCFG_R_CFGVEC:
+	case VIRTIO_MSI_CONFIG_VECTOR:
 		value = vs->vs_msix_cfg_idx;
 		break;
-	case VTCFG_R_QVEC:
+	case VIRTIO_MSI_QUEUE_VECTOR:
 		value = vs->vs_curq < vc->vc_nvq ?
 		    vs->vs_queues[vs->vs_curq].vq_msix_idx :
 		    VIRTIO_MSI_NO_VECTOR;
@@ -706,10 +702,7 @@ vi_pci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
 	if (size != 1 && size != 2 && size != 4)
 		goto bad;
 
-	if (pci_msix_enabled(pi))
-		virtio_config_size = VTCFG_R_CFG1;
-	else
-		virtio_config_size = VTCFG_R_CFG0;
+	virtio_config_size = VIRTIO_PCI_CONFIG_OFF(pci_msix_enabled(pi));
 
 	if (offset >= virtio_config_size) {
 		/*
@@ -747,18 +740,18 @@
 	}
 
 	switch (offset) {
-	case VTCFG_R_GUESTCAP:
+	case VIRTIO_PCI_GUEST_FEATURES:
 		vs->vs_negotiated_caps = value & vc->vc_hv_caps;
 		if (vc->vc_apply_features)
 			(*vc->vc_apply_features)(DEV_SOFTC(vs),
 			    vs->vs_negotiated_caps);
 		break;
-	case VTCFG_R_PFN:
+	case VIRTIO_PCI_QUEUE_PFN:
 		if (vs->vs_curq >= vc->vc_nvq)
 			goto bad_qindex;
 		vi_vq_init(vs, value);
 		break;
-	case VTCFG_R_QSEL:
+	case VIRTIO_PCI_QUEUE_SEL:
 		/*
 		 * Note that the guest is allowed to select an
 		 * invalid queue; we just need to return a QNUM
@@ -766,7 +759,7 @@
 		 */
 		vs->vs_curq = value;
 		break;
-	case VTCFG_R_QNOTIFY:
+	case VIRTIO_PCI_QUEUE_NOTIFY:
 		if (value >= vc->vc_nvq) {
 			EPRINTLN("%s: queue %d notify out of range",
 			    name, (int)value);
@@ -782,15 +775,15 @@
 			    "%s: qnotify queue %d: missing vq/vc notify",
 			    name, (int)value);
 		break;
-	case VTCFG_R_STATUS:
+	case VIRTIO_PCI_STATUS:
 		vs->vs_status = value;
 		if (value == 0)
 			(*vc->vc_reset)(DEV_SOFTC(vs));
 		break;
-	case VTCFG_R_CFGVEC:
+	case VIRTIO_MSI_CONFIG_VECTOR:
 		vs->vs_msix_cfg_idx = value;
 		break;
-	case VTCFG_R_QVEC:
+	case VIRTIO_MSI_QUEUE_VECTOR:
 		if (vs->vs_curq >= vc->vc_nvq)
 			goto bad_qindex;
 		vq = &vs->vs_queues[vs->vs_curq];
@@ -896,7 +889,7 @@ vi_pci_snapshot_queues(struct virtio_softc *vs, struct vm_snapshot_meta *meta)
 		SNAPSHOT_VAR_OR_LEAVE(vq->vq_pfn, meta, ret, done);
 
-		addr_size = vq->vq_qsize * sizeof(struct virtio_desc);
+		addr_size = vq->vq_qsize * sizeof(struct vring_desc);
 		SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(vq->vq_desc, addr_size,
 		    false, meta, ret, done);
@@ -908,8 +901,8 @@ vi_pci_snapshot_queues(struct virtio_softc *vs, struct vm_snapshot_meta *meta)
 		SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(vq->vq_used, addr_size,
 		    false, meta, ret, done);
 
-		SNAPSHOT_BUF_OR_LEAVE(vq->vq_desc, vring_size(vq->vq_qsize),
-		    meta, ret, done);
+		SNAPSHOT_BUF_OR_LEAVE(vq->vq_desc,
+		    vring_size_aligned(vq->vq_qsize), meta, ret, done);
 	}
 
 done:
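
For reference, the legacy layout constants used above now come from
<dev/virtio/pci/virtio_pci_legacy_var.h>. A sketch of the macro that
replaced the two if/else blocks (definition as recalled from that header;
shown for illustration only):

	#define VIRTIO_PCI_CONFIG_OFF(msix_enabled)	((msix_enabled) ? 24 : 20)

That is, the device-specific config area starts at offset 24 when the
MSI-X vector registers (offsets 20 and 22) are present, and at 20
otherwise, which is exactly what the removed VTCFG_R_CFG1/VTCFG_R_CFG0
pair encoded.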

usr.sbin/bhyve/virtio.h

@@ -28,11 +28,15 @@
  * $FreeBSD$
  */
 
-#ifndef _VIRTIO_H_
-#define _VIRTIO_H_
+#ifndef _BHYVE_VIRTIO_H_
+#define _BHYVE_VIRTIO_H_
 
 #include <machine/atomic.h>
 
+#include <dev/virtio/virtio.h>
+#include <dev/virtio/virtio_ring.h>
+#include <dev/virtio/pci/virtio_pci_var.h>
+
 /*
  * These are derived from several virtio specifications.
  *
@@ -125,39 +129,6 @@
  */
 #define VRING_ALIGN	4096
 
-#define VRING_DESC_F_NEXT	(1 << 0)
-#define VRING_DESC_F_WRITE	(1 << 1)
-#define VRING_DESC_F_INDIRECT	(1 << 2)
-
-struct virtio_desc {			/* AKA vring_desc */
-	uint64_t	vd_addr;	/* guest physical address */
-	uint32_t	vd_len;		/* length of scatter/gather seg */
-	uint16_t	vd_flags;	/* VRING_F_DESC_* */
-	uint16_t	vd_next;	/* next desc if F_NEXT */
-} __packed;
-
-struct virtio_used {			/* AKA vring_used_elem */
-	uint32_t	vu_idx;		/* head of used descriptor chain */
-	uint32_t	vu_tlen;	/* length written-to */
-} __packed;
-
-#define VRING_AVAIL_F_NO_INTERRUPT	1
-
-struct vring_avail {
-	uint16_t	va_flags;	/* VRING_AVAIL_F_* */
-	uint16_t	va_idx;		/* counts to 65535, then cycles */
-	uint16_t	va_ring[];	/* size N, reported in QNUM value */
-	/* uint16_t	va_used_event;	-- after N ring entries */
-} __packed;
-
-#define VRING_USED_F_NO_NOTIFY	1
-
-struct vring_used {
-	uint16_t	vu_flags;	/* VRING_USED_F_* */
-	uint16_t	vu_idx;		/* counts to 65535, then cycles */
-	struct virtio_used vu_ring[];	/* size N */
-	/* uint16_t	vu_avail_event;	-- after N ring entries */
-} __packed;
-
 /*
  * The address of any given virtual queue is determined by a single
  * Page Frame Number register.  The guest writes the PFN into the
@@ -190,23 +161,6 @@ struct vring_used {
  */
 #define VRING_PFN		12
 
-/*
- * Virtio device types
- *
- * XXX Should really be merged with <dev/virtio/virtio.h> defines
- */
-#define VIRTIO_TYPE_NET		1
-#define VIRTIO_TYPE_BLOCK	2
-#define VIRTIO_TYPE_CONSOLE	3
-#define VIRTIO_TYPE_ENTROPY	4
-#define VIRTIO_TYPE_BALLOON	5
-#define VIRTIO_TYPE_IOMEMORY	6
-#define VIRTIO_TYPE_RPMSG	7
-#define VIRTIO_TYPE_SCSI	8
-#define VIRTIO_TYPE_9P		9
-
-/* experimental IDs start at 65535 and work down */
-
 /*
  * PCI vendor/device IDs
  */
@@ -218,71 +172,11 @@ struct vring_used {
 #define VIRTIO_DEV_SCSI		0x1008
 #define VIRTIO_DEV_9P		0x1009
 
-/*
- * PCI config space constants.
- *
- * If MSI-X is enabled, the ISR register is generally not used,
- * and the configuration vector and queue vector appear at offsets
- * 20 and 22 with the remaining configuration registers at 24.
- * If MSI-X is not enabled, those two registers disappear and
- * the remaining configuration registers start at offset 20.
- */
-#define VTCFG_R_HOSTCAP		0
-#define VTCFG_R_GUESTCAP	4
-#define VTCFG_R_PFN		8
-#define VTCFG_R_QNUM		12
-#define VTCFG_R_QSEL		14
-#define VTCFG_R_QNOTIFY		16
-#define VTCFG_R_STATUS		18
-#define VTCFG_R_ISR		19
-#define VTCFG_R_CFGVEC		20
-#define VTCFG_R_QVEC		22
-#define VTCFG_R_CFG0		20	/* No MSI-X */
-#define VTCFG_R_CFG1		24	/* With MSI-X */
-#define VTCFG_R_MSIX		20
-
-/*
- * Bits in VTCFG_R_STATUS.  Guests need not actually set any of these,
- * but a guest writing 0 to this register means "please reset".
- */
-#define VTCFG_STATUS_ACK	0x01	/* guest OS has acknowledged dev */
-#define VTCFG_STATUS_DRIVER	0x02	/* guest OS driver is loaded */
-#define VTCFG_STATUS_DRIVER_OK	0x04	/* guest OS driver ready */
-#define VTCFG_STATUS_FAILED	0x80	/* guest has given up on this dev */
-
-/*
- * Bits in VTCFG_R_ISR.  These apply only if not using MSI-X.
- *
- * (We don't [yet?] ever use CONF_CHANGED.)
- */
-#define VTCFG_ISR_QUEUES	0x01	/* re-scan queues */
-#define VTCFG_ISR_CONF_CHANGED	0x80	/* configuration changed */
-
-#define VIRTIO_MSI_NO_VECTOR	0xFFFF
-
-/*
- * Feature flags.
- * Note: bits 0 through 23 are reserved to each device type.
- */
-#define VIRTIO_F_NOTIFY_ON_EMPTY	(1 << 24)
-#define VIRTIO_RING_F_INDIRECT_DESC	(1 << 28)
-#define VIRTIO_RING_F_EVENT_IDX		(1 << 29)
-
 /* From section 2.3, "Virtqueue Configuration", of the virtio specification */
-static inline size_t
-vring_size(u_int qsz)
+static inline int
+vring_size_aligned(u_int qsz)
 {
-	size_t size;
-
-	/* constant 3 below = va_flags, va_idx, va_used_event */
-	size = sizeof(struct virtio_desc) * qsz + sizeof(uint16_t) * (3 + qsz);
-	size = roundup2(size, VRING_ALIGN);
-
-	/* constant 3 below = vu_flags, vu_idx, vu_avail_event */
-	size += sizeof(uint16_t) * 3 + sizeof(struct virtio_used) * qsz;
-	size = roundup2(size, VRING_ALIGN);
-	return (size);
+	return (roundup2(vring_size(qsz, VRING_ALIGN), VRING_ALIGN));
 }
 
 struct vmctx;
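
Worked example (a sketch; numbers follow from the struct sizes in
<dev/virtio/virtio_ring.h>). For a 256-entry queue:

	descriptors: 256 * sizeof(struct vring_desc)    = 4096 bytes
	avail ring:  4 + 256 * 2 + 2 (used_event)       =  518 bytes
	  -> vring_size() aligns here: roundup2(4614, 4096) = 8192
	used ring:   4 + 256 * 8 + 2 (avail_event)      = 2054 bytes

so vring_size(256, 4096) = 8192 + 2054 = 10246, and vring_size_aligned()
rounds the total up to a page multiple: roundup2(10246, 4096) = 12288.
The kernel's vring_size() only aligns between the avail and used rings;
the extra roundup2() preserves the old bhyve behavior of also
page-aligning the end of the ring.
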
@@ -397,23 +291,23 @@ struct vqueue_info {
 	uint16_t vq_num;	/* we're the num'th queue in the softc */
 	uint16_t vq_flags;	/* flags (see above) */
-	uint16_t vq_last_avail;	/* a recent value of vq_avail->va_idx */
+	uint16_t vq_last_avail;	/* a recent value of vq_avail->idx */
 	uint16_t vq_next_used;	/* index of the next used slot to be filled */
-	uint16_t vq_save_used;	/* saved vq_used->vu_idx; see vq_endchains */
+	uint16_t vq_save_used;	/* saved vq_used->idx; see vq_endchains */
 	uint16_t vq_msix_idx;	/* MSI-X index, or VIRTIO_MSI_NO_VECTOR */
 
 	uint32_t vq_pfn;	/* PFN of virt queue (not shifted!) */
 
-	volatile struct virtio_desc *vq_desc;	/* descriptor array */
+	volatile struct vring_desc *vq_desc;	/* descriptor array */
 	volatile struct vring_avail *vq_avail;	/* the "avail" ring */
 	volatile struct vring_used *vq_used;	/* the "used" ring */
 };
 
 /* as noted above, these are sort of backwards, name-wise */
 #define VQ_AVAIL_EVENT_IDX(vq) \
-	(*(volatile uint16_t *)&(vq)->vq_used->vu_ring[(vq)->vq_qsize])
+	(*(volatile uint16_t *)&(vq)->vq_used->ring[(vq)->vq_qsize])
 #define VQ_USED_EVENT_IDX(vq) \
-	((vq)->vq_avail->va_ring[(vq)->vq_qsize])
+	((vq)->vq_avail->ring[(vq)->vq_qsize])
 
 /*
  * Is this ring ready for I/O?
@@ -434,7 +328,7 @@ vq_has_descs(struct vqueue_info *vq)
 {
 	return (vq_ring_ready(vq) && vq->vq_last_avail !=
-	    vq->vq_avail->va_idx);
+	    vq->vq_avail->idx);
 }
 
 /*
@@ -449,7 +343,7 @@ vq_interrupt(struct virtio_softc *vs, struct vqueue_info *vq)
 		pci_generate_msix(vs->vs_pi, vq->vq_msix_idx);
 	else {
 		VS_LOCK(vs);
-		vs->vs_isr |= VTCFG_ISR_QUEUES;
+		vs->vs_isr |= VIRTIO_PCI_ISR_INTR;
 		pci_generate_msi(vs->vs_pi, 0);
 		pci_lintr_assert(vs->vs_pi);
 		VS_UNLOCK(vs);
@@ -460,11 +354,11 @@ static inline void
 vq_kick_enable(struct vqueue_info *vq)
 {
 
-	vq->vq_used->vu_flags &= ~VRING_USED_F_NO_NOTIFY;
+	vq->vq_used->flags &= ~VRING_USED_F_NO_NOTIFY;
 	/*
-	 * Full memory barrier to make sure the store to vu_flags
-	 * happens before the load from va_idx, which results from
-	 * a subsequent call to vq_has_descs().
+	 * Full memory barrier to make sure the store to vq_used->flags
+	 * happens before the load from vq_avail->idx, which results from a
+	 * subsequent call to vq_has_descs().
 	 */
 	atomic_thread_fence_seq_cst();
 }
@@ -473,7 +367,7 @@ static inline void
 vq_kick_disable(struct vqueue_info *vq)
 {
 
-	vq->vq_used->vu_flags |= VRING_USED_F_NO_NOTIFY;
+	vq->vq_used->flags |= VRING_USED_F_NO_NOTIFY;
 }
 
 struct iovec;
@@ -502,4 +396,4 @@ int vi_pci_snapshot(struct vm_snapshot_meta *meta);
 int vi_pci_pause(struct vmctx *ctx, struct pci_devinst *pi);
 int vi_pci_resume(struct vmctx *ctx, struct pci_devinst *pi);
 #endif
-#endif	/* _VIRTIO_H_ */
+#endif	/* _BHYVE_VIRTIO_H_ */