76acc41fb7
Re-structure Xen HVM support so that:
	- Xen is detected and hypercalls can be performed very early in
	  system startup.
	- Xen interrupt services are implemented using FreeBSD's native
	  interrupt delivery infrastructure.
	- the Xen interrupt service implementation is shared between PV
	  and HVM guests.
	- Xen interrupt handlers can optionally use a filter handler in
	  order to avoid the overhead of dispatch to an interrupt thread.
	- interrupt load can be distributed among all available CPUs.
	- the overhead of accessing the emulated local and I/O apics on
	  HVM is removed for event channel port events.
	- a similar optimization can eventually, and fairly easily, be
	  used to optimize MSI.

Early Xen detection, HVM refactoring, PVHVM interrupt infrastructure,
and misc Xen cleanups:

Sponsored by: Spectra Logic Corporation

Unification of PV & HVM interrupt infrastructure, bug fixes, and misc
Xen cleanups:

Submitted by: Roger Pau Monné
Sponsored by: Citrix Systems R&D

sys/x86/x86/local_apic.c:
sys/amd64/include/apicvar.h:
sys/i386/include/apicvar.h:
sys/amd64/amd64/apic_vector.S:
sys/i386/i386/apic_vector.s:
sys/amd64/amd64/machdep.c:
sys/i386/i386/machdep.c:
sys/i386/xen/exception.s:
sys/x86/include/segments.h:
	Reserve IDT vector 0x93 for the Xen event channel upcall
	interrupt handler.  On Hypervisors that support the direct
	vector callback feature, we can request that this vector be
	called directly by an injected HVM interrupt event, instead of
	a simulated PCI interrupt on the Xen platform PCI device.  This
	avoids all of the overhead of dealing with the emulated I/O
	APIC and local APIC.  It also means that the Hypervisor can
	inject these events on any CPU, allowing upcalls for different
	ports to be handled in parallel.

sys/amd64/amd64/mp_machdep.c:
sys/i386/i386/mp_machdep.c:
	Map Xen per-vcpu area during AP startup.

sys/amd64/include/intr_machdep.h:
sys/i386/include/intr_machdep.h:
	Increase the FreeBSD IRQ vector table to include space for
	event channel interrupt sources.

sys/amd64/include/pcpu.h:
sys/i386/include/pcpu.h:
	Remove Xen HVM per-cpu variable data.  These fields are now
	allocated via the dynamic per-cpu scheme (see the generic DPCPU
	sketch below).  See xen_intr.c for details.

sys/amd64/include/xen/hypercall.h:
sys/dev/xen/blkback/blkback.c:
sys/i386/include/xen/xenvar.h:
sys/i386/xen/clock.c:
sys/i386/xen/xen_machdep.c:
sys/xen/gnttab.c:
	Prefer FreeBSD primitives to Linux ones in Xen support code.

sys/amd64/include/xen/xen-os.h:
sys/i386/include/xen/xen-os.h:
sys/xen/xen-os.h:
sys/dev/xen/balloon/balloon.c:
sys/dev/xen/blkback/blkback.c:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/console/xencons_ring.c:
sys/dev/xen/control/control.c:
sys/dev/xen/netback/netback.c:
sys/dev/xen/netfront/netfront.c:
sys/dev/xen/xenpci/xenpci.c:
sys/i386/i386/machdep.c:
sys/i386/include/pmap.h:
sys/i386/include/xen/xenfunc.h:
sys/i386/isa/npx.c:
sys/i386/xen/clock.c:
sys/i386/xen/mp_machdep.c:
sys/i386/xen/mptable.c:
sys/i386/xen/xen_clock_util.c:
sys/i386/xen/xen_machdep.c:
sys/i386/xen/xen_rtc.c:
sys/xen/evtchn/evtchn_dev.c:
sys/xen/features.c:
sys/xen/gnttab.c:
sys/xen/gnttab.h:
sys/xen/hvm.h:
sys/xen/xenbus/xenbus.c:
sys/xen/xenbus/xenbus_if.m:
sys/xen/xenbus/xenbusb_front.c:
sys/xen/xenbus/xenbusvar.h:
sys/xen/xenstore/xenstore.c:
sys/xen/xenstore/xenstore_dev.c:
sys/xen/xenstore/xenstorevar.h:
	Pull common Xen OS support functions/settings into
	xen/xen-os.h.

sys/amd64/include/xen/xen-os.h:
sys/i386/include/xen/xen-os.h:
sys/xen/xen-os.h:
	Remove constants, macros, and functions unused in FreeBSD's
	Xen support.
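
The dynamic per-cpu allocation mentioned for the pcpu.h change can be
illustrated with a minimal sketch using the kernel's DPCPU macros from
<sys/pcpu.h>.  The structure, field, and function names below are
hypothetical and are not the ones used in xen_intr.c:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pcpu.h>

/* Hypothetical per-vCPU bookkeeping; not the layout used by xen_intr.c. */
struct xen_pcpu_example {
	int	xpe_event_count;
};

/* Space for one instance per CPU is reserved in the dynamic per-cpu area. */
DPCPU_DEFINE(struct xen_pcpu_example, xen_pcpu_example);

static void
xen_pcpu_example_bump(void)
{

	/* Stay on the current CPU while touching its private copy. */
	critical_enter();
	DPCPU_PTR(xen_pcpu_example)->xpe_event_count++;
	critical_exit();
}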

sys/xen/xen-os.h:
sys/i386/xen/xen_machdep.c:
sys/x86/xen/hvm.c:
	Introduce new functions xen_domain(), xen_pv_domain(), and
	xen_hvm_domain().  These are used in favor of #ifdefs so that
	FreeBSD can dynamically detect and adapt to the presence of a
	hypervisor (illustrated in the sketch further below).  The goal
	is to have an HVM optimized GENERIC, but more is necessary
	before this is possible.

sys/amd64/amd64/machdep.c:
sys/dev/xen/xenpci/xenpcivar.h:
sys/dev/xen/xenpci/xenpci.c:
sys/x86/xen/hvm.c:
sys/sys/kernel.h:
	Refactor magic ioport, Hypercall table and Hypervisor shared
	information page setup, and move it to a dedicated HVM support
	module.

	HVM mode initialization is now triggered during the
	SI_SUB_HYPERVISOR phase of system startup.  This currently
	occurs just after the kernel VM is fully setup, which is just
	enough infrastructure to allow the hypercall table and shared
	info page to be properly mapped.

sys/xen/hvm.h:
sys/x86/xen/hvm.c:
	Add definitions and a method for configuring Hypervisor event
	delivery via a direct vector callback.

sys/amd64/include/xen/xen-os.h:
sys/x86/xen/hvm.c:
sys/conf/files:
sys/conf/files.amd64:
sys/conf/files.i386:
	Adjust kernel build to reflect the refactoring of early Xen
	startup code and Xen interrupt services.

sys/dev/xen/blkback/blkback.c:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
sys/dev/xen/control/control.c:
sys/dev/xen/evtchn/evtchn_dev.c:
sys/dev/xen/netback/netback.c:
sys/dev/xen/netfront/netfront.c:
sys/xen/xenstore/xenstore.c:
sys/xen/evtchn/evtchn_dev.c:
sys/dev/xen/console/console.c:
sys/dev/xen/console/xencons_ring.c:
	Adjust drivers to use new xen_intr_*() API.

sys/dev/xen/blkback/blkback.c:
	Since blkback defers all event handling to a taskqueue, convert
	this taskqueue to a "fast" taskqueue, and schedule it via an
	interrupt filter.  This avoids an unnecessary ithread context
	switch (a generic sketch of this pattern appears at the end of
	these notes).

sys/xen/xenstore/xenstore.c:
	The xenstore driver is MPSAFE.  Indicate as much when
	registering its interrupt handler.

sys/xen/xenbus/xenbus.c:
sys/xen/xenbus/xenbusvar.h:
	Remove unused event channel APIs.

sys/xen/evtchn.h:
	Remove all kernel Xen interrupt service API definitions from
	this file.  It is now only used for structure and ioctl
	definitions related to the event channel userland device
	driver.

	Update the definitions in this file to match those from NetBSD.
	Implementing this interface will be necessary for Dom0 support.

sys/xen/evtchn/evtchnvar.h:
	Add a header file for implementation-internal APIs related to
	managing event channel event delivery.  This is used to allow,
	for example, the event channel userland device driver to access
	low-level routines that typical kernel consumers of event
	channel services should never access.

sys/xen/interface/event_channel.h:
sys/xen/xen_intr.h:
	Standardize on the evtchn_port_t type for referring to an event
	channel port id.  In order to prevent low-level event channel
	APIs from leaking to kernel consumers who should not have
	access to this data, the type is defined twice: once in the Xen
	provided event_channel.h, and again in xen/xen_intr.h.  The
	double declaration is protected by __XEN_EVTCHN_PORT_DEFINED__
	to ensure it is never declared twice within a given compilation
	unit.

sys/xen/xen_intr.h:
sys/xen/evtchn/evtchn.c:
sys/x86/xen/xen_intr.c:
sys/dev/xen/xenpci/evtchn.c:
sys/dev/xen/xenpci/xenpcivar.h:
	New implementation of Xen interrupt services.  This is similar
	in many respects to the i386 PV implementation, with the
	exception that events bound to event channel ports (i.e. not
	IPI, virtual IRQ, or physical IRQ) are further optimized to
	avoid mask/unmask operations that aren't necessary for these
	edge triggered events.

	Stubs exist for supporting physical IRQ binding, but will need
	additional work before this implementation can be fully shared
	between PV and HVM.
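
A minimal, hypothetical attach routine showing how the xen_domain(),
xen_pv_domain(), and xen_hvm_domain() predicates introduced above can
replace compile-time #ifdefs; the driver name and its branch contents
are illustrative only:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/bus.h>

#include <xen/xen-os.h>

static int
example_xen_attach(device_t dev)
{

	if (!xen_domain())
		return (ENXIO);		/* Not running under Xen at all. */

	if (xen_hvm_domain()) {
		/* Hardware-assisted guest: e.g. rely on the vector callback. */
	} else if (xen_pv_domain()) {
		/* Fully paravirtualized guest: PV-specific setup. */
	}
	return (0);
}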

sys/amd64/amd64/mp_machdep.c:
sys/i386/i386/mp_machdep.c:
sys/i386/xen/mp_machdep.c:
sys/x86/xen/hvm.c:
	Add support for placing vcpu_info into an arbitrary memory page
	instead of using HYPERVISOR_shared_info->vcpu_info.  This
	allows the creation of domains with more than 32 vcpus.

sys/i386/i386/machdep.c:
sys/i386/xen/clock.c:
sys/i386/xen/xen_machdep.c:
sys/i386/xen/exception.s:
	Add support for the new event channel implementation.
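
The blkback conversion described above (an interrupt filter that only
schedules a "fast" taskqueue) follows a generic FreeBSD pattern.  A
minimal sketch, with every name hypothetical rather than taken from
blkback:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

static struct taskqueue	*example_tq;
static struct task	 example_task;

static void
example_task_fn(void *context, int pending)
{
	/* The real event processing runs here, in taskqueue context. */
}

static int
example_intr_filter(void *arg)
{

	/*
	 * Runs directly in interrupt context: do nothing beyond
	 * scheduling the fast taskqueue, avoiding an ithread dispatch.
	 */
	taskqueue_enqueue(example_tq, &example_task);
	return (FILTER_HANDLED);
}

static void
example_intr_setup(void)
{

	TASK_INIT(&example_task, 0, example_task_fn, NULL);
	example_tq = taskqueue_create_fast("example_tq", M_NOWAIT,
	    taskqueue_thread_enqueue, &example_tq);
	taskqueue_start_threads(&example_tq, 1, PI_DISK, "example taskq");
}

In a real driver, example_intr_filter() would be the filter routine
passed to the interrupt registration for the event channel, so event
processing never requires an ithread context switch.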

sys/dev/xen/blkfront/block.h (350 lines, 9.7 KiB, C):

/*
 * XenBSD block device driver
 *
 * Copyright (c) 2010-2013 Spectra Logic Corporation
 * Copyright (c) 2009 Scott Long, Yahoo!
 * Copyright (c) 2009 Frank Suchomel, Citrix
 * Copyright (c) 2009 Doug F. Rabson, Citrix
 * Copyright (c) 2005 Kip Macy
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 *
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD$
 */

#ifndef __XEN_BLKFRONT_BLOCK_H__
#define __XEN_BLKFRONT_BLOCK_H__
#include <xen/blkif.h>

/**
 * Given a number of blkif segments, compute the maximum I/O size supported.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment from the maximum supported by the transport to
 *       guarantee we can handle an unaligned transfer without the need to
 *       use a bounce buffer.
 */
#define XBD_SEGS_TO_SIZE(segs) \
	(((segs) - 1) * PAGE_SIZE)
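
/*
 * Illustrative arithmetic, assuming the common x86 PAGE_SIZE of 4 KiB:
 * a transport limit of 11 segments gives XBD_SEGS_TO_SIZE(11) =
 * (11 - 1) * 4096 = 40 KiB of maximum I/O, with the reserved segment
 * covering an unaligned start or end of the transfer.
 */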

/**
 * Compute the maximum number of blkif segments required to represent
 * an I/O of the given size.
 *
 * \note This calculation assumes that all but the first and last segments
 *       of the I/O are fully utilized.
 *
 * \note We reserve a segment to guarantee we can handle an unaligned
 *       transfer without the need to use a bounce buffer.
 */
#define XBD_SIZE_TO_SEGS(size) \
	((size / PAGE_SIZE) + 1)
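
/*
 * Illustrative arithmetic, again assuming a 4 KiB PAGE_SIZE: a 40 KiB
 * transfer requires XBD_SIZE_TO_SEGS(40960) = (40960 / 4096) + 1 = 11
 * segments, the inverse of the example above.
 */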

/**
 * The maximum number of outstanding request blocks (request headers plus
 * additional segment blocks) we will allow in a negotiated block-front/back
 * communication channel.
 */
#define XBD_MAX_REQUESTS	256

/**
 * The maximum mapped region size per request we will allow in a negotiated
 * block-front/back communication channel.
 */
#define XBD_MAX_REQUEST_SIZE \
	MIN(MAXPHYS, XBD_SEGS_TO_SIZE(BLKIF_MAX_SEGMENTS_PER_REQUEST))

/**
 * The maximum number of segments (within a request header and accompanying
 * segment blocks) per request we will allow in a negotiated block-front/back
 * communication channel.
 */
#define XBD_MAX_SEGMENTS_PER_REQUEST \
	(MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST, \
	     XBD_SIZE_TO_SEGS(XBD_MAX_REQUEST_SIZE)))
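
/*
 * Illustrative derivation (the constants are assumptions, not definitions
 * from this header): with MAXPHYS = 128 KiB and
 * BLKIF_MAX_SEGMENTS_PER_REQUEST = 11, XBD_SEGS_TO_SIZE(11) = 40 KiB, so
 * XBD_MAX_REQUEST_SIZE = MIN(128 KiB, 40 KiB) = 40 KiB and
 * XBD_MAX_SEGMENTS_PER_REQUEST = MIN(11, XBD_SIZE_TO_SEGS(40 KiB)) =
 * MIN(11, 11) = 11.
 */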

/**
 * The maximum number of shared memory ring pages we will allow in a
 * negotiated block-front/back communication channel.  Allow enough
 * ring space for all requests to be XBD_MAX_REQUEST_SIZE'd.
 */
#define XBD_MAX_RING_PAGES \
	BLKIF_RING_PAGES(BLKIF_SEGS_TO_BLOCKS(XBD_MAX_SEGMENTS_PER_REQUEST) \
	    * XBD_MAX_REQUESTS)

typedef enum {
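	/*
	 * The low byte of a command's flags records the xbd_q_index_t of
	 * the queue the command is currently linked on (XBD_Q_NONE when it
	 * is not on any queue).
	 */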
	XBDCF_Q_MASK		= 0xFF,
	/* This command has contributed to xbd_qfrozen_cnt. */
	XBDCF_FROZEN		= 1<<8,
	/* Freeze the command queue on dispatch (i.e. single step command). */
	XBDCF_Q_FREEZE		= 1<<9,
	/* Bus DMA returned EINPROGRESS for this command. */
	XBDCF_ASYNC_MAPPING	= 1<<10,
	XBDCF_INITIALIZER	= XBDCF_Q_MASK
} xbdc_flag_t;

struct xbd_command;
typedef void xbd_cbcf_t(struct xbd_command *);

struct xbd_command {
	TAILQ_ENTRY(xbd_command) cm_link;
	struct xbd_softc	*cm_sc;
	xbdc_flag_t		 cm_flags;
	bus_dmamap_t		 cm_map;
	uint64_t		 cm_id;
	grant_ref_t		*cm_sg_refs;
	struct bio		*cm_bp;
	grant_ref_t		 cm_gref_head;
	void			*cm_data;
	size_t			 cm_datalen;
	u_int			 cm_nseg;
	int			 cm_operation;
	blkif_sector_t		 cm_sector_number;
	int			 cm_status;
	xbd_cbcf_t		*cm_complete;
};

typedef enum {
	XBD_Q_FREE,
	XBD_Q_READY,
	XBD_Q_BUSY,
	XBD_Q_COMPLETE,
	XBD_Q_BIO,
	XBD_Q_COUNT,
	XBD_Q_NONE = XBDCF_Q_MASK
} xbd_q_index_t;

typedef struct xbd_cm_q {
	TAILQ_HEAD(, xbd_command) q_tailq;
	uint32_t		  q_length;
	uint32_t		  q_max;
} xbd_cm_q_t;

typedef enum {
	XBD_STATE_DISCONNECTED,
	XBD_STATE_CONNECTED,
	XBD_STATE_SUSPENDED
} xbd_state_t;

typedef enum {
	XBDF_NONE	  = 0,
	XBDF_OPEN	  = 1 << 0,	/* drive is open (can't shut down) */
	XBDF_BARRIER	  = 1 << 1,	/* backend supports barriers */
	XBDF_FLUSH	  = 1 << 2,	/* backend supports flush */
	XBDF_READY	  = 1 << 3,	/* Is ready */
	XBDF_CM_SHORTAGE  = 1 << 4,	/* Free cm resource shortage active. */
	XBDF_GNT_SHORTAGE = 1 << 5,	/* Grant ref resource shortage active */
	XBDF_WAIT_IDLE	  = 1 << 6	/*
					 * No new work until outstanding work
					 * completes.
					 */
} xbd_flag_t;

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.
 */
struct xbd_softc {
	device_t			 xbd_dev;
	struct disk			*xbd_disk;	/* disk params */
	struct bio_queue_head		 xbd_bioq;	/* sort queue */
	int				 xbd_unit;
	xbd_flag_t			 xbd_flags;
	int				 xbd_qfrozen_cnt;
	int				 xbd_vdevice;
	xbd_state_t			 xbd_state;
	u_int				 xbd_ring_pages;
	uint32_t			 xbd_max_requests;
	uint32_t			 xbd_max_request_segments;
	uint32_t			 xbd_max_request_blocks;
	uint32_t			 xbd_max_request_size;
	grant_ref_t			 xbd_ring_ref[XBD_MAX_RING_PAGES];
	blkif_front_ring_t		 xbd_ring;
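	/* Event channel binding handle obtained from the xen_intr_*() API. */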
	xen_intr_handle_t		 xen_intr_handle;
	struct gnttab_free_callback	 xbd_callback;
	xbd_cm_q_t			 xbd_cm_q[XBD_Q_COUNT];
	bus_dma_tag_t			 xbd_io_dmat;

	/**
	 * The number of people holding this device open.  We won't allow a
	 * hot-unplug unless this is 0.
	 */
	int				 xbd_users;
	struct mtx			 xbd_io_lock;

	struct xbd_command		*xbd_shadow;
};

int xbd_instance_create(struct xbd_softc *, blkif_sector_t sectors, int device,
			uint16_t vdisk_info, unsigned long sector_size);

static inline void
xbd_added_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_cm_q *cmq;

	cmq = &sc->xbd_cm_q[index];
	cmq->q_length++;
	if (cmq->q_length > cmq->q_max)
		cmq->q_max = cmq->q_length;
}

static inline void
xbd_removed_qentry(struct xbd_softc *sc, xbd_q_index_t index)
{
	sc->xbd_cm_q[index].q_length--;
}

static inline uint32_t
xbd_queue_length(struct xbd_softc *sc, xbd_q_index_t index)
{
	return (sc->xbd_cm_q[index].q_length);
}

static inline void
xbd_initq_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_cm_q *cmq;

	cmq = &sc->xbd_cm_q[index];
	TAILQ_INIT(&cmq->q_tailq);
	cmq->q_length = 0;
	cmq->q_max = 0;
}

static inline void
xbd_enqueue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));
	if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
		panic("%s: command %p is already on queue %d.",
		    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
	TAILQ_INSERT_TAIL(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= index;
	xbd_added_qentry(cm->cm_sc, index);
}

static inline void
xbd_requeue_cm(struct xbd_command *cm, xbd_q_index_t index)
{
	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));
	if ((cm->cm_flags & XBDCF_Q_MASK) != XBD_Q_NONE)
		panic("%s: command %p is already on queue %d.",
		    __func__, cm, cm->cm_flags & XBDCF_Q_MASK);
	TAILQ_INSERT_HEAD(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= index;
	xbd_added_qentry(cm->cm_sc, index);
}

static inline struct xbd_command *
xbd_dequeue_cm(struct xbd_softc *sc, xbd_q_index_t index)
{
	struct xbd_command *cm;

	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));

	if ((cm = TAILQ_FIRST(&sc->xbd_cm_q[index].q_tailq)) != NULL) {
		if ((cm->cm_flags & XBDCF_Q_MASK) != index) {
			panic("%s: command %p is on queue %d, "
			    "not specified queue %d",
			    __func__, cm,
			    cm->cm_flags & XBDCF_Q_MASK,
			    index);
		}
		TAILQ_REMOVE(&sc->xbd_cm_q[index].q_tailq, cm, cm_link);
		cm->cm_flags &= ~XBDCF_Q_MASK;
		cm->cm_flags |= XBD_Q_NONE;
		xbd_removed_qentry(cm->cm_sc, index);
	}
	return (cm);
}

static inline void
xbd_remove_cm(struct xbd_command *cm, xbd_q_index_t expected_index)
{
	xbd_q_index_t index;

	index = cm->cm_flags & XBDCF_Q_MASK;

	KASSERT(index != XBD_Q_BIO,
	    ("%s: Commands cannot access the bio queue.", __func__));

	if (index != expected_index) {
		panic("%s: command %p is on queue %d, not specified queue %d",
		    __func__, cm, index, expected_index);
	}
	TAILQ_REMOVE(&cm->cm_sc->xbd_cm_q[index].q_tailq, cm, cm_link);
	cm->cm_flags &= ~XBDCF_Q_MASK;
	cm->cm_flags |= XBD_Q_NONE;
	xbd_removed_qentry(cm->cm_sc, index);
}

static inline void
xbd_initq_bio(struct xbd_softc *sc)
{
	bioq_init(&sc->xbd_bioq);
}

static inline void
xbd_enqueue_bio(struct xbd_softc *sc, struct bio *bp)
{
	bioq_insert_tail(&sc->xbd_bioq, bp);
	xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline void
xbd_requeue_bio(struct xbd_softc *sc, struct bio *bp)
{
	bioq_insert_head(&sc->xbd_bioq, bp);
	xbd_added_qentry(sc, XBD_Q_BIO);
}

static inline struct bio *
xbd_dequeue_bio(struct xbd_softc *sc)
{
	struct bio *bp;

	if ((bp = bioq_first(&sc->xbd_bioq)) != NULL) {
		bioq_remove(&sc->xbd_bioq, bp);
		xbd_removed_qentry(sc, XBD_Q_BIO);
	}
	return (bp);
}

static inline void
xbd_initqs(struct xbd_softc *sc)
{
	u_int index;

	for (index = 0; index < XBD_Q_COUNT; index++)
		xbd_initq_cm(sc, index);

	xbd_initq_bio(sc);
}

#endif /* __XEN_BLKFRONT_BLOCK_H__ */