Implement vector callback for PVHVM and unify event channel implementations
Re-structure Xen HVM support so that:
- Xen is detected and hypercalls can be performed very
early in system startup.
- Xen interrupt services are implemented using FreeBSD's native
interrupt delivery infrastructure.
- the Xen interrupt service implementation is shared between PV
and HVM guests.
- Xen interrupt handlers can optionally use a filter handler
in order to avoid the overhead of dispatch to an interrupt
thread.
- interrupt load can be distributed among all available CPUs.
- the overhead of accessing the emulated local and I/O apics
on HVM is removed for event channel port events.
- a similar optimization can eventually, and fairly easily,
be used to optimize MSI.
Early Xen detection, HVM refactoring, PVHVM interrupt infrastructure,
and misc Xen cleanups:
Sponsored by: Spectra Logic Corporation
Unification of PV & HVM interrupt infrastructure, bug fixes,
and misc Xen cleanups:
Submitted by: Roger Pau Monné
Sponsored by: Citrix Systems R&D
sys/x86/x86/local_apic.c:
sys/amd64/include/apicvar.h:
sys/i386/include/apicvar.h:
sys/amd64/amd64/apic_vector.S:
sys/i386/i386/apic_vector.s:
sys/amd64/amd64/machdep.c:
sys/i386/i386/machdep.c:
sys/i386/xen/exception.s:
sys/x86/include/segments.h:
Reserve IDT vector 0x93 for the Xen event channel upcall
interrupt handler. On Hypervisors that support the direct
vector callback feature, we can request that this vector be
called directly by an injected HVM interrupt event, instead
of a simulated PCI interrupt on the Xen platform PCI device.
This avoids all of the overhead of dealing with the emulated
I/O APIC and local APIC. It also means that the Hypervisor
can inject these events on any CPU, allowing upcalls for
different ports to be handled in parallel.
sys/amd64/amd64/mp_machdep.c:
sys/i386/i386/mp_machdep.c:
Map Xen per-vcpu area during AP startup.
sys/amd64/include/intr_machdep.h:
sys/i386/include/intr_machdep.h:
Increase the FreeBSD IRQ vector table to include space
for event channel interrupt sources.
sys/amd64/include/pcpu.h:
sys/i386/include/pcpu.h:
Remove Xen HVM per-cpu variable data. These fields are now
allocated via the dynamic per-cpu scheme. See xen_intr.c
for details.
sys/amd64/include/xen/hypercall.h:
sys/dev/xen/blkback/blkback.c:
sys/i386/include/xen/xenvar.h:
sys/i386/xen/clock.c:
sys/i386/xen/xen_machdep.c:
sys/xen/gnttab.c:
Prefer FreeBSD primitives to Linux ones in Xen support code.
sys/amd64/include/xen/xen-os.h:
sys/i386/include/xen/xen-os.h:
sys/xen/xen-os.h:
sys/dev/xen/balloon/balloon.c:
sys/dev/xen/blkback/blkback.c:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/console/xencons_ring.c:
sys/dev/xen/control/control.c:
sys/dev/xen/netback/netback.c:
sys/dev/xen/netfront/netfront.c:
sys/dev/xen/xenpci/xenpci.c:
sys/i386/i386/machdep.c:
sys/i386/include/pmap.h:
sys/i386/include/xen/xenfunc.h:
sys/i386/isa/npx.c:
sys/i386/xen/clock.c:
sys/i386/xen/mp_machdep.c:
sys/i386/xen/mptable.c:
sys/i386/xen/xen_clock_util.c:
sys/i386/xen/xen_machdep.c:
sys/i386/xen/xen_rtc.c:
sys/xen/evtchn/evtchn_dev.c:
sys/xen/features.c:
sys/xen/gnttab.c:
sys/xen/gnttab.h:
sys/xen/hvm.h:
sys/xen/xenbus/xenbus.c:
sys/xen/xenbus/xenbus_if.m:
sys/xen/xenbus/xenbusb_front.c:
sys/xen/xenbus/xenbusvar.h:
sys/xen/xenstore/xenstore.c:
sys/xen/xenstore/xenstore_dev.c:
sys/xen/xenstore/xenstorevar.h:
Pull common Xen OS support functions/settings into xen/xen-os.h.
sys/amd64/include/xen/xen-os.h:
sys/i386/include/xen/xen-os.h:
sys/xen/xen-os.h:
Remove constants, macros, and functions unused in FreeBSD's Xen
support.
sys/xen/xen-os.h:
sys/i386/xen/xen_machdep.c:
sys/x86/xen/hvm.c:
Introduce new functions xen_domain(), xen_pv_domain(), and
xen_hvm_domain(). These are used in favor of #ifdefs so that
FreeBSD can dynamically detect and adapt to the presence of
a hypervisor. The goal is to have an HVM optimized GENERIC,
but more is necessary before this is possible.
sys/amd64/amd64/machdep.c:
sys/dev/xen/xenpci/xenpcivar.h:
sys/dev/xen/xenpci/xenpci.c:
sys/x86/xen/hvm.c:
sys/sys/kernel.h:
Refactor magic ioport, Hypercall table and Hypervisor shared
information page setup, and move it to a dedicated HVM support
module.
HVM mode initialization is now triggered during the
SI_SUB_HYPERVISOR phase of system startup. This currently
occurs just after the kernel VM is fully set up, which provides
just enough infrastructure to allow the hypercall table
and shared info page to be properly mapped.
sys/xen/hvm.h:
sys/x86/xen/hvm.c:
Add definitions and a method for configuring Hypervisor event
delivery via a direct vector callback.
sys/amd64/include/xen/xen-os.h:
sys/x86/xen/hvm.c:
sys/conf/files:
sys/conf/files.amd64:
sys/conf/files.i386:
Adjust kernel build to reflect the refactoring of early
Xen startup code and Xen interrupt services.
sys/dev/xen/blkback/blkback.c:
sys/dev/xen/blkfront/blkfront.c:
sys/dev/xen/blkfront/block.h:
sys/dev/xen/control/control.c:
sys/dev/xen/evtchn/evtchn_dev.c:
sys/dev/xen/netback/netback.c:
sys/dev/xen/netfront/netfront.c:
sys/xen/xenstore/xenstore.c:
sys/xen/evtchn/evtchn_dev.c:
sys/dev/xen/console/console.c:
sys/dev/xen/console/xencons_ring.c
Adjust drivers to use new xen_intr_*() API.
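A minimal sketch of the new binding API as seen from a driver
(the softc layout, handler names, and interrupt flags below are
illustrative assumptions, not taken from any particular driver):

	struct mydev_softc *sc = device_get_softc(dev);
	int error;

	error = xen_intr_bind_local_port(dev, sc->evtchn,
	    mydev_filter, mydev_intr, sc,
	    INTR_TYPE_BIO | INTR_MPSAFE, &sc->xen_intr_handle);
	if (error != 0)
		return (error);
	...
	/* On detach or error unwind. */
	xen_intr_unbind(&sc->xen_intr_handle);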
sys/dev/xen/blkback/blkback.c:
Since blkback defers all event handling to a taskqueue,
convert this task queue to a "fast" taskqueue, and schedule
it via an interrupt filter. This avoids an unnecessary
ithread context switch.
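The filter/fast-taskqueue pattern is roughly the following
(identifiers are hypothetical placeholders, not blkback's actual
names):

	/* At attach time. */
	TASK_INIT(&sc->io_task, 0, xbb_run_queue, sc);
	sc->io_taskqueue = taskqueue_create_fast("xbb", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->io_taskqueue);

	/* Interrupt filter: defer all work, no ithread switch. */
	static int
	xbb_filter(void *arg)
	{
		struct xbb_softc *sc = arg;

		taskqueue_enqueue(sc->io_taskqueue, &sc->io_task);
		return (FILTER_HANDLED);
	}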
sys/xen/xenstore/xenstore.c:
The xenstore driver is MPSAFE. Indicate as much when
registering its interrupt handler.
sys/xen/xenbus/xenbus.c:
sys/xen/xenbus/xenbusvar.h:
Remove unused event channel APIs.
sys/xen/evtchn.h:
Remove all kernel Xen interrupt service API definitions
from this file. It is now only used for structure and
ioctl definitions related to the event channel userland
device driver.
Update the definitions in this file to match those from
NetBSD. Implementing this interface will be necessary for
Dom0 support.
sys/xen/evtchn/evtchnvar.h:
Add a header file for implementation-internal APIs related
to managing event channel event delivery. This is used
to allow, for example, the event channel userland device
driver to access low-level routines that typical kernel
consumers of event channel services should never access.
sys/xen/interface/event_channel.h:
sys/xen/xen_intr.h:
Standardize on the evtchn_port_t type for referring to
an event channel port id. In order to prevent low-level
event channel APIs from leaking to kernel consumers who
should not have access to this data, the type is defined
twice: Once in the Xen provided event_channel.h, and again
in xen/xen_intr.h. The double declaration is protected by
__XEN_EVTCHN_PORT_DEFINED__ to ensure it is never declared
twice within a given compilation unit.
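The guard is just a conventional include guard scoped to the
typedef; schematically (a sketch, not the verbatim header text):

	#ifndef __XEN_EVTCHN_PORT_DEFINED__
	typedef uint32_t evtchn_port_t;
	#define __XEN_EVTCHN_PORT_DEFINED__ 1
	#endif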
sys/xen/xen_intr.h:
sys/xen/evtchn/evtchn.c:
sys/x86/xen/xen_intr.c:
sys/dev/xen/xenpci/evtchn.c:
sys/dev/xen/xenpci/xenpcivar.h:
New implementation of Xen interrupt services. This is
similar in many respects to the i386 PV implementation with
the exception that events bound to event channel ports
(i.e. not IPI, virtual IRQ, or physical IRQ) are further
optimized to avoid mask/unmask operations that aren't
necessary for these edge triggered events.
Stubs exist for supporting physical IRQ binding, but will
need additional work before this implementation can be
fully shared between PV and HVM.
sys/amd64/amd64/mp_machdep.c:
sys/i386/i386/mp_machdep.c:
sys/i386/xen/mp_machdep.c
sys/x86/xen/hvm.c:
Add support for placing vcpu_info into an arbitrary memory
page instead of using HYPERVISOR_shared_info->vcpu_info.
This allows the creation of domains with more than 32 vcpus.
sys/i386/i386/machdep.c:
sys/i386/xen/clock.c:
sys/i386/xen/xen_machdep.c:
sys/i386/xen/exception.s:
Add support for the new event channel implementation.
/******************************************************************************
 * xen_intr.c
 *
 * Xen event and interrupt services for x86 PV and HVM guests.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
 * Copyright (c) 2012, Spectra Logic Corporation
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/intr_machdep.h>
#include <machine/apicvar.h>
#include <machine/smp.h>
#include <machine/stdarg.h>

#include <machine/xen/synch_bitops.h>
#include <machine/xen/xen-os.h>
#include <machine/xen/xenvar.h>

#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn/evtchnvar.h>

#include <dev/xen/xenpci/xenpcivar.h>

static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");
/**
 * Per-cpu event channel processing state.
 */
struct xen_intr_pcpu_data {
	/**
	 * The last event channel bitmap section (level one bit) processed.
	 * This is used to ensure we scan all ports before
	 * servicing an already serviced port again.
	 */
	u_int last_processed_l1i;

	/**
	 * The last event channel processed within the event channel
	 * bitmap being scanned.
	 */
	u_int last_processed_l2i;

	/** Pointer to this CPU's interrupt statistic counter. */
	u_long *evtchn_intrcnt;

	/**
	 * A bitmap of ports that can be serviced from this CPU.
	 * A set bit means interrupt handling is enabled.
	 */
	u_long evtchn_enabled[sizeof(u_long) * 8];
};
/*
 * Start the scan at port 0 by initializing the last scanned
 * location as the highest numbered event channel port.
 */
DPCPU_DEFINE(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
	.last_processed_l1i = LONG_BIT - 1,
	.last_processed_l2i = LONG_BIT - 1
};

DPCPU_DECLARE(struct vcpu_info *, vcpu_info);

#define	is_valid_evtchn(x)	((x) != 0)

struct xenisrc {
	struct intsrc	xi_intsrc;
	enum evtchn_type xi_type;
	int		xi_cpu;		/* VCPU for delivery. */
	int		xi_vector;	/* Global isrc vector number. */
	evtchn_port_t	xi_port;
	int		xi_pirq;
	int		xi_virq;
	u_int		xi_close:1;	/* close on unbind? */
	u_int		xi_needs_eoi:1;
	u_int		xi_shared:1;	/* Shared with other domains. */
};

#define	ARRAY_SIZE(a)	(sizeof(a) / sizeof(a[0]))

static void	xen_intr_suspend(struct pic *);
static void	xen_intr_resume(struct pic *);
static void	xen_intr_enable_source(struct intsrc *isrc);
static void	xen_intr_disable_source(struct intsrc *isrc, int eoi);
static void	xen_intr_eoi_source(struct intsrc *isrc);
static void	xen_intr_enable_intr(struct intsrc *isrc);
static void	xen_intr_disable_intr(struct intsrc *isrc);
static int	xen_intr_vector(struct intsrc *isrc);
static int	xen_intr_source_pending(struct intsrc *isrc);
static int	xen_intr_config_intr(struct intsrc *isrc,
		    enum intr_trigger trig, enum intr_polarity pol);
static int	xen_intr_assign_cpu(struct intsrc *isrc, u_int apic_id);

static void	xen_intr_pirq_enable_source(struct intsrc *isrc);
static void	xen_intr_pirq_disable_source(struct intsrc *isrc, int eoi);
static void	xen_intr_pirq_eoi_source(struct intsrc *isrc);
static void	xen_intr_pirq_enable_intr(struct intsrc *isrc);

/**
 * PIC interface for all event channel port types except physical IRQs.
 */
struct pic xen_intr_pic = {
	.pic_enable_source  = xen_intr_enable_source,
	.pic_disable_source = xen_intr_disable_source,
	.pic_eoi_source     = xen_intr_eoi_source,
	.pic_enable_intr    = xen_intr_enable_intr,
	.pic_disable_intr   = xen_intr_disable_intr,
	.pic_vector         = xen_intr_vector,
	.pic_source_pending = xen_intr_source_pending,
	.pic_suspend        = xen_intr_suspend,
	.pic_resume         = xen_intr_resume,
	.pic_config_intr    = xen_intr_config_intr,
	.pic_assign_cpu     = xen_intr_assign_cpu
};
/**
 * PIC interface for all event channels representing
 * physical interrupt sources.
 */
struct pic xen_intr_pirq_pic = {
	.pic_enable_source  = xen_intr_pirq_enable_source,
	.pic_disable_source = xen_intr_pirq_disable_source,
	.pic_eoi_source     = xen_intr_pirq_eoi_source,
	.pic_enable_intr    = xen_intr_pirq_enable_intr,
	.pic_disable_intr   = xen_intr_disable_intr,
	.pic_vector         = xen_intr_vector,
	.pic_source_pending = xen_intr_source_pending,
	.pic_suspend        = xen_intr_suspend,
	.pic_resume         = xen_intr_resume,
	.pic_config_intr    = xen_intr_config_intr,
	.pic_assign_cpu     = xen_intr_assign_cpu
};

static struct mtx	 xen_intr_isrc_lock;
static int		 xen_intr_isrc_count;
static struct xenisrc	*xen_intr_port_to_isrc[NR_EVENT_CHANNELS];
/*------------------------- Private Functions --------------------------------*/
/**
 * Disable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param port  The event channel port to mask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not preclude reception of an event
 *        for this event channel on another CPU.  To mask the
 *        event channel globally, use evtchn_mask().
 */
static inline void
evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	clear_bit(port, pcpu->evtchn_enabled);
}

/**
 * Enable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param port  The event channel port to unmask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not guarantee that event delivery
 *        is enabled for this event channel port.  The port must
 *        also be globally enabled.  See evtchn_unmask().
 */
static inline void
evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	set_bit(port, pcpu->evtchn_enabled);
}

/**
 * Allocate and register a per-cpu Xen upcall interrupt counter.
 *
 * \param cpu  The cpu for which to register this interrupt count.
 */
static void
xen_intr_intrcnt_add(u_int cpu)
{
	char buf[MAXCOMLEN + 1];
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	if (pcpu->evtchn_intrcnt != NULL)
		return;

	snprintf(buf, sizeof(buf), "cpu%d:xen", cpu);
	intrcnt_add(buf, &pcpu->evtchn_intrcnt);
}

/**
 * Search for an already allocated but currently unused Xen interrupt
 * source object.
 *
 * \param type  Restrict the search to interrupt sources of the given
 *              type.
 *
 * \return  A pointer to a free Xen interrupt source object or NULL.
 */
static struct xenisrc *
xen_intr_find_unused_isrc(enum evtchn_type type)
{
	int isrc_idx;

	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn isrc lock not held"));

	for (isrc_idx = 0; isrc_idx < xen_intr_isrc_count; isrc_idx++) {
		struct xenisrc *isrc;
		u_int vector;

		vector = FIRST_EVTCHN_INT + isrc_idx;
		isrc = (struct xenisrc *)intr_lookup_source(vector);
		if (isrc != NULL
		 && isrc->xi_type == EVTCHN_TYPE_UNBOUND) {
			KASSERT(isrc->xi_intsrc.is_handlers == 0,
			    ("Free evtchn still has handlers"));
			isrc->xi_type = type;
			return (isrc);
		}
	}
	return (NULL);
}

/**
 * Allocate a Xen interrupt source object.
 *
 * \param type  The type of interrupt source to create.
 *
 * \return  A pointer to a newly allocated Xen interrupt source
 *          object or NULL.
 */
static struct xenisrc *
xen_intr_alloc_isrc(enum evtchn_type type)
{
	static int warned;
	struct xenisrc *isrc;
	int vector;

	KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn alloc lock not held"));

	if (xen_intr_isrc_count > NR_EVENT_CHANNELS) {
		if (!warned) {
			warned = 1;
			printf("xen_intr_alloc: Event channels exhausted.\n");
		}
		return (NULL);
	}
	vector = FIRST_EVTCHN_INT + xen_intr_isrc_count;
	xen_intr_isrc_count++;

	mtx_unlock(&xen_intr_isrc_lock);
	isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
	isrc->xi_intsrc.is_pic = &xen_intr_pic;
	isrc->xi_vector = vector;
	isrc->xi_type = type;
	intr_register_source(&isrc->xi_intsrc);
	mtx_lock(&xen_intr_isrc_lock);

	return (isrc);
}

/**
 * Attempt to free an active Xen interrupt source object.
 *
 * \param isrc  The interrupt source object to release.
 *
 * \returns  EBUSY if the source is still in use, otherwise 0.
 */
static int
xen_intr_release_isrc(struct xenisrc *isrc)
{

	mtx_lock(&xen_intr_isrc_lock);
	if (isrc->xi_intsrc.is_handlers != 0) {
		mtx_unlock(&xen_intr_isrc_lock);
		return (EBUSY);
	}
	evtchn_mask_port(isrc->xi_port);
	evtchn_clear_port(isrc->xi_port);

	/* Rebind port to CPU 0. */
	evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
	evtchn_cpu_unmask_port(0, isrc->xi_port);

	if (isrc->xi_close != 0) {
		struct evtchn_close close = { .port = isrc->xi_port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
	}

	xen_intr_port_to_isrc[isrc->xi_port] = NULL;
	isrc->xi_cpu = 0;
	isrc->xi_type = EVTCHN_TYPE_UNBOUND;
	isrc->xi_port = 0;
	mtx_unlock(&xen_intr_isrc_lock);
	return (0);
}

/**
 * Associate an interrupt handler with an already allocated local Xen
 * event channel port.
 *
 * \param isrcp       The returned Xen interrupt object associated with
 *                    the specified local port.
 * \param local_port  The event channel to bind.
 * \param type        The event channel type of local_port.
 * \param intr_owner  The device making this bind request.
 * \param filter      An interrupt filter handler.  Specify NULL
 *                    to always dispatch to the ithread handler.
 * \param handler     An interrupt ithread handler.  Optional (can
 *                    specify NULL) if all necessary event actions
 *                    are performed by filter.
 * \param arg         Argument to present to both filter and handler.
 * \param irqflags    Interrupt handler flags.  See sys/bus.h.
 * \param handlep     Pointer to an opaque handle used to manage this
 *                    registration.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_bind_isrc(struct xenisrc **isrcp, evtchn_port_t local_port,
    enum evtchn_type type, device_t intr_owner, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	int error;

	*isrcp = NULL;
	if (port_handlep == NULL) {
		device_printf(intr_owner,
		    "xen_intr_bind_isrc: Bad event handle\n");
		return (EINVAL);
	}

	mtx_lock(&xen_intr_isrc_lock);
	isrc = xen_intr_find_unused_isrc(type);
	if (isrc == NULL) {
		isrc = xen_intr_alloc_isrc(type);
		if (isrc == NULL) {
			mtx_unlock(&xen_intr_isrc_lock);
			return (ENOSPC);
		}
	}
	isrc->xi_port = local_port;
	xen_intr_port_to_isrc[local_port] = isrc;
	mtx_unlock(&xen_intr_isrc_lock);

	error = intr_add_handler(device_get_nameunit(intr_owner),
				 isrc->xi_vector, filter, handler, arg,
				 flags|INTR_EXCL, port_handlep);
	if (error != 0) {
		device_printf(intr_owner,
		    "xen_intr_bind_irq: intr_add_handler failed\n");
		xen_intr_release_isrc(isrc);
		return (error);
	}
	*isrcp = isrc;
	return (0);
}

/**
 * Lookup a Xen interrupt source object given an interrupt binding handle.
 *
 * \param handle  A handle initialized by a previous call to
 *                xen_intr_bind_isrc().
 *
 * \returns  A pointer to the Xen interrupt source object associated
 *           with the given interrupt handle.  NULL if no association
 *           currently exists.
 */
static struct xenisrc *
xen_intr_isrc(xen_intr_handle_t handle)
{
	struct intr_handler *ih;

	ih = handle;
	if (ih == NULL || ih->ih_event == NULL)
		return (NULL);

	return (ih->ih_event->ie_source);
}
/**
 * Determine the event channel ports at the given section of the
 * event port bitmap which have pending events for the given cpu.
 *
 * \param pcpu  The Xen interrupt pcpu data for the cpu being queried.
 * \param sh    The Xen shared info area.
 * \param idx   The index of the section of the event channel bitmap to
 *              inspect.
 *
 * \returns  A u_long with bits set for every event channel with pending
 *           events.
 */
static inline u_long
xen_intr_active_ports(struct xen_intr_pcpu_data *pcpu, shared_info_t *sh,
    u_int idx)
{
	return (sh->evtchn_pending[idx]
	      & ~sh->evtchn_mask[idx]
	      & pcpu->evtchn_enabled[idx]);
}
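/*
 * Note on the layout scanned below: pending events live in a two-level
 * bitmap in the shared info page.  The per-vCPU evtchn_pending_sel word
 * (level one) has a bit set for each word of the global evtchn_pending[]
 * array (level two) that may contain pending ports, and each level-two
 * word covers LONG_BIT consecutive ports.  The handler resumes scanning
 * just past the last port it serviced so that no single port can starve
 * the others.
 */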
/**
 * Interrupt handler for processing all Xen event channel events.
 *
 * \param trap_frame  The trap frame context for the current interrupt.
 */
void
xen_intr_handle_upcall(struct trapframe *trap_frame)
{
	u_int l1i, l2i, port, cpu;
	u_long masked_l1, masked_l2;
	struct xenisrc *isrc;
	shared_info_t *s;
	vcpu_info_t *v;
	struct xen_intr_pcpu_data *pc;
	u_long l1, l2;

	/*
	 * Disable preemption in order to always check and fire events
	 * on the right vCPU.
	 */
	critical_enter();

	cpu = PCPU_GET(cpuid);
	pc  = DPCPU_PTR(xen_intr_pcpu);
	s   = HYPERVISOR_shared_info;
	v   = DPCPU_GET(vcpu_info);

	if (xen_hvm_domain() && !xen_vector_callback_enabled) {
		KASSERT((cpu == 0), ("Fired PCI event callback on wrong CPU"));
	}

	v->evtchn_upcall_pending = 0;

#if 0
#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
	/* Clear master flag /before/ clearing selector flag. */
	wmb();
#endif
#endif

	l1 = atomic_readandclear_long(&v->evtchn_pending_sel);

	l1i = pc->last_processed_l1i;
	l2i = pc->last_processed_l2i;
	(*pc->evtchn_intrcnt)++;

	while (l1 != 0) {

		l1i = (l1i + 1) % LONG_BIT;
		masked_l1 = l1 & ((~0UL) << l1i);

		if (masked_l1 == 0) {
			/*
			 * if we masked out all events, wrap around
			 * to the beginning.
			 */
			l1i = LONG_BIT - 1;
			l2i = LONG_BIT - 1;
			continue;
		}
		l1i = ffsl(masked_l1) - 1;

		do {
			l2 = xen_intr_active_ports(pc, s, l1i);

			l2i = (l2i + 1) % LONG_BIT;
			masked_l2 = l2 & ((~0UL) << l2i);

			if (masked_l2 == 0) {
				/* if we masked out all events, move on */
				l2i = LONG_BIT - 1;
				break;
			}
			l2i = ffsl(masked_l2) - 1;

			/* process port */
			port = (l1i * LONG_BIT) + l2i;
			synch_clear_bit(port, &s->evtchn_pending[0]);

			isrc = xen_intr_port_to_isrc[port];
			if (__predict_false(isrc == NULL))
				continue;

			/* Make sure we are firing on the right vCPU */
			KASSERT((isrc->xi_cpu == PCPU_GET(cpuid)),
				("Received unexpected event on vCPU#%d, event bound to vCPU#%d",
				PCPU_GET(cpuid), isrc->xi_cpu));

			intr_execute_handlers(&isrc->xi_intsrc, trap_frame);

			/*
			 * If this is the final port processed,
			 * we'll pick up here+1 next time.
			 */
			pc->last_processed_l1i = l1i;
			pc->last_processed_l2i = l2i;

		} while (l2i != LONG_BIT - 1);

		l2 = xen_intr_active_ports(pc, s, l1i);
		if (l2 == 0) {
			/*
			 * We handled all ports, so we can clear the
			 * selector bit.
			 */
			l1 &= ~(1UL << l1i);
		}
	}
	critical_exit();
}
static int
xen_intr_init(void *dummy __unused)
{
	struct xen_intr_pcpu_data *pcpu;
	int i;

	mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);

	/*
	 * Register interrupt count manually as we aren't
	 * guaranteed to see a call to xen_intr_assign_cpu()
	 * before our first interrupt. Also set the per-cpu
	 * mask of CPU#0 to enable all, since by default
	 * all event channels are bound to CPU#0.
	 */
	CPU_FOREACH(i) {
		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
		    sizeof(pcpu->evtchn_enabled));
		xen_intr_intrcnt_add(i);
	}

	intr_register_pic(&xen_intr_pic);

	return (0);
}
SYSINIT(xen_intr_init, SI_SUB_INTR, SI_ORDER_MIDDLE, xen_intr_init, NULL);
/*--------------------------- Common PIC Functions ---------------------------*/
/**
 * Prepare this PIC for system suspension.
 */
static void
xen_intr_suspend(struct pic *unused)
{
}

/**
 * Return this PIC to service after being suspended.
 */
static void
xen_intr_resume(struct pic *unused)
{
	u_int port;

	/*
	 * Mask events for all ports.  They will be unmasked after
	 * drivers have re-registered their handlers.
	 */
	for (port = 0; port < NR_EVENT_CHANNELS; port++)
		evtchn_mask_port(port);
}

/**
 * Disable a Xen interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_disable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	evtchn_mask_port(isrc->xi_port);
}

/**
 * Determine the global interrupt vector number for
 * a Xen interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \return  The vector number corresponding to the given interrupt source.
 */
static int
xen_intr_vector(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	return (isrc->xi_vector);
}
/**
 * Determine whether or not interrupt events are pending on
 * the given interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \returns  0 if no events are pending, otherwise non-zero.
 */
static int
xen_intr_source_pending(struct intsrc *isrc)
{
	/*
	 * EventChannels are edge triggered and never masked.
	 * There can be no pending events.
	 */
	return (0);
}
/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 if successful, otherwise an errno.
 */
static int
xen_intr_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
	/* Configuration is only possible via the evtchn apis. */
	return (ENODEV);
}
/**
 * Configure CPU affinity for interrupt source event delivery.
 *
 * \param isrc     The interrupt source to configure.
 * \param apic_id  The apic id of the CPU for handling future events.
 *
 * \returns  0 if successful, otherwise an errno.
 */
static int
xen_intr_assign_cpu(struct intsrc *base_isrc, u_int apic_id)
{
	struct evtchn_bind_vcpu bind_vcpu;
	struct xenisrc *isrc;
	u_int to_cpu, acpi_id;
	int error;

#ifdef XENHVM
	if (xen_vector_callback_enabled == 0)
		return (EOPNOTSUPP);
#endif

	to_cpu = apic_cpuid(apic_id);
	acpi_id = pcpu_find(to_cpu)->pc_acpi_id;
	xen_intr_intrcnt_add(to_cpu);

	mtx_lock(&xen_intr_isrc_lock);
	isrc = (struct xenisrc *)base_isrc;
	if (!is_valid_evtchn(isrc->xi_port)) {
		mtx_unlock(&xen_intr_isrc_lock);
		return (EINVAL);
	}

	if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
	    (isrc->xi_type == EVTCHN_TYPE_IPI)) {
		/*
		 * Virtual IRQs are associated with a cpu by
		 * the Hypervisor at evtchn_bind_virq time, so
		 * all we need to do is update the per-CPU masks.
		 */
		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
		isrc->xi_cpu = to_cpu;
		evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
		mtx_unlock(&xen_intr_isrc_lock);
		return (0);
	}

	bind_vcpu.port = isrc->xi_port;
	bind_vcpu.vcpu = acpi_id;

	/*
	 * Allow interrupts to be fielded on the new VCPU before
	 * we ask the hypervisor to deliver them there.
	 */
	evtchn_cpu_unmask_port(to_cpu, isrc->xi_port);
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu);
	if (isrc->xi_cpu != to_cpu) {
		if (error == 0) {
			/* Commit to new binding by removing the old one. */
			evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
			isrc->xi_cpu = to_cpu;
		} else {
			/* Roll-back to previous binding. */
			evtchn_cpu_mask_port(to_cpu, isrc->xi_port);
		}
	}
	mtx_unlock(&xen_intr_isrc_lock);
	return (0);
}
/*------------------- Virtual Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_disable_source(struct intsrc *isrc, int eoi)
{
}

/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_enable_source(struct intsrc *isrc)
{
}

/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_eoi_source(struct intsrc *isrc)
{
}

/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_enable_intr(struct intsrc *base_isrc)
{
	struct xenisrc *isrc = (struct xenisrc *)base_isrc;

	evtchn_unmask_port(isrc->xi_port);
}
/*------------------ Physical Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_pirq_disable_source(struct intsrc *base_isrc, int eoi)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;
	evtchn_mask_port(isrc->xi_port);
}

/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_pirq_enable_source(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;

	isrc = (struct xenisrc *)base_isrc;
	evtchn_unmask_port(isrc->xi_port);
}

/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_pirq_eoi_source(struct intsrc *base_isrc)
{
	struct xenisrc *isrc;

	/* XXX Use shared page of flags for this. */
	isrc = (struct xenisrc *)base_isrc;
	if (isrc->xi_needs_eoi != 0) {
		struct physdev_eoi eoi = { .irq = isrc->xi_pirq };

		(void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
	}
}

/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_pirq_enable_intr(struct intsrc *isrc)
{
}
/*--------------------------- Public Functions -------------------------------*/
/*------- API comments for these methods can be found in xen/xenintr.h -------*/
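/*
 * Example (illustrative only): a backend exporting a fresh port to a peer
 * domain might do
 *
 *	error = xen_intr_alloc_and_bind_local_port(dev, otherend_id,
 *	    NULL, my_intr, sc, INTR_TYPE_MISC | INTR_MPSAFE, &handle);
 *	if (error == 0)
 *		xen_intr_describe(handle, "dom%u", otherend_id);
 *
 * where my_intr, sc, otherend_id, and handle are the caller's own handler,
 * softc, peer domain id, and xen_intr_handle_t storage.
 */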
int
xen_intr_bind_local_port(device_t dev, evtchn_port_t local_port,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	int error;

	error = xen_intr_bind_isrc(&isrc, local_port, EVTCHN_TYPE_PORT, dev,
		    filter, handler, arg, flags, port_handlep);
	if (error != 0)
		return (error);

	/*
	 * The Event Channel API didn't open this port, so it is not
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 0;
	return (0);
}
int
xen_intr_alloc_and_bind_local_port(device_t dev, u_int remote_domain,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	struct evtchn_alloc_unbound alloc_unbound;
	int error;

	alloc_unbound.dom        = DOMID_SELF;
	alloc_unbound.remote_dom = remote_domain;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
		    &alloc_unbound);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 * the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, alloc_unbound.port, EVTCHN_TYPE_PORT,
				 dev, filter, handler, arg, flags,
				 port_handlep);
	if (error != 0) {
		evtchn_close_t close = { .port = alloc_unbound.port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	isrc->xi_close = 1;
	return (0);
}
int
xen_intr_bind_remote_port(device_t dev, u_int remote_domain,
    u_int remote_port, driver_filter_t filter, driver_intr_t handler,
    void *arg, enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	struct evtchn_bind_interdomain bind_interdomain;
	int error;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					    &bind_interdomain);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 * the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_interdomain.local_port,
				 EVTCHN_TYPE_PORT, dev, filter, handler,
				 arg, flags, port_handlep);
	if (error) {
		evtchn_close_t close = { .port = bind_interdomain.local_port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
}
int
xen_intr_bind_virq(device_t dev, u_int virq, u_int cpu,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	int acpi_id = pcpu_find(cpu)->pc_acpi_id;
	struct xenisrc *isrc;
	struct evtchn_bind_virq bind_virq = { .virq = virq, .vcpu = acpi_id };
	int error;

	/* Ensure the target CPU is ready to handle evtchn interrupts. */
	xen_intr_intrcnt_add(cpu);

	isrc = NULL;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 * the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_virq.port, EVTCHN_TYPE_VIRQ, dev,
				 filter, handler, arg, flags, port_handlep);
	if (error == 0)
		error = intr_event_bind(isrc->xi_intsrc.is_event, cpu);

	if (error != 0) {
		evtchn_close_t close = { .port = bind_virq.port };

		xen_intr_unbind(port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding.  Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
}

int
xen_intr_alloc_and_bind_ipi(device_t dev, u_int cpu,
    driver_filter_t filter, enum intr_type flags,
    xen_intr_handle_t *port_handlep)
{
	int acpi_id = pcpu_find(cpu)->pc_acpi_id;
	struct xenisrc *isrc;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = acpi_id };
	int error;

	/* Ensure the target CPU is ready to handle evtchn interrupts. */
	xen_intr_intrcnt_add(cpu);

	isrc = NULL;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 * the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_ipi.port, EVTCHN_TYPE_IPI,
				   dev, filter, NULL, NULL, flags,
				   port_handlep);
	if (error == 0)
		error = intr_event_bind(isrc->xi_intsrc.is_event, cpu);

	if (error != 0) {
		evtchn_close_t close = { .port = bind_ipi.port };

		xen_intr_unbind(port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding.  Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
}
int
xen_intr_describe(xen_intr_handle_t port_handle, const char *fmt, ...)
{
	char descr[MAXCOMLEN + 1];
	struct xenisrc *isrc;
	va_list ap;

	isrc = xen_intr_isrc(port_handle);
	if (isrc == NULL)
		return (EINVAL);

	va_start(ap, fmt);
	vsnprintf(descr, sizeof(descr), fmt, ap);
	va_end(ap);
	return (intr_describe(isrc->xi_vector, port_handle, descr));
}

void
xen_intr_unbind(xen_intr_handle_t *port_handlep)
{
	struct intr_handler *handler;
	struct xenisrc *isrc;

	handler = *port_handlep;
	*port_handlep = NULL;
	isrc = xen_intr_isrc(handler);
	if (isrc == NULL)
		return;

	intr_remove_handler(handler);
	xen_intr_release_isrc(isrc);
}

void
xen_intr_signal(xen_intr_handle_t handle)
{
	struct xenisrc *isrc;

	isrc = xen_intr_isrc(handle);
	if (isrc != NULL) {
		KASSERT(isrc->xi_type == EVTCHN_TYPE_PORT ||
			isrc->xi_type == EVTCHN_TYPE_IPI,
			("evtchn_signal on something other than a local port"));
		struct evtchn_send send = { .port = isrc->xi_port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
	}
}

evtchn_port_t
xen_intr_port(xen_intr_handle_t handle)
{
	struct xenisrc *isrc;

	isrc = xen_intr_isrc(handle);
	if (isrc == NULL)
		return (0);

	return (isrc->xi_port);
}