/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005-2006 Kip Macy
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/smp.h>

#include <machine/cpufunc.h>
#include <machine/intr_machdep.h>

#include <machine/xen/xen-os.h>
#include <machine/xen/xenvar.h>
#include <xen/xen_intr.h>
#include <machine/xen/synch_bitops.h>
#include <xen/evtchn.h>
#include <xen/hypervisor.h>

/* Find the index of the least-significant set bit (i386 BSF). */
static inline unsigned long __ffs(unsigned long word)
{
        __asm__("bsfl %1,%0"
                :"=r" (word)
                :"rm" (word));
        return word;
}
/*
 * irq_mapping_update_lock: in order to allow an interrupt to occur in a
 * critical section, to set pcpu->ipending (etc.) properly, we must be able
 * to get the icu lock, so it can't be under witness.
 */
static struct mtx irq_mapping_update_lock;
MTX_SYSINIT(irq_mapping_update_lock, &irq_mapping_update_lock, "xp", MTX_SPIN);

static struct xenpic *xp;
struct xenpic_intsrc {
        struct intsrc   xp_intsrc;
        void            *xp_cookie;
        uint8_t         xp_vector;
        boolean_t       xp_masked;
};

struct xenpic {
        struct pic              *xp_dynirq_pic;
        struct pic              *xp_pirq_pic;
        uint16_t                xp_numintr;
        struct xenpic_intsrc    xp_pins[0];
};

#define TODO    printf("%s: not implemented!\n", __func__)

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS];

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static uint32_t irq_info[NR_IRQS];

/* Binding types. */
enum {
        IRQT_UNBOUND,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_LOCAL_PORT,
        IRQT_CALLER_PORT,
        _IRQT_COUNT
};

#define _IRQT_BITS      4
#define _EVTCHN_BITS    12
#define _INDEX_BITS     (32 - _IRQT_BITS - _EVTCHN_BITS)

/* Constructor for packed IRQ information. */
static inline uint32_t
mk_irq_info(uint32_t type, uint32_t index, uint32_t evtchn)
{

        return ((type << (32 - _IRQT_BITS)) | (index << _EVTCHN_BITS) | evtchn);
}

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND     mk_irq_info(IRQT_UNBOUND, 0, 0)

/*
 * Accessors for packed IRQ information.
 */
static inline unsigned int evtchn_from_irq(int irq)
{
        return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
}

static inline unsigned int index_from_irq(int irq)
{
        return (irq_info[irq] >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1);
}

static inline unsigned int type_from_irq(int irq)
{
        return irq_info[irq] >> (32 - _IRQT_BITS);
}
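
/*
 * Worked example of the packing: with _IRQT_BITS == 4 and
 * _EVTCHN_BITS == 12, mk_irq_info(IRQT_VIRQ, VIRQ_TIMER, 5) stores the
 * binding type in bits 31-28, the sub-type index (VIRQ_TIMER) in bits
 * 27-12, and event-channel port 5 in bits 11-0; the three accessors
 * above simply shift and mask those fields back out of irq_info[].
 */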

/* IRQ <-> VIRQ mapping. */

/* IRQ <-> IPI mapping. */
#ifndef NR_IPIS
#ifdef SMP
#error "NR_IPIS not defined"
#endif
#define NR_IPIS 1
#endif

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

#define VALID_EVTCHN(_chn)      ((_chn) != 0)
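
/*
 * Note that IRQ_UNBOUND packs event channel 0, so VALID_EVTCHN()
 * treating port 0 as "no channel" is what marks an IRQ whose binding
 * has been torn down (see unbind_from_irq() below).
 */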

#ifdef SMP

static uint8_t cpu_evtchn[NR_EVENT_CHANNELS];
static unsigned long cpu_evtchn_mask[XEN_LEGACY_MAX_VCPUS][NR_EVENT_CHANNELS/LONG_BIT];

#define active_evtchns(cpu,sh,idx)              \
        ((sh)->evtchn_pending[idx] &            \
         cpu_evtchn_mask[cpu][idx] &            \
         ~(sh)->evtchn_mask[idx])

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
        set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
        cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
        /* By default all event channels notify CPU#0. */
        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
        memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

#define cpu_from_evtchn(evtchn)         (cpu_evtchn[evtchn])

#else

#define active_evtchns(cpu,sh,idx)              \
        ((sh)->evtchn_pending[idx] &            \
         ~(sh)->evtchn_mask[idx])
#define bind_evtchn_to_cpu(chn,cpu)     ((void)0)
#define init_evtchn_cpu_bindings()      ((void)0)
#define cpu_from_evtchn(evtchn)         (0)

#endif
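
/*
 * Illustration: with LONG_BIT == 32, word 0 of evtchn_pending covers
 * ports 0-31, so active_evtchns(cpu, sh, 0) is non-zero exactly when
 * some port in that range is pending, routed to 'cpu', and not masked.
 */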

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
        (void)HYPERVISOR_xen_version(0, NULL);
}
void
evtchn_do_upcall(struct trapframe *frame)
{
        unsigned long  l1, l2;
        unsigned int   l1i, l2i, port;
        int            irq, cpu;
        shared_info_t *s;
        vcpu_info_t   *vcpu_info;

        cpu = PCPU_GET(cpuid);
        s = HYPERVISOR_shared_info;
        vcpu_info = &s->vcpu_info[cpu];

        vcpu_info->evtchn_upcall_pending = 0;

        /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
        l1 = xen_xchg(&vcpu_info->evtchn_pending_sel, 0);

        while (l1 != 0) {
                l1i = __ffs(l1);
                l1 &= ~(1UL << l1i);

                while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
                        l2i = __ffs(l2);

                        port = (l1i * LONG_BIT) + l2i;
                        if ((irq = evtchn_to_irq[port]) != -1) {
                                struct intsrc *isrc = intr_lookup_source(irq);

                                /* Acknowledge: mask and clear before dispatch. */
                                mask_evtchn(port);
                                clear_evtchn(port);

                                intr_execute_handlers(isrc, frame);
                        } else {
                                evtchn_device_upcall(port);
                        }
                }
        }
}
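
/*
 * Worked example of the two-level scan above: if the xchg returns a
 * selector with only bit 2 set and active_evtchns(cpu, s, 2) yields
 * 0x9, the inner loop dispatches ports (2 * LONG_BIT) + 0 and then
 * (2 * LONG_BIT) + 3 before re-reading the pending word.
 */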

/*
 * Send an IPI from the current CPU to the destination CPU.
 */
void
ipi_pcpu(unsigned int cpu, int vector)
{
        int irq;

        irq = pcpu_find(cpu)->pc_ipi_to_irq[vector];

        notify_remote_via_irq(irq);
}
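
/*
 * Note: 'vector' indexes the per-CPU pc_ipi_to_irq[] table that is
 * populated by bind_ipi_to_irq() below, so an IPI must have been bound
 * (e.g. via bind_ipi_to_irqhandler()) before ipi_pcpu() may target it.
 */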

static int
find_unbound_irq(void)
{
        int dynirq, irq;

        for (dynirq = 0; dynirq < NR_IRQS; dynirq++) {
                irq = dynirq_to_irq(dynirq);
                if (irq_bindcount[irq] == 0)
                        break;
        }

        if (irq == NR_IRQS)
                panic("No available IRQ to bind to: increase NR_IRQS!\n");

        return (irq);
}
static int
bind_caller_port_to_irq(unsigned int caller_port, int *port)
{
        int irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = evtchn_to_irq[caller_port]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                evtchn_to_irq[caller_port] = irq;
                irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
        }

        irq_bindcount[irq]++;
        *port = caller_port;

out:
        mtx_unlock_spin(&irq_mapping_update_lock);
        return (irq);
}
static int
bind_local_port_to_irq(unsigned int local_port, int *port)
{
        int irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        KASSERT(evtchn_to_irq[local_port] == -1,
            ("evtchn_to_irq inconsistent"));

        if ((irq = find_unbound_irq()) < 0) {
                struct evtchn_close close = { .port = local_port };

                HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
                goto out;
        }

        evtchn_to_irq[local_port] = irq;
        irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
        irq_bindcount[irq]++;
        *port = local_port;

out:
        mtx_unlock_spin(&irq_mapping_update_lock);
        return (irq);
}
static int
bind_listening_port_to_irq(unsigned int remote_domain, int *port)
{
        struct evtchn_alloc_unbound alloc_unbound;
        int err;

        alloc_unbound.dom        = DOMID_SELF;
        alloc_unbound.remote_dom = remote_domain;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
            &alloc_unbound);

        /* On failure return the error, else bind the new local port. */
        return (err ? err : bind_local_port_to_irq(alloc_unbound.port, port));
}
static int
bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
    unsigned int remote_port, int *port)
{
        struct evtchn_bind_interdomain bind_interdomain;
        int err;

        bind_interdomain.remote_dom  = remote_domain;
        bind_interdomain.remote_port = remote_port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
            &bind_interdomain);

        return (err ? err
            : bind_local_port_to_irq(bind_interdomain.local_port, port));
}
static int
bind_virq_to_irq(unsigned int virq, unsigned int cpu, int *port)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn = 0, irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = pcpu_find(cpu)->pc_virq_to_irq[virq]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);

                evtchn = bind_virq.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                pcpu_find(cpu)->pc_virq_to_irq[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;
        *port = evtchn;
out:
        mtx_unlock_spin(&irq_mapping_update_lock);

        return (irq);
}
static int
bind_ipi_to_irq(unsigned int ipi, unsigned int cpu, int *port)
{
        struct evtchn_bind_ipi bind_ipi;
        int irq;
        int evtchn = 0;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = pcpu_find(cpu)->pc_ipi_to_irq[ipi]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                bind_ipi.vcpu = cpu;
                HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
                evtchn = bind_ipi.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                pcpu_find(cpu)->pc_ipi_to_irq[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }
        irq_bindcount[irq]++;
        *port = evtchn;
out:
        mtx_unlock_spin(&irq_mapping_update_lock);

        return (irq);
}
static void
unbind_from_irq(int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);
        int cpu;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        cpu = cpu_from_evtchn(evtchn);
                        pcpu_find(cpu)->pc_virq_to_irq[index_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        cpu = cpu_from_evtchn(evtchn);
                        pcpu_find(cpu)->pc_ipi_to_irq[index_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
                irq_info[irq] = IRQ_UNBOUND;
        }

        mtx_unlock_spin(&irq_mapping_update_lock);
}
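
/*
 * The *_to_irqhandler() wrappers below all follow the same pattern:
 * bind the port to an IRQ, register the interrupt source, attach the
 * handler, and only then unmask the event channel, so no upcall can
 * arrive before a handler is in place.
 */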
int
bind_caller_port_to_irqhandler(unsigned int caller_port,
    const char *devname, driver_intr_t handler, void *arg,
    unsigned long irqflags, unsigned int *irqp)
{
        unsigned int irq;
        int port = -1;
        int error;

        irq = bind_caller_port_to_irq(caller_port, &port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, NULL, handler, arg, irqflags,
            &xp->xp_pins[irq].xp_cookie);

        if (error) {
                unbind_from_irq(irq);
                return (error);
        }
        if (port != -1)
                unmask_evtchn(port);

        if (irqp)
                *irqp = irq;

        return (0);
}
int
bind_listening_port_to_irqhandler(unsigned int remote_domain,
    const char *devname, driver_intr_t handler, void *arg,
    unsigned long irqflags, unsigned int *irqp)
{
        unsigned int irq;
        int port = -1;
        int error;

        irq = bind_listening_port_to_irq(remote_domain, &port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, NULL, handler, arg, irqflags,
            &xp->xp_pins[irq].xp_cookie);
        if (error) {
                unbind_from_irq(irq);
                return (error);
        }
        if (port != -1)
                unmask_evtchn(port);
        if (irqp)
                *irqp = irq;

        return (0);
}
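
/*
 * Typical front-end usage (a sketch; "otherend_id", "xbd_intr" and "sc"
 * are illustrative names, not part of this file):
 *
 *      unsigned int irq;
 *      int error;
 *
 *      error = bind_listening_port_to_irqhandler(otherend_id, "xbd",
 *          xbd_intr, sc, INTR_TYPE_BIO | INTR_MPSAFE, &irq);
 *
 * On success the event channel is already unmasked; the binding is
 * torn down again with unbind_from_irqhandler(irq).
 */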
int
bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
    unsigned int remote_port, const char *devname,
    driver_intr_t handler, void *arg, unsigned long irqflags,
    unsigned int *irqp)
{
        unsigned int irq;
        int port = -1;
        int error;

        irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port,
            &port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, NULL, handler, arg,
            irqflags, &xp->xp_pins[irq].xp_cookie);
        if (error) {
                unbind_from_irq(irq);
                return (error);
        }
        if (port != -1)
                unmask_evtchn(port);

        if (irqp)
                *irqp = irq;
        return (0);
}
int
bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
    const char *devname, driver_filter_t filter, driver_intr_t handler,
    void *arg, unsigned long irqflags, unsigned int *irqp)
{
        unsigned int irq;
        int port = -1;
        int error;

        irq = bind_virq_to_irq(virq, cpu, &port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, filter, handler,
            arg, irqflags, &xp->xp_pins[irq].xp_cookie);
        if (error) {
                unbind_from_irq(irq);
                return (error);
        }
        if (port != -1)
                unmask_evtchn(port);

        if (irqp)
                *irqp = irq;
        return (0);
}
int
bind_ipi_to_irqhandler(unsigned int ipi, unsigned int cpu,
    const char *devname, driver_filter_t filter,
    unsigned long irqflags, unsigned int *irqp)
{
        unsigned int irq;
        int port = -1;
        int error;

        irq = bind_ipi_to_irq(ipi, cpu, &port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, filter, NULL,
            NULL, irqflags, &xp->xp_pins[irq].xp_cookie);
        if (error) {
                unbind_from_irq(irq);
                return (error);
        }
        if (port != -1)
                unmask_evtchn(port);

        if (irqp)
                *irqp = irq;
        return (0);
}
void
unbind_from_irqhandler(unsigned int irq)
{
        intr_remove_handler(xp->xp_pins[irq].xp_cookie);
        unbind_from_irq(irq);
}
#if 0
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void
rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        evtchn_op_t op = { .cmd = EVTCHNOP_bind_vcpu };
        int evtchn;

        mtx_lock_spin(&irq_mapping_update_lock);

        evtchn = evtchn_from_irq(irq);
        if (!VALID_EVTCHN(evtchn)) {
                mtx_unlock_spin(&irq_mapping_update_lock);
                return;
        }

        /*
         * Send future instances of this interrupt to the other vcpu.
         * NB: this disabled code assumes the old-style evtchn_op union
         * ('op.u.bind_vcpu').
         */
        op.u.bind_vcpu.port = evtchn;
        op.u.bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(&op) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);

        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
        unsigned tcpu = ffs(dest) - 1;
        rebind_irq_to_cpu(irq, tcpu);
}
#endif

/*
 * Interface to generic handling in intr_machdep.c
 */

/*------------ interrupt handling --------------------------------------*/

static void     xenpic_dynirq_enable_source(struct intsrc *isrc);
static void     xenpic_dynirq_disable_source(struct intsrc *isrc, int);
static void     xenpic_dynirq_eoi_source(struct intsrc *isrc);
static void     xenpic_dynirq_enable_intr(struct intsrc *isrc);
static void     xenpic_dynirq_disable_intr(struct intsrc *isrc);

static void     xenpic_pirq_enable_source(struct intsrc *isrc);
static void     xenpic_pirq_disable_source(struct intsrc *isrc, int);
static void     xenpic_pirq_eoi_source(struct intsrc *isrc);
static void     xenpic_pirq_enable_intr(struct intsrc *isrc);

static int      xenpic_vector(struct intsrc *isrc);
static int      xenpic_source_pending(struct intsrc *isrc);
static void     xenpic_suspend(struct pic *pic);
static void     xenpic_resume(struct pic *pic);
static int      xenpic_assign_cpu(struct intsrc *, u_int apic_id);
struct pic xenpic_dynirq_template = {
        .pic_enable_source      = xenpic_dynirq_enable_source,
        .pic_disable_source     = xenpic_dynirq_disable_source,
        .pic_eoi_source         = xenpic_dynirq_eoi_source,
        .pic_enable_intr        = xenpic_dynirq_enable_intr,
        .pic_disable_intr       = xenpic_dynirq_disable_intr,
        .pic_vector             = xenpic_vector,
        .pic_source_pending     = xenpic_source_pending,
        .pic_suspend            = xenpic_suspend,
        .pic_resume             = xenpic_resume
};

struct pic xenpic_pirq_template = {
        .pic_enable_source      = xenpic_pirq_enable_source,
        .pic_disable_source     = xenpic_pirq_disable_source,
        .pic_eoi_source         = xenpic_pirq_eoi_source,
        .pic_enable_intr        = xenpic_pirq_enable_intr,
        .pic_vector             = xenpic_vector,
        .pic_source_pending     = xenpic_source_pending,
        .pic_suspend            = xenpic_suspend,
        .pic_resume             = xenpic_resume,
        .pic_assign_cpu         = xenpic_assign_cpu
};
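
/*
 * The two templates differ where the underlying semantics differ: only
 * the pirq template supplies pic_assign_cpu (physical IRQs can be
 * re-routed between CPUs), and only the dynirq template supplies
 * pic_disable_intr.
 */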
static void
xenpic_dynirq_enable_source(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;

        mtx_lock_spin(&irq_mapping_update_lock);
        if (xp->xp_masked) {
                irq = xenpic_vector(isrc);
                unmask_evtchn(evtchn_from_irq(irq));
                xp->xp_masked = FALSE;
        }
        mtx_unlock_spin(&irq_mapping_update_lock);
}
static void
xenpic_dynirq_disable_source(struct intsrc *isrc, int foo)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;

        mtx_lock_spin(&irq_mapping_update_lock);
        if (!xp->xp_masked) {
                irq = xenpic_vector(isrc);
                mask_evtchn(evtchn_from_irq(irq));
                xp->xp_masked = TRUE;
        }
        mtx_unlock_spin(&irq_mapping_update_lock);
}
static void
xenpic_dynirq_enable_intr(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;
        mtx_lock_spin(&irq_mapping_update_lock);
        xp->xp_masked = 0;
        irq = xenpic_vector(isrc);
        unmask_evtchn(evtchn_from_irq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}
static void
xenpic_dynirq_disable_intr(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;
        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        mask_evtchn(evtchn_from_irq(irq));
        xp->xp_masked = 1;
        mtx_unlock_spin(&irq_mapping_update_lock);
}
static void
xenpic_dynirq_eoi_source(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;
        mtx_lock_spin(&irq_mapping_update_lock);
        xp->xp_masked = 0;
        irq = xenpic_vector(isrc);
        unmask_evtchn(evtchn_from_irq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}
static int
xenpic_vector(struct intsrc *isrc)
{
        struct xenpic_intsrc *pin;

        pin = (struct xenpic_intsrc *)isrc;

        return (pin->xp_vector);
}
static int
xenpic_source_pending(struct intsrc *isrc)
{
        struct xenpic_intsrc *pin = (struct xenpic_intsrc *)isrc;

        /* XXXEN: TODO */
        printf("xenpic_source_pending(): vector=%x,masked=%x\n",
            pin->xp_vector, pin->xp_masked);

        /* notify_remote_via_evtchn(pin->xp_vector); // XXX RS: Is this correct? */
        return (0);
}
static void
xenpic_suspend(struct pic *pic)
{
        TODO;
}

static void
xenpic_resume(struct pic *pic)
{
        TODO;
}

static int
xenpic_assign_cpu(struct intsrc *isrc, u_int apic_id)
{
        TODO;
        return (EOPNOTSUPP);
}

void
notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
        else
                panic("invalid event channel for irq %d", irq);
}

/* Required for support of physical devices. */
static inline void
pirq_unmask_notify(int pirq)
{
        struct physdev_eoi eoi = { .irq = pirq };

        if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])))
                (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
}
static inline void
pirq_query_unmask(int pirq)
{
        struct physdev_irq_status_query irq_status_query;

        irq_status_query.irq = pirq;
        (void)HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY, &irq_status_query);
        clear_bit(pirq, &pirq_needs_unmask_notify[0]);
        if (irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
                set_bit(pirq, &pirq_needs_unmask_notify[0]);
}

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq)       (intr_lookup_source(_irq) == NULL)
static void
|
|
|
|
xenpic_pirq_enable_intr(struct intsrc *isrc)
|
|
|
|
{
|
|
|
|
struct evtchn_bind_pirq bind_pirq;
|
|
|
|
int evtchn;
|
|
|
|
unsigned int irq;
|
|
|
|
|
|
|
|
mtx_lock_spin(&irq_mapping_update_lock);
|
|
|
|
irq = xenpic_vector(isrc);
|
|
|
|
evtchn = evtchn_from_irq(irq);
|
|
|
|
|
|
|
|
if (VALID_EVTCHN(evtchn))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
bind_pirq.pirq = irq;
|
|
|
|
/* NB. We are happy to share unless we are probing. */
|
|
|
|
bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
|
|
|
|
|
|
|
|
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
|
2008-10-23 07:20:43 +00:00
|
|
|
#ifndef XEN_PRIVILEGED_GUEST
|
|
|
|
panic("unexpected pirq call");
|
|
|
|
#endif
|
2008-08-12 07:36:56 +00:00
|
|
|
if (!probing_irq(irq)) /* Some failures are expected when probing. */
|
|
|
|
printf("Failed to obtain physical IRQ %d\n", irq);
|
|
|
|
mtx_unlock_spin(&irq_mapping_update_lock);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
evtchn = bind_pirq.port;
|
|
|
|
|
|
|
|
pirq_query_unmask(irq_to_pirq(irq));
|
|
|
|
|
|
|
|
bind_evtchn_to_cpu(evtchn, 0);
|
|
|
|
evtchn_to_irq[evtchn] = irq;
|
|
|
|
irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);
|
|
|
|
|
|
|
|
out:
|
|
|
|
unmask_evtchn(evtchn);
|
|
|
|
pirq_unmask_notify(irq_to_pirq(irq));
|
|
|
|
mtx_unlock_spin(&irq_mapping_update_lock);
|
|
|
|
}

static void
xenpic_pirq_enable_source(struct intsrc *isrc)
{
	int evtchn;
	unsigned int irq;

	mtx_lock_spin(&irq_mapping_update_lock);
	irq = xenpic_vector(isrc);
	evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		goto done;

	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq_to_pirq(irq));
done:
	mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_disable_source(struct intsrc *isrc, int eoi)
{
	int evtchn;
	unsigned int irq;

	mtx_lock_spin(&irq_mapping_update_lock);
	irq = xenpic_vector(isrc);
	evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		goto done;

	mask_evtchn(evtchn);
done:
	mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_eoi_source(struct intsrc *isrc)
{
	int evtchn;
	unsigned int irq;

	mtx_lock_spin(&irq_mapping_update_lock);
	irq = xenpic_vector(isrc);
	evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		goto done;

	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq_to_pirq(irq));
done:
	mtx_unlock_spin(&irq_mapping_update_lock);
}

int
irq_to_evtchn_port(int irq)
{
	return (evtchn_from_irq(irq));
}
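
/*
 * Example (a sketch; the xenbus path and node names are illustrative):
 * a device backend advertises its channel to the peer domain by
 * publishing the raw port, not the local IRQ number:
 *
 *	error = xenbus_printf(XBT_NIL, path, "event-channel", "%u",
 *	    irq_to_evtchn_port(sc->irq));
 */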

void
mask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;

	synch_set_bit(port, &s->evtchn_mask[0]);
}
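
/*
 * Example (a sketch under assumed driver names): callers typically
 * bracket a polling section with mask/unmask so no upcalls arrive
 * while the ring is drained:
 *
 *	mask_evtchn(port);
 *	drain_ring(sc);			// hypothetical driver helper
 *	unmask_evtchn(port);		// may force an upcall if pending
 */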

void
unmask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	unsigned int cpu = PCPU_GET(cpuid);
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };

		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
		return;
	}

	synch_clear_bit(port, &s->evtchn_mask[0]);

	/*
	 * The following is basically the equivalent of 'hw_resend_irq'. Just
	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
	 * masked.
	 */
	if (synch_test_bit(port, &s->evtchn_pending[0]) &&
	    !synch_test_and_set_bit(port / LONG_BIT,
	    &vcpu_info->evtchn_pending_sel)) {
		vcpu_info->evtchn_upcall_pending = 1;
		if (!vcpu_info->evtchn_upcall_mask)
			force_evtchn_callback();
	}
}
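
/*
 * Worked example of the re-raise above, assuming LONG_BIT == 32:
 * port 70 corresponds to bit 70 % 32 = 6 of evtchn_pending word
 * 70 / 32 = 2, so the local re-raise sets bit 2 of this vCPU's
 * evtchn_pending_sel and then evtchn_upcall_pending; that is the same
 * state an event delivered by Xen would leave behind, which is why the
 * forced callback finds and dispatches it.
 */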

void
irq_resume(void)
{
	int cpu, pirq, virq, ipi, irq, evtchn;
	struct evtchn_bind_virq bind_virq;
	struct evtchn_bind_ipi bind_ipi;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* Check that no PIRQs are still bound. */
	for (pirq = 0; pirq < NR_PIRQS; pirq++) {
		KASSERT(irq_info[pirq_to_irq(pirq)] == IRQ_UNBOUND,
		    ("pirq_to_irq inconsistent"));
	}

	/* Secondary CPUs must have no VIRQ or IPI bindings. */
	for (cpu = 1; cpu < XEN_LEGACY_MAX_VCPUS; cpu++) {
		for (virq = 0; virq < NR_VIRQS; virq++) {
			KASSERT(pcpu_find(cpu)->pc_virq_to_irq[virq] == -1,
			    ("virq_to_irq inconsistent"));
		}
		for (ipi = 0; ipi < NR_IPIS; ipi++) {
			KASSERT(pcpu_find(cpu)->pc_ipi_to_irq[ipi] == -1,
			    ("ipi_to_irq inconsistent"));
		}
	}

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < NR_IRQS; irq++) {
		/* Zap the binding; the evtchn is the low _EVTCHN_BITS. */
		irq_info[irq] &= ~((1U << _EVTCHN_BITS) - 1);
	}
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	/* Primary CPU: rebind VIRQs automatically. */
	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = pcpu_find(0)->pc_virq_to_irq[virq]) == -1)
			continue;

		KASSERT(irq_info[irq] == mk_irq_info(IRQT_VIRQ, virq, 0),
		    ("irq_info inconsistent"));

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = 0;
		HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}

	/* Primary CPU: rebind IPIs automatically. */
	for (ipi = 0; ipi < NR_IPIS; ipi++) {
		if ((irq = pcpu_find(0)->pc_ipi_to_irq[ipi]) == -1)
			continue;

		KASSERT(irq_info[irq] == mk_irq_info(IRQT_IPI, ipi, 0),
		    ("irq_info inconsistent"));

		/* Get a new binding from Xen. */
		memset(&bind_ipi, 0, sizeof(bind_ipi));
		bind_ipi.vcpu = 0;
		HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}
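
/*
 * Caller sketch (assumed ordering, not code from this file): a
 * save/restore path is expected to re-establish event-channel state
 * with upcalls still masked, roughly:
 *
 *	HYPERVISOR_suspend(...);	// domain is restored here
 *	gnttab_resume();		// hypothetical companion step
 *	irq_resume();			// rebind VIRQs/IPIs, remask rest
 */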

static void
evtchn_init(void *dummy __unused)
{
	int i, cpu;
	struct xenpic_intsrc *pin, *tpin;

	init_evtchn_cpu_bindings();

	/* No VIRQ or IPI bindings. */
	for (cpu = 0; cpu < mp_ncpus; cpu++) {
		for (i = 0; i < NR_VIRQS; i++)
			pcpu_find(cpu)->pc_virq_to_irq[i] = -1;
		for (i = 0; i < NR_IPIS; i++)
			pcpu_find(cpu)->pc_ipi_to_irq[i] = -1;
	}

	/* No event-channel -> IRQ mappings. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		evtchn_to_irq[i] = -1;
		mask_evtchn(i); /* No event channels are 'live' right now. */
	}

	/* No IRQ -> event-channel mappings. */
	for (i = 0; i < NR_IRQS; i++)
		irq_info[i] = IRQ_UNBOUND;

	xp = malloc(sizeof(struct xenpic) +
	    NR_IRQS * sizeof(struct xenpic_intsrc), M_DEVBUF, M_WAITOK);

	xp->xp_dynirq_pic = &xenpic_dynirq_template;
	xp->xp_pirq_pic = &xenpic_pirq_template;
	xp->xp_numintr = NR_IRQS;
	bzero(xp->xp_pins, sizeof(struct xenpic_intsrc) * NR_IRQS);

	/* The PICs must be registered before any sources are added. */
	if (intr_register_pic(&xenpic_pirq_template))
		panic("XEN: intr_register_pic() failure (pirq)");
	if (intr_register_pic(&xenpic_dynirq_template))
		panic("XEN: intr_register_pic() failure (dynirq)");

	/*
	 * Initialize the dynamic IRQs: we set up the structures here but
	 * do not bind them; bind_evtchn_to_irqhandler() does that later.
	 */
	pin = xp->xp_pins;
	for (i = 0; i < NR_DYNIRQS; i++) {
		/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
		irq_bindcount[dynirq_to_irq(i)] = 0;

		tpin = &pin[dynirq_to_irq(i)];
		tpin->xp_intsrc.is_pic = xp->xp_dynirq_pic;
		tpin->xp_vector = dynirq_to_irq(i);
	}

	/*
	 * Now go ahead and claim every PIRQ there is.
	 */
	pin = xp->xp_pins;
	for (i = 0; i < NR_PIRQS; i++) {
		/* PIRQ space is currently unbound. Zero the refcnts. */
		irq_bindcount[pirq_to_irq(i)] = 0;

#ifdef RTC_IRQ
		/* If not domain 0, force our RTC driver to fail its probe. */
		if ((i == RTC_IRQ) &&
		    !(xen_start_info->flags & SIF_INITDOMAIN))
			continue;
#endif
		tpin = &pin[pirq_to_irq(i)];
		tpin->xp_intsrc.is_pic = xp->xp_pirq_pic;
		tpin->xp_vector = pirq_to_irq(i);
	}
}

SYSINIT(evtchn_init, SI_SUB_INTR, SI_ORDER_MIDDLE, evtchn_init, NULL);