Implement infrastructure for gluing i386 IPI functions into Xen's infrastructure

Kip Macy 2008-10-21 06:39:40 +00:00
parent 756fb0605f
commit 764b7ef593
2 changed files with 151 additions and 29 deletions


@@ -184,14 +184,12 @@ call_evtchn_upcall:
hypervisor_callback_pending:
DISABLE_INTERRUPTS(%esi) /* cli */
jmp 10b
/*
* alltraps entry point. Interrupts are enabled if this was a trap
* gate (TGT), else disabled if this was an interrupt gate (IGT).
* Note that int0x80_syscall is a trap gate. Only page faults
* use an interrupt gate.
*/
SUPERALIGN_TEXT
.globl alltraps
.type alltraps,@function


@@ -86,14 +86,10 @@ __FBSDID("$FreeBSD$");
#include <machine/xen/xen-os.h>
#include <machine/xen/evtchn.h>
#include <machine/xen/xen_intr.h>
#include <machine/xen/hypervisor.h>
#include <xen/interface/vcpu.h>
#define WARMBOOT_TARGET 0
#define WARMBOOT_OFF (KERNBASE + 0x0467)
#define WARMBOOT_SEG (KERNBASE + 0x0469)
#define stop_cpus_with_nmi 0
@@ -105,6 +101,10 @@ extern struct pcpu __pcpu[];
static int bootAP;
static union descriptor *bootAPgdt;
static DEFINE_PER_CPU(int, resched_irq);
static DEFINE_PER_CPU(int, callfunc_irq);
static char resched_name[NR_CPUS][15];
static char callfunc_name[NR_CPUS][15];
/* Free these after use */
void *bootstacks[MAXCPU];
@@ -119,6 +119,8 @@ vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;
typedef void call_data_func_t(uintptr_t , uintptr_t);
static u_int logical_cpus;
/* used to hold the AP's until we are ready to release them */
@@ -141,8 +143,6 @@ int cpu_apic_ids[MAXCPU];
/* Holds pending bitmap based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];
static u_int boot_address;
static void assign_cpu_ids(void);
static void set_interrupt_apic_ids(void);
int start_all_aps(void);
@@ -300,6 +300,105 @@ cpu_mp_start(void)
}
static void
iv_rendezvous(uintptr_t a, uintptr_t b)
{
}
static void
iv_invltlb(uintptr_t a, uintptr_t b)
{
}
static void
iv_invlpg(uintptr_t a, uintptr_t b)
{
}
static void
iv_invlrng(uintptr_t a, uintptr_t b)
{
}
static void
iv_invlcache(uintptr_t a, uintptr_t b)
{
}
static void
iv_lazypmap(uintptr_t a, uintptr_t b)
{
}
static void
iv_bitmap_vector(uintptr_t a, uintptr_t b)
{
}
static call_data_func_t *ipi_vectors[IPI_BITMAP_VECTOR + 1] =
{ iv_rendezvous,
iv_invltlb,
iv_invlpg,
iv_invlrng,
iv_invlcache,
iv_lazypmap,
iv_bitmap_vector
};
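As orientation, here is a minimal sketch (not part of this commit) of how a received vector number is meant to select a handler from the table above; example_dispatch_ipi is a hypothetical name, and the direct indexing assumes the IPI_* constants run from 0 through IPI_BITMAP_VECTOR, as the array bound and the ipi_vectors[vector] lookup in smp_tlb_shootdown() near the end of this diff imply.

/* Illustrative only: dispatch an IPI vector through ipi_vectors[]. */
static void
example_dispatch_ipi(u_int vector, uintptr_t a1, uintptr_t a2)
{
	call_data_func_t *func;

	KASSERT(vector <= IPI_BITMAP_VECTOR,
	    ("example_dispatch_ipi: bad IPI vector %u", vector));
	func = ipi_vectors[vector];
	(*func)(a1, a2);
}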
/*
* Reschedule call back. Nothing to do,
* all the work is done automatically when
* we return from the interrupt.
*/
static void
smp_reschedule_interrupt(void *unused)
{
}
struct _call_data {
call_data_func_t *func;
uintptr_t arg1;
uintptr_t arg2;
atomic_t started;
atomic_t finished;
int wait;
};
static struct _call_data *call_data;
static void
smp_call_function_interrupt(void *unused)
{
call_data_func_t *func = call_data->func;
uintptr_t arg1 = call_data->arg1;
uintptr_t arg2 = call_data->arg2;
int wait = call_data->wait;
/*
* Notify initiating CPU that I've grabbed the data and am
* about to execute the function
*/
mb();
atomic_inc(&call_data->started);
/*
* At this point the info structure may be out of scope unless wait==1
*/
(*func)(arg1, arg2);
if (wait) {
mb();
atomic_inc(&call_data->finished);
}
}
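For context, a rough initiator-side sketch of this handshake follows (not part of this diff); the only sender this commit actually adds is smp_tlb_shootdown() further down. example_call_function is a hypothetical name, and atomic_set()/atomic_read() are assumed to come from the same Xen compatibility headers that supply atomic_t and atomic_inc().

/* Hypothetical sender side of the call_data protocol handled above. */
static void
example_call_function(call_data_func_t *func, uintptr_t arg1, uintptr_t arg2,
    int wait)
{
	static struct _call_data data;
	int ncpus = mp_ncpus - 1;	/* every CPU except ourselves */

	data.func = func;
	data.arg1 = arg1;
	data.arg2 = arg2;
	data.wait = wait;
	atomic_set(&data.started, 0);
	atomic_set(&data.finished, 0);
	call_data = &data;
	mb();				/* publish the data before kicking CPUs */

	/* Hypothetical: deliver the call-function IPI to the other CPUs. */
	ipi_all_but_self(CALL_FUNCTION_VECTOR);

	/* Wait until every target has copied func/arg1/arg2. */
	while (atomic_read(&data.started) != ncpus)
		cpu_spinwait();

	/* Optionally wait for all handlers to complete. */
	if (wait)
		while (atomic_read(&data.finished) != ncpus)
			cpu_spinwait();
}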
/*
* Print various information about the SMP system hardware and setup.
*/
@@ -323,6 +422,46 @@ cpu_mp_announce(void)
}
}
static int
xen_smp_intr_init(unsigned int cpu)
{
int rc;
per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;
sprintf(resched_name[cpu], "resched%u", cpu);
rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
cpu,
resched_name[cpu],
smp_reschedule_interrupt,
INTR_FAST);
per_cpu(resched_irq, cpu) = rc;
sprintf(callfunc_name[cpu], "callfunc%u", cpu);
rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
cpu,
callfunc_name[cpu],
smp_call_function_interrupt,
INTR_FAST);
if (rc < 0)
goto fail;
per_cpu(callfunc_irq, cpu) = rc;
if ((cpu != 0) && ((rc = ap_cpu_initclocks(cpu)) != 0))
goto fail;
return 0;
fail:
if (per_cpu(resched_irq, cpu) >= 0)
unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
if (per_cpu(callfunc_irq, cpu) >= 0)
unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
return rc;
}
#define MTOPSIZE (1<<(14 + PAGE_SHIFT))
/*
@@ -336,8 +475,6 @@ init_secondary(void)
/* bootAP is set in start_ap() to our ID. */
PCPU_SET(currentldt, _default_ldt);
gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
#if 0
@@ -434,6 +571,7 @@ init_secondary(void)
smp_active = 1; /* historic */
}
xen_smp_intr_init(bootAP);
mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
@@ -533,15 +671,11 @@ assign_cpu_ids(void)
int
start_all_aps(void)
{
u_int32_t mpbioswarmvec;
int x,apic_id, cpu;
struct pcpu *pc;
mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
/* save the current value of the warm-start vector */
mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
/* set up temporary P==V mapping for AP boot */
/* XXX this is a hack, we should boot the AP on its own stack/PTD */
@@ -550,10 +684,6 @@ start_all_aps(void)
apic_id = cpu_apic_ids[cpu];
/* setup a vector to our boot code */
*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
bootAP = cpu;
bootAPgdt = gdt + (512*cpu);
@@ -600,9 +730,6 @@ start_all_aps(void)
/* build our map of 'other' CPUs */
PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
/* restore the warmstart vector */
*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);
/* number of APs actually started */
@@ -624,11 +751,8 @@ smp_trap_init(trap_info_t *trap_ctxt)
}
}
void
cpu_initialize_context(unsigned int cpu);
extern int nkpt;
void
static void
cpu_initialize_context(unsigned int cpu)
{
/* vcpu_guest_context_t is too large to allocate on the stack.
@@ -645,7 +769,6 @@ cpu_initialize_context(unsigned int cpu)
* Page 0,[0-3] PTD
* Page 1, [4] boot stack
* Page [5] PDPT
*
*/
for (i = 0; i < NPGPTD + 2; i++) {
@@ -796,8 +919,9 @@ smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
if (!(read_eflags() & PSL_I))
panic("%s: interrupts disabled", __func__);
mtx_lock_spin(&smp_ipi_mtx);
smp_tlb_addr1 = addr1;
smp_tlb_addr2 = addr2;
call_data->func = ipi_vectors[vector];
call_data->arg1 = addr1;
call_data->arg2 = addr2;
atomic_store_rel_int(&smp_tlb_wait, 0);
ipi_all_but_self(vector);
while (smp_tlb_wait < ncpu)