Add an explicit exit code 'SPINUP_AP' to tell the controlling process that an
AP needs to be activated by spinning up an execution context for it.

The local APIC emulation is now done entirely in the hypervisor: it detects
writes to the ICR_LO register that try to bring up an AP and, in response to
such writes, returns to userspace with an exit code of SPINUP_AP.

Reviewed by: grehan
Neel Natu 2012-09-25 02:33:25 +00:00
parent 98ed632c63
commit edf89256dd
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/bhyve/; revision=240912
8 changed files with 307 additions and 224 deletions
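
For orientation before the diffs: the sketch below shows how the controlling
process is expected to consume the new exit code. It mirrors the
vmexit_spinup_ap() handler added to fbsdrun.c in this change, with the
dispatch reduced to its essentials; the handle_vmexit() wrapper and its
return-value convention are illustrative only, not part of the commit.

#include <machine/vmm.h>
#include <vmmapi.h>

#include "spinup_ap.h"

/*
 * Sketch: react to one VM exit.  VM_EXITCODE_SPINUP_AP carries the target
 * vcpu and the start %rip derived from the SIPI vector; spinup_ap() (added
 * by this commit) resets that vcpu and creates an execution context for it.
 */
static int
handle_vmexit(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
        int newcpu;

        switch (vme->exitcode) {
        case VM_EXITCODE_SPINUP_AP:
                newcpu = spinup_ap(ctx, *pvcpu,
                    vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);
                /* When vcpus are multiplexed, switch to the new vcpu. */
                if (*pvcpu != newcpu)
                        *pvcpu = newcpu;
                return (0);
        default:
                return (-1);    /* other exit codes handled as before */
        }
}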

View File

@@ -228,6 +228,7 @@ enum vm_exitcode {
VM_EXITCODE_MTRAP,
VM_EXITCODE_PAUSE,
VM_EXITCODE_PAGING,
VM_EXITCODE_SPINUP_AP,
VM_EXITCODE_MAX
};
@@ -260,6 +261,10 @@ struct vm_exit {
uint32_t code; /* ecx value */
uint64_t wval;
} msr;
struct {
int vcpu;
uint64_t rip;
} spinup_ap;
} u;
};

View File

@@ -1253,6 +1253,14 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
vm_exit_update_rip(vmexit);
vmexit->rip += vmexit->inst_length;
vmexit->inst_length = 0;
/*
* Special case for spinning up an AP - exit to userspace to
* give the controlling process a chance to intercept and
* spin up a thread for the AP.
*/
if (vmexit->exitcode == VM_EXITCODE_SPINUP_AP)
handled = 0;
} else {
if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
/*

View File

@@ -89,6 +89,12 @@ static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");
#define x2apic(vlapic) ((vlapic)->msr_apicbase & APICBASE_X2APIC)
enum boot_state {
BS_INIT,
BS_SIPI,
BS_RUNNING
};
struct vlapic {
struct vm *vm;
int vcpuid;
@@ -112,6 +118,7 @@ struct vlapic {
int isrvec_stk_top;
uint64_t msr_apicbase;
enum boot_state boot_state;
};
static void
@@ -168,6 +175,11 @@ vlapic_op_reset(void* dev)
memset(lapic, 0, sizeof(*lapic));
lapic->apr = vlapic->vcpuid;
vlapic_init_ipi(vlapic);
if (vlapic->vcpuid == 0)
vlapic->boot_state = BS_RUNNING; /* BSP */
else
vlapic->boot_state = BS_INIT; /* AP */
return 0;
@@ -418,6 +430,8 @@ lapic_process_icr(struct vlapic *vlapic, uint64_t icrval)
int i;
cpuset_t dmask;
uint32_t dest, vec, mode;
struct vlapic *vlapic2;
struct vm_exit *vmexit;
dest = icrval >> 32;
vec = icrval & APIC_VECTOR_MASK;
@@ -452,11 +466,46 @@ lapic_process_icr(struct vlapic *vlapic, uint64_t icrval)
return (0); /* handled completely in the kernel */
}
/*
* XXX this assumes that the startup IPI always succeeds
*/
if (mode == APIC_DELMODE_STARTUP)
vm_activate_cpu(vlapic->vm, dest);
if (mode == APIC_DELMODE_INIT) {
if ((icrval & APIC_LEVEL_MASK) == APIC_LEVEL_DEASSERT)
return (0);
if (vlapic->vcpuid == 0 && dest != 0 && dest < VM_MAXCPU) {
vlapic2 = vm_lapic(vlapic->vm, dest);
/* move from INIT to waiting-for-SIPI state */
if (vlapic2->boot_state == BS_INIT) {
vlapic2->boot_state = BS_SIPI;
}
return (0);
}
}
if (mode == APIC_DELMODE_STARTUP) {
if (vlapic->vcpuid == 0 && dest != 0 && dest < VM_MAXCPU) {
vlapic2 = vm_lapic(vlapic->vm, dest);
/*
* Ignore SIPIs in any state other than wait-for-SIPI
*/
if (vlapic2->boot_state != BS_SIPI)
return (0);
vmexit = vm_exitinfo(vlapic->vm, vlapic->vcpuid);
vmexit->exitcode = VM_EXITCODE_SPINUP_AP;
vmexit->u.spinup_ap.vcpu = dest;
vmexit->u.spinup_ap.rip = vec << PAGE_SHIFT;
/*
* XXX this assumes that the startup IPI always succeeds
*/
vlapic2->boot_state = BS_RUNNING;
vm_activate_cpu(vlapic2->vm, dest);
return (0);
}
}
/*
* This will cause a return to userland.

View File

@@ -8,6 +8,7 @@ SRCS= atpic.c consport.c dbgport.c elcr.c fbsdrun.c inout.c
SRCS+= instruction_emul.c ioapic.c mevent.c
SRCS+= pci_emul.c pci_hostbridge.c pci_passthru.c pci_virtio_block.c
SRCS+= pci_virtio_net.c pci_uart.c pit_8254.c post.c rtc.c uart.c xmsr.c
SRCS+= spinup_ap.c
NO_MAN=

View File

@@ -55,6 +55,7 @@ __FBSDID("$FreeBSD$");
#include "xmsr.h"
#include "instruction_emul.h"
#include "ioapic.h"
#include "spinup_ap.h"
#define DEFAULT_GUEST_HZ 100
#define DEFAULT_GUEST_TSLICE 200
@@ -345,6 +346,23 @@ vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
return (retval);
}
static int
vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
{
int newcpu;
int retval = VMEXIT_CONTINUE;
newcpu = spinup_ap(ctx, *pvcpu,
vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);
if (guest_vcpu_mux && *pvcpu != newcpu) {
retval = VMEXIT_SWITCH;
*pvcpu = newcpu;
}
return (retval);
}
static int
vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
{
@@ -471,7 +489,8 @@ static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
[VM_EXITCODE_RDMSR] = vmexit_rdmsr,
[VM_EXITCODE_WRMSR] = vmexit_wrmsr,
[VM_EXITCODE_MTRAP] = vmexit_mtrap,
[VM_EXITCODE_PAGING] = vmexit_paging
[VM_EXITCODE_PAGING] = vmexit_paging,
[VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap,
};
static void

usr.sbin/bhyve/spinup_ap.c (new file, 180 lines)
View File

@@ -0,0 +1,180 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/types.h>
#include <machine/vmm.h>
#include <vmmapi.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "fbsdrun.h"
#include "spinup_ap.h"
/*
* Trampoline for hypervisor direct 64-bit jump.
*
* 0 - signature for guest->host verification
* 8 - kernel virtual address of trampoline
* 16 - instruction virtual address
* 24 - stack pointer virtual address
* 32 - CR3, physical address of kernel page table
* 40 - 24-byte area for null/code/data GDT entries
*/
#define MP_V64T_SIG 0xcafebabecafebabeULL
struct mp_v64tramp {
uint64_t mt_sig;
uint64_t mt_virt;
uint64_t mt_eip;
uint64_t mt_rsp;
uint64_t mt_cr3;
uint64_t mt_gdtr[3];
};
static void
spinup_ap_realmode(struct vmctx *ctx, int newcpu, uint64_t *rip)
{
int vector, error;
uint16_t cs;
uint64_t desc_base;
uint32_t desc_limit, desc_access;
vector = *rip >> PAGE_SHIFT;
*rip = 0;
/*
* Update the %cs and %rip of the guest so that it starts
* executing real mode code at 'vector << 12'.
*/
error = vm_set_register(ctx, newcpu, VM_REG_GUEST_RIP, *rip);
assert(error == 0);
error = vm_get_desc(ctx, newcpu, VM_REG_GUEST_CS, &desc_base,
&desc_limit, &desc_access);
assert(error == 0);
desc_base = vector << PAGE_SHIFT;
error = vm_set_desc(ctx, newcpu, VM_REG_GUEST_CS,
desc_base, desc_limit, desc_access);
assert(error == 0);
cs = (vector << PAGE_SHIFT) >> 4;
error = vm_set_register(ctx, newcpu, VM_REG_GUEST_CS, cs);
assert(error == 0);
}
static void
spinup_ap_direct64(struct vmctx *ctx, int newcpu, uint64_t *rip)
{
struct mp_v64tramp *mvt;
char *errstr;
int error;
uint64_t gdtbase;
mvt = paddr_guest2host(*rip);
assert(mvt->mt_sig == MP_V64T_SIG);
/*
* Set up the 3-entry GDT using memory supplied in the
* guest's trampoline structure.
*/
vm_setup_freebsd_gdt(mvt->mt_gdtr);
#define CHECK_ERROR(msg) \
if (error != 0) { \
errstr = msg; \
goto err_exit; \
}
/* entry point */
*rip = mvt->mt_eip;
/* Get the guest virtual address of the GDT */
gdtbase = mvt->mt_virt + __offsetof(struct mp_v64tramp, mt_gdtr);
error = vm_setup_freebsd_registers(ctx, newcpu, mvt->mt_eip,
mvt->mt_cr3, gdtbase, mvt->mt_rsp);
CHECK_ERROR("vm_setup_freebsd_registers");
return;
err_exit:
printf("spinup_ap_direct64: machine state error: %s", errstr);
exit(1);
}
int
spinup_ap(struct vmctx *ctx, int vcpu, int newcpu, uint64_t rip)
{
int error;
assert(newcpu != 0);
assert(newcpu < guest_ncpus);
error = vcpu_reset(ctx, newcpu);
assert(error == 0);
/* Set up capabilities */
if (fbsdrun_vmexit_on_hlt()) {
error = vm_set_capability(ctx, newcpu, VM_CAP_HALT_EXIT, 1);
assert(error == 0);
}
if (fbsdrun_vmexit_on_pause()) {
error = vm_set_capability(ctx, newcpu, VM_CAP_PAUSE_EXIT, 1);
assert(error == 0);
}
/*
* There are 2 startup modes possible here:
* - if the CPU supports 'unrestricted guest' mode, the spinup can
* set up the processor state in power-on 16-bit mode, with the CS:IP
* init'd to the specified low-mem 4K page.
* - if the guest has requested a 64-bit trampoline in the low-mem 4K
* page by placing the specified signature in it, set up the register
* state using the register values in the trampoline. Note that this
* requires accessing guest physical memory to read the signature
* while 'unrestricted mode' does not.
*/
error = vm_set_capability(ctx, newcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
if (error) {
spinup_ap_direct64(ctx, newcpu, &rip);
} else {
spinup_ap_realmode(ctx, newcpu, &rip);
}
fbsdrun_addcpu(ctx, newcpu, rip);
return (newcpu);
}
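
As a concrete check of the real-mode path in spinup_ap_realmode() above: a
SIPI vector of 0x0a puts the AP's first instruction at physical address
0xa000 (vector << PAGE_SHIFT), with a CS selector of 0x0a00 and %rip 0. A
standalone snippet verifying that arithmetic (the constants are illustrative):

#include <assert.h>
#include <stdint.h>

#define AP_PAGE_SHIFT   12      /* 4K pages, matching PAGE_SHIFT above */

int
main(void)
{
        uint8_t vector = 0x0a;                          /* example SIPI vector */
        uint64_t start = (uint64_t)vector << AP_PAGE_SHIFT;    /* 0xa000 */
        uint16_t cs = start >> 4;                       /* 0x0a00 */
        uint16_t ip = 0;

        /* Real-mode decode: CS:IP = 0x0a00:0x0000 -> physical 0xa000. */
        assert(((uint32_t)cs << 4) + ip == start);
        return (0);
}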

View File

@@ -0,0 +1,34 @@
/*-
* Copyright (c) 2012 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SPINUP_AP_H_
#define _SPINUP_AP_H_
int spinup_ap(struct vmctx *ctx, int vcpu, int newcpu, uint64_t rip);
#endif

View File

@@ -29,233 +29,20 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <x86/apicreg.h>
#include <sys/types.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <machine/vmm.h>
#include <vmmapi.h>
#include "fbsdrun.h"
#include <stdio.h>
#include <stdlib.h>
#include "xmsr.h"
/*
* Trampoline for hypervisor direct 64-bit jump.
*
* 0 - signature for guest->host verification
* 8 - kernel virtual address of trampoline
* 16 - instruction virtual address
* 24 - stack pointer virtual address
* 32 - CR3, physical address of kernel page table
* 40 - 24-byte area for null/code/data GDT entries
*/
#define MP_V64T_SIG 0xcafebabecafebabeULL
struct mp_v64tramp {
uint64_t mt_sig;
uint64_t mt_virt;
uint64_t mt_eip;
uint64_t mt_rsp;
uint64_t mt_cr3;
uint64_t mt_gdtr[3];
};
/*
* CPU 0 is considered to be the BSP and is set to the RUNNING state.
* All other CPUs are set up in the INIT state.
*/
#define BSP 0
enum cpu_bstate {
CPU_S_INIT,
CPU_S_SIPI,
CPU_S_RUNNING
} static cpu_b[VM_MAXCPU] = { [BSP] = CPU_S_RUNNING };
static void spinup_ap(struct vmctx *, int, int, uint64_t *);
static void spinup_ap_direct64(struct vmctx *, int, uintptr_t, uint64_t *);
int
emulate_wrmsr(struct vmctx *ctx, int vcpu, uint32_t code, uint64_t val)
{
int dest;
int mode;
int thiscpu;
int vec;
int error, retval;
uint64_t rip;
retval = vcpu;
thiscpu = 1 << vcpu;
/*
* The only MSR value handled is the x2apic CR register
*/
if (code != 0x830) {
printf("Unknown WRMSR code %x, val %lx, cpu %d\n",
code, val, vcpu);
exit(1);
}
/*
* The value written to the MSR will generate an IPI to
* a set of CPUs. If this is a SIPI, create the initial
* state for the CPU and switch to it. Otherwise, inject
* an interrupt for the destination CPU(s), and request
* a switch to the next available one by returning -1
*/
dest = val >> 32;
vec = val & APIC_VECTOR_MASK;
mode = val & APIC_DELMODE_MASK;
switch (mode) {
case APIC_DELMODE_INIT:
/*
* Ignore legacy de-assert INITs in x2apic mode
*/
if ((val & APIC_LEVEL_MASK) == APIC_LEVEL_DEASSERT) {
break;
}
assert(dest != 0);
assert(dest < guest_ncpus);
assert(cpu_b[dest] == CPU_S_INIT);
/*
* Move CPU to wait-for-SIPI state
*/
error = vcpu_reset(ctx, dest);
assert(error == 0);
cpu_b[dest] = CPU_S_SIPI;
break;
case APIC_DELMODE_STARTUP:
assert(dest != 0);
assert(dest < guest_ncpus);
/*
* Ignore SIPIs in any state other than wait-for-SIPI
*/
if (cpu_b[dest] != CPU_S_SIPI) {
break;
}
/*
* Bring up the AP and signal the main loop that it is
* available and to switch to it.
*/
spinup_ap(ctx, dest, vec, &rip);
cpu_b[dest] = CPU_S_RUNNING;
fbsdrun_addcpu(ctx, dest, rip);
retval = dest;
break;
default:
printf("APIC delivery mode %lx not supported!\n",
val & APIC_DELMODE_MASK);
exit(1);
}
return (retval);
}
/*
* There are 2 startup modes possible here:
* - if the CPU supports 'unrestricted guest' mode, the spinup can
* set up the processor state in power-on 16-bit mode, with the CS:IP
* init'd to the specified low-mem 4K page.
* - if the guest has requested a 64-bit trampoline in the low-mem 4K
* page by placing the specified signature in it, set up the register
* state using the register values in the trampoline. Note that this
* requires accessing guest physical memory to read the signature
* while 'unrestricted mode' does not.
*/
static void
spinup_ap(struct vmctx *ctx, int newcpu, int vector, uint64_t *rip)
{
int error;
uint16_t cs;
uint64_t desc_base;
uint32_t desc_limit, desc_access;
if (fbsdrun_vmexit_on_hlt()) {
error = vm_set_capability(ctx, newcpu, VM_CAP_HALT_EXIT, 1);
assert(error == 0);
}
if (fbsdrun_vmexit_on_pause()) {
error = vm_set_capability(ctx, newcpu, VM_CAP_PAUSE_EXIT, 1);
assert(error == 0);
}
error = vm_set_capability(ctx, newcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
if (error) {
/*
* If the guest does not support real-mode execution then
* we will bring up the AP directly in 64-bit mode.
*/
spinup_ap_direct64(ctx, newcpu, vector << PAGE_SHIFT, rip);
} else {
/*
* Update the %cs and %rip of the guest so that it starts
* executing real mode code at 'vector << 12'.
*/
*rip = 0;
error = vm_set_register(ctx, newcpu, VM_REG_GUEST_RIP, *rip);
assert(error == 0);
error = vm_get_desc(ctx, newcpu, VM_REG_GUEST_CS, &desc_base,
&desc_limit, &desc_access);
assert(error == 0);
desc_base = vector << PAGE_SHIFT;
error = vm_set_desc(ctx, newcpu, VM_REG_GUEST_CS,
desc_base, desc_limit, desc_access);
assert(error == 0);
cs = (vector << PAGE_SHIFT) >> 4;
error = vm_set_register(ctx, newcpu, VM_REG_GUEST_CS, cs);
assert(error == 0);
}
}
static void
spinup_ap_direct64(struct vmctx *ctx, int newcpu, uintptr_t gaddr,
uint64_t *rip)
{
struct mp_v64tramp *mvt;
char *errstr;
int error;
uint64_t gdtbase;
mvt = paddr_guest2host(gaddr);
assert(mvt->mt_sig == MP_V64T_SIG);
/*
* Set up the 3-entry GDT using memory supplied in the
* guest's trampoline structure.
*/
vm_setup_freebsd_gdt(mvt->mt_gdtr);
#define CHECK_ERROR(msg) \
if (error != 0) { \
errstr = msg; \
goto err_exit; \
}
/* entry point */
*rip = mvt->mt_eip;
/* Get the guest virtual address of the GDT */
gdtbase = mvt->mt_virt + __offsetof(struct mp_v64tramp, mt_gdtr);
error = vm_setup_freebsd_registers(ctx, newcpu, mvt->mt_eip,
mvt->mt_cr3, gdtbase, mvt->mt_rsp);
CHECK_ERROR("vm_setup_freebsd_registers");
return;
err_exit:
printf("spinup_ap_direct64: machine state error: %s", errstr);
printf("Unknown WRMSR code %x, val %lx, cpu %d\n", code, val, vcpu);
exit(1);
}