If a vcpu disables its local apic and then executes a 'HLT', spin down the
vcpu and destroy its thread context. Also modify the 'HLT' processing to
ignore pending interrupts in the IRR if interrupts have been disabled by
the guest. The interrupt cannot be injected into the guest in any case, so
resuming it is futile.

With this change "halt" from a Linux guest works correctly.

Reviewed by:	grehan@
Tested by:	Tycho Nightingale (tycho.nightingale@pluribusnetworks.com)
commit 1c05219285
parent 8991c54091
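The decision this commit adds to the HLT path boils down to: a pending interrupt only keeps the vcpu running if the guest can actually accept it (PSL_I set), and a halted vcpu whose local APIC is disabled is spun down rather than put to sleep. The following is a minimal, self-contained C sketch of that decision for orientation only; struct vcpu_state, handle_hlt and the enum are made-up names, not bhyve's API, and it simplifies the real vm_handle_hlt()/vm_run() interplay shown in the diff below.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for vcpu state; not the actual bhyve structures. */
struct vcpu_state {
	bool lapic_enabled;	/* local APIC hardware- and software-enabled */
	bool intr_enabled;	/* PSL_I set in the guest's RFLAGS */
	bool intr_pending;	/* an interrupt is pending in the IRR */
	bool nmi_pending;	/* an NMI is pending */
};

enum hlt_action { HLT_RESUME, HLT_SLEEP, HLT_SPINDOWN };

/*
 * Decide what to do when a vcpu exits on HLT, mirroring the logic the
 * commit adds:
 *  - a pending interrupt only prevents sleeping if the guest can accept
 *    it, i.e. interrupts are not disabled;
 *  - if nothing can wake the vcpu and its local APIC is enabled, sleep;
 *  - if the local APIC is disabled, spin the vcpu down instead.
 */
static enum hlt_action
handle_hlt(const struct vcpu_state *v)
{
	bool intr_disabled = !v->intr_enabled;

	if (v->nmi_pending || (!intr_disabled && v->intr_pending))
		return (HLT_RESUME);

	return (v->lapic_enabled ? HLT_SLEEP : HLT_SPINDOWN);
}

int
main(void)
{
	/* A Linux guest doing "halt": APIC off, interrupts off. */
	struct vcpu_state v = { false, false, false, false };

	printf("action = %d\n", handle_hlt(&v));	/* prints 2 (HLT_SPINDOWN) */
	return (0);
}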
@@ -264,6 +264,7 @@ enum vm_exitcode {
 	VM_EXITCODE_PAGING,
 	VM_EXITCODE_INST_EMUL,
 	VM_EXITCODE_SPINUP_AP,
+	VM_EXITCODE_SPINDOWN_CPU,
 	VM_EXITCODE_MAX
 };
 
@@ -307,6 +308,9 @@ struct vm_exit {
 			int		vcpu;
 			uint64_t	rip;
 		} spinup_ap;
+		struct {
+			uint64_t	rflags;
+		} hlt;
 	} u;
 };
 
@@ -1336,7 +1336,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
 	struct vmcs *vmcs;
 	struct vmxctx *vmxctx;
 	uint32_t eax, ecx, edx, idtvec_info, idtvec_err, reason;
-	uint64_t qual, gpa;
+	uint64_t qual, gpa, rflags;
 
 	handled = 0;
 	vmcs = &vmx->vmcs[vcpu];
@@ -1406,7 +1406,10 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
 		break;
 	case EXIT_REASON_HLT:
 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
+		if ((error = vmread(VMCS_GUEST_RFLAGS, &rflags)) != 0)
+			panic("vmx_exit_process: vmread(rflags) %d", error);
 		vmexit->exitcode = VM_EXITCODE_HLT;
+		vmexit->u.hlt.rflags = rflags;
 		break;
 	case EXIT_REASON_MTF:
 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
@@ -53,6 +53,9 @@ __FBSDID("$FreeBSD$");
 #define VLAPIC_CTR1(vlapic, format, p1) \
 	VCPU_CTR1((vlapic)->vm, (vlapic)->vcpuid, format, p1)
 
+#define VLAPIC_CTR2(vlapic, format, p1, p2) \
+	VCPU_CTR2((vlapic)->vm, (vlapic)->vcpuid, format, p1, p2)
+
 #define VLAPIC_CTR_IRR(vlapic, msg) \
 do { \
 	uint32_t *irrptr = &(vlapic)->apic.irr0; \
@@ -221,6 +224,12 @@ vlapic_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
 	if (vector < 0 || vector >= 256)
 		panic("vlapic_set_intr_ready: invalid vector %d\n", vector);
 
+	if (!(lapic->svr & APIC_SVR_ENABLE)) {
+		VLAPIC_CTR1(vlapic, "vlapic is software disabled, ignoring "
+		    "interrupt %d", vector);
+		return;
+	}
+
 	idx = (vector / 32) * 4;
 	mask = 1 << (vector % 32);
 
@@ -593,6 +602,25 @@ vlapic_intr_accepted(struct vlapic *vlapic, int vector)
 	vlapic_update_ppr(vlapic);
 }
 
+static void
+lapic_set_svr(struct vlapic *vlapic, uint32_t new)
+{
+	struct LAPIC *lapic;
+	uint32_t old, changed;
+
+	lapic = &vlapic->apic;
+	old = lapic->svr;
+	changed = old ^ new;
+	if ((changed & APIC_SVR_ENABLE) != 0) {
+		if ((new & APIC_SVR_ENABLE) == 0) {
+			VLAPIC_CTR0(vlapic, "vlapic is software-disabled");
+		} else {
+			VLAPIC_CTR0(vlapic, "vlapic is software-enabled");
+		}
+	}
+	lapic->svr = new;
+}
+
 int
 vlapic_read(struct vlapic *vlapic, uint64_t offset, uint64_t *data)
 {
@@ -602,7 +630,7 @@ vlapic_read(struct vlapic *vlapic, uint64_t offset, uint64_t *data)
 
 	if (offset > sizeof(*lapic)) {
 		*data = 0;
-		return 0;
+		goto done;
 	}
 
 	offset &= ~3;
@@ -680,6 +708,8 @@ vlapic_read(struct vlapic *vlapic, uint64_t offset, uint64_t *data)
 		*data = 0;
 		break;
 	}
+done:
+	VLAPIC_CTR2(vlapic, "vlapic read offset %#x, data %#lx", offset, *data);
 	return 0;
 }
 
@@ -690,6 +720,8 @@ vlapic_write(struct vlapic *vlapic, uint64_t offset, uint64_t data)
 	uint32_t *reg;
 	int retval;
 
+	VLAPIC_CTR2(vlapic, "vlapic write offset %#x, data %#lx", offset, data);
+
 	if (offset > sizeof(*lapic)) {
 		return 0;
 	}
@@ -712,7 +744,7 @@ vlapic_write(struct vlapic *vlapic, uint64_t offset, uint64_t data)
 	case APIC_OFFSET_DFR:
 		break;
 	case APIC_OFFSET_SVR:
-		lapic->svr = data;
+		lapic_set_svr(vlapic, data);
 		break;
 	case APIC_OFFSET_ICR_LOW:
 		if (!x2apic(vlapic)) {
@@ -887,3 +919,15 @@ vlapic_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
 	if (state == X2APIC_DISABLED)
 		vlapic->msr_apicbase &= ~APICBASE_X2APIC;
 }
+
+bool
+vlapic_enabled(struct vlapic *vlapic)
+{
+	struct LAPIC *lapic = &vlapic->apic;
+
+	if ((vlapic->msr_apicbase & APICBASE_ENABLED) != 0 &&
+	    (lapic->svr & APIC_SVR_ENABLE) != 0)
+		return (true);
+	else
+		return (false);
+}
@@ -100,5 +100,6 @@ int vlapic_timer_tick(struct vlapic *vlapic);
 uint64_t vlapic_get_apicbase(struct vlapic *vlapic);
 void vlapic_set_apicbase(struct vlapic *vlapic, uint64_t val);
 void vlapic_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state s);
+bool vlapic_enabled(struct vlapic *vlapic);
 
 #endif /* _VLAPIC_H_ */
@@ -55,6 +55,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/vm.h>
 #include <machine/pcb.h>
 #include <machine/smp.h>
+#include <x86/psl.h>
 #include <x86/apicreg.h>
 #include <machine/vmparam.h>
 
@@ -859,8 +860,10 @@ vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
  * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
  */
 static int
-vm_handle_hlt(struct vm *vm, int vcpuid, boolean_t *retu)
+vm_handle_hlt(struct vm *vm, int vcpuid, boolean_t intr_disabled,
+    boolean_t *retu)
 {
+	struct vm_exit *vmexit;
 	struct vcpu *vcpu;
 	int sleepticks, t;
 
@@ -888,12 +891,24 @@ vm_handle_hlt(struct vm *vm, int vcpuid, boolean_t *retu)
 	 * These interrupts could have happened any time after we
 	 * returned from VMRUN() and before we grabbed the vcpu lock.
 	 */
-	if (!vm_nmi_pending(vm, vcpuid) && lapic_pending_intr(vm, vcpuid) < 0) {
+	if (!vm_nmi_pending(vm, vcpuid) &&
+	    (intr_disabled || vlapic_pending_intr(vcpu->vlapic) < 0)) {
 		if (sleepticks <= 0)
 			panic("invalid sleepticks %d", sleepticks);
 		t = ticks;
 		vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
-		msleep_spin(vcpu, &vcpu->mtx, "vmidle", sleepticks);
+		if (vlapic_enabled(vcpu->vlapic)) {
+			msleep_spin(vcpu, &vcpu->mtx, "vmidle", sleepticks);
+		} else {
+			/*
+			 * Spindown the vcpu if the apic is disabled and it
+			 * had entered the halted state.
+			 */
+			*retu = TRUE;
+			vmexit = vm_exitinfo(vm, vcpuid);
+			vmexit->exitcode = VM_EXITCODE_SPINDOWN_CPU;
+			VCPU_CTR0(vm, vcpuid, "spinning down cpu");
+		}
 		vcpu_require_state_locked(vcpu, VCPU_FROZEN);
 		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
 	}
@@ -1003,7 +1018,7 @@ vm_run(struct vm *vm, struct vm_run *vmrun)
 	struct pcb *pcb;
 	uint64_t tscval, rip;
 	struct vm_exit *vme;
-	boolean_t retu;
+	boolean_t retu, intr_disabled;
 	pmap_t pmap;
 
 	vcpuid = vmrun->cpuid;
@@ -1046,7 +1061,11 @@ vm_run(struct vm *vm, struct vm_run *vmrun)
 		retu = FALSE;
 		switch (vme->exitcode) {
 		case VM_EXITCODE_HLT:
-			error = vm_handle_hlt(vm, vcpuid, &retu);
+			if ((vme->u.hlt.rflags & PSL_I) == 0)
+				intr_disabled = TRUE;
+			else
+				intr_disabled = FALSE;
+			error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
 			break;
 		case VM_EXITCODE_PAGING:
 			error = vm_handle_paging(vm, vcpuid, &retu);
@@ -33,6 +33,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/mman.h>
 #include <sys/time.h>
 
+#include <machine/atomic.h>
 #include <machine/segments.h>
 
 #include <stdio.h>
@@ -85,8 +86,6 @@ static int pincpu = -1;
 static int guest_vmexit_on_hlt, guest_vmexit_on_pause, disable_x2apic;
 static int virtio_msix = 1;
 
-static int foundcpus;
-
 static int strictio;
 
 static int acpi;
@@ -210,8 +209,7 @@ fbsdrun_addcpu(struct vmctx *ctx, int vcpu, uint64_t rip)
 		exit(1);
 	}
 
-	cpumask |= 1 << vcpu;
-	foundcpus++;
+	atomic_set_int(&cpumask, 1 << vcpu);
 
 	/*
 	 * Set up the vmexit struct to allow execution to start
@@ -228,6 +226,20 @@ fbsdrun_addcpu(struct vmctx *ctx, int vcpu, uint64_t rip)
 	assert(error == 0);
 }
 
+static int
+fbsdrun_deletecpu(struct vmctx *ctx, int vcpu)
+{
+
+	if ((cpumask & (1 << vcpu)) == 0) {
+		fprintf(stderr, "addcpu: attempting to delete unknown cpu %d\n",
+		    vcpu);
+		exit(1);
+	}
+
+	atomic_clear_int(&cpumask, 1 << vcpu);
+	return (cpumask == 0);
+}
+
 static int
 vmexit_catch_reset(void)
 {
@@ -326,6 +338,17 @@ vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
 	return (retval);
 }
 
+static int
+vmexit_spindown_cpu(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
+{
+	int lastcpu;
+
+	lastcpu = fbsdrun_deletecpu(ctx, *pvcpu);
+	if (!lastcpu)
+		pthread_exit(NULL);
+	return (vmexit_catch_reset());
+}
+
 static int
 vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
 {
@@ -417,6 +440,7 @@ static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
 	[VM_EXITCODE_MTRAP] = vmexit_mtrap,
 	[VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
 	[VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap,
+	[VM_EXITCODE_SPINDOWN_CPU] = vmexit_spindown_cpu,
 };
 
 static void