Optimize the common case of injecting an interrupt into a vcpu after a HLT
by explicitly moving it out of the interrupt shadow.

The hypervisor is done "executing" the HLT and by definition this moves the
vcpu out of the 1-instruction interrupt shadow. Prior to this change the
interrupt would be held pending because the VMCS guest-interruptibility-state
would indicate that "blocking by STI" was in effect. This resulted in an
unnecessary round trip into the guest before the pending interrupt could be
injected.

Reviewed by:	grehan
commit d181963296
parent 3eb2c201a6
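For context, the pattern this change enables looks roughly like the sketch below: once the hypervisor has finished emulating an instruction that created a 1-instruction interrupt shadow, the shadow can be cleared through the new VM_REG_GUEST_INTR_SHADOW register so a pending interrupt is injectable on the next VM entry. The helper name is hypothetical; vm_set_register() and the register constant come from the diff that follows.

/*
 * Hypothetical helper (sketch only, not part of this commit): clear the
 * 1-instruction interrupt shadow once emulation of the shadowing
 * instruction is complete, so a pending interrupt can be injected on the
 * next entry into the guest.
 */
static void
vcpu_clear_intr_shadow(struct vm *vm, int vcpuid)
{
	int error;

	/* Only clearing is supported; forcing a shadow returns EINVAL. */
	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
	KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
	    __func__, error));
}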
@@ -82,6 +82,7 @@ enum vm_reg_name {
 	VM_REG_GUEST_PDPTE1,
 	VM_REG_GUEST_PDPTE2,
 	VM_REG_GUEST_PDPTE3,
+	VM_REG_GUEST_INTR_SHADOW,
 	VM_REG_LAST
 };
 
@@ -2711,6 +2711,46 @@ vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
 		return (EINVAL);
 }
 
+static int
+vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
+{
+	uint64_t gi;
+	int error;
+
+	error = vmcs_getreg(&vmx->vmcs[vcpu], running,
+	    VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
+	*retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
+	return (error);
+}
+
+static int
+vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
+{
+	struct vmcs *vmcs;
+	uint64_t gi;
+	int error, ident;
+
+	/*
+	 * Forcing the vcpu into an interrupt shadow is not supported.
+	 */
+	if (val) {
+		error = EINVAL;
+		goto done;
+	}
+
+	vmcs = &vmx->vmcs[vcpu];
+	ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
+	error = vmcs_getreg(vmcs, running, ident, &gi);
+	if (error == 0) {
+		gi &= ~HWINTR_BLOCKING;
+		error = vmcs_setreg(vmcs, running, ident, gi);
+	}
+done:
+	VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
+	    error ? "failed" : "succeeded");
+	return (error);
+}
+
 static int
 vmx_shadow_reg(int reg)
 {
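For reference, HWINTR_BLOCKING used above is assumed (from the surrounding vmx code, not shown in this diff) to collect the guest-interruptibility bits that block hardware interrupt injection, roughly:

/*
 * Assumed definition (not part of this diff): the VMCS guest-
 * interruptibility bits that block hardware interrupt injection,
 * i.e. "blocking by STI" and "blocking by MOV SS".
 */
#define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |		\
			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)

Reading the register therefore reports 1 whenever either form of blocking is in effect, and clearing it removes both.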
@@ -2742,6 +2782,9 @@ vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
 	if (running && hostcpu != curcpu)
 		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
 
+	if (reg == VM_REG_GUEST_INTR_SHADOW)
+		return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
+
 	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
 		return (0);
 
@@ -2760,6 +2803,9 @@ vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
 	if (running && hostcpu != curcpu)
 		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
 
+	if (reg == VM_REG_GUEST_INTR_SHADOW)
+		return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
+
 	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
 		return (0);
 
@@ -1090,7 +1090,7 @@ vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
 {
 	struct vcpu *vcpu;
 	const char *wmesg;
-	int t, vcpu_halted, vm_halted;
+	int error, t, vcpu_halted, vm_halted;
 
 	KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
 
@@ -1098,6 +1098,22 @@ vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
 	vcpu_halted = 0;
 	vm_halted = 0;
 
+	/*
+	 * The typical way to halt a cpu is to execute: "sti; hlt"
+	 *
+	 * STI sets RFLAGS.IF to enable interrupts. However, the processor
+	 * remains in an "interrupt shadow" for an additional instruction
+	 * following the STI. This guarantees that "sti; hlt" sequence is
+	 * atomic and a pending interrupt will be recognized after the HLT.
+	 *
+	 * After the HLT emulation is done the vcpu is no longer in an
+	 * interrupt shadow and a pending interrupt can be injected on
+	 * the next entry into the guest.
+	 */
+	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
+	KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
+	    __func__, error));
+
 	vcpu_lock(vcpu);
 	while (1) {
 		/*
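The guest-side idiom referenced in the comment above looks roughly like this (illustrative only, not part of this change): STI enables interrupts but the cpu remains in a one-instruction interrupt shadow, so the HLT that follows cannot be separated from the STI by an interrupt.

/* Illustrative guest-side idle routine using the "sti; hlt" idiom. */
static __inline void
guest_idle_sti_hlt(void)
{
	/*
	 * The interrupt shadow after STI guarantees the HLT executes before
	 * any pending interrupt is recognized, so the interrupt wakes the
	 * cpu out of HLT instead of being delivered just before it.
	 */
	__asm __volatile("sti; hlt");
}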