Disable global interrupts early so all the software state maintained by bhyve
is sampled "atomically". Any interrupts after this point will be held pending
by the CPU until the guest starts executing and will immediately trigger a
#VMEXIT.

Reviewed by:	Anish Gupta (akgupt3@gmail.com)
Author:	Neel Natu
Date:	2014-06-11 17:48:07 +00:00
parent 3787148758
commit 4e98fc9011
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/bhyve_svm/; revision=267367
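
For context: disable_gintr() and enable_gintr() toggle AMD SVM's Global
Interrupt Flag (GIF) rather than RFLAGS.IF. A minimal sketch of how such
wrappers look, assuming the inline-assembly style used elsewhere in
FreeBSD's amd64 code:

	/*
	 * Sketch: clear/set the Global Interrupt Flag with the SVM
	 * CLGI/STGI instructions. While GIF is clear the CPU holds
	 * physical interrupts pending instead of delivering them.
	 */
	static __inline void
	disable_gintr(void)
	{
		__asm __volatile("clgi");
	}

	static __inline void
	enable_gintr(void)
	{
		__asm __volatile("stgi");
	}

Because "vmrun" sets GIF on guest entry, any interrupt that arrived while
GIF was clear is delivered at that point, and the physical-interrupt
intercept turns it into an immediate #VMEXIT.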


@@ -1156,22 +1156,24 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
 	do {
 		vmexit->inst_length = 0;
-		/* We are asked to give the cpu by scheduler. */
-		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
-			vmexit->exitcode = VM_EXITCODE_BOGUS;
-			vmm_stat_incr(vm, vcpu, VMEXIT_ASTPENDING, 1);
-			VCPU_CTR1(vm, vcpu,
-			    "SVM: ASTPENDING, RIP:0x%lx\n", state->rip);
-			vmexit->rip = state->rip;
-			break;
-		}
+
+		/*
+		 * Disable global interrupts to guarantee atomicity during
+		 * loading of guest state. This includes not only the state
+		 * loaded by the "vmrun" instruction but also software state
+		 * maintained by the hypervisor: suspended and rendezvous
+		 * state, NPT generation number, vlapic interrupts etc.
+		 */
+		disable_gintr();
 
 		if (vcpu_suspended(suspended_cookie)) {
+			enable_gintr();
 			vm_exit_suspended(vm, vcpu, state->rip);
 			break;
 		}
 
 		if (vcpu_rendezvous_pending(rend_cookie)) {
+			enable_gintr();
 			vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
 			vmm_stat_incr(vm, vcpu, VMEXIT_RENDEZVOUS, 1);
 			VCPU_CTR1(vm, vcpu,
@@ -1181,32 +1183,36 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
 			break;
 		}
 
+		/* We are asked to give the cpu by scheduler. */
+		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
+			enable_gintr();
+			vmexit->exitcode = VM_EXITCODE_BOGUS;
+			vmm_stat_incr(vm, vcpu, VMEXIT_ASTPENDING, 1);
+			VCPU_CTR1(vm, vcpu,
+			    "SVM: ASTPENDING, RIP:0x%lx\n", state->rip);
+			vmexit->rip = state->rip;
+			break;
+		}
+
 		(void)svm_set_vmcb(svm_get_vmcb(svm_sc, vcpu), svm_sc->asid);
 		svm_handle_exitintinfo(svm_sc, vcpu);
 		(void)svm_inj_interrupts(svm_sc, vcpu, vlapic);
 
 		/* Change TSS type to available.*/
 		setup_tss_type();
 
-		/*
-		 * Disable global interrupt to guarantee atomicity
-		 * during loading of guest state.
-		 * See 15.5.1 "Loading guest state" APM2.
-		 */
-		disable_gintr();
-
 		/* Launch Virtual Machine. */
 		svm_launch(vmcb_pa, gctx, hctx);
 
 		/*
 		 * Only GDTR and IDTR of host is saved and restore by SVM,
 		 * LDTR and TR need to be restored by VMM.
 		 * XXX: kernel doesn't use LDT, only user space.
 		 */
 		ltr(GSEL(GPROC0_SEL, SEL_KPL));
 
 		/*
 		 * Guest FS and GS selector are stashed by vmload and vmsave.
 		 * Host FS and GS selector are stashed by svm_launch().
@@ -1220,15 +1226,14 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
 		 */
 		wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[vcpustate->lastcpu]);
 		wrmsr(MSR_KGSBASE, (uint64_t)&__pcpu[vcpustate->lastcpu]);
 
-		/* vcpu exit with glbal interrupt disabled. */
+		/* #VMEXIT disables interrupts so re-enable them here. */
 		enable_gintr();
 
 		/* Handle #VMEXIT and if required return to user space. */
 		loop = svm_vmexit(svm_sc, vcpu, vmexit);
 		vcpustate->loop++;
 		vmm_stat_incr(vm, vcpu, VMEXIT_COUNT, 1);
 	} while (loop);
 
 	return (0);
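
The reordering matters because of a window in the old code: the
suspend/rendezvous/AST checks ran with GIF still set, so an IPI could be
delivered after a check had passed but before "vmrun" executed, and the
guest would then be entered without acting on the new state. A condensed
sketch of the loop body after this change; pending_host_work() is a
hypothetical stand-in for the vcpu_suspended(), vcpu_rendezvous_pending()
and TDF_ASTPENDING/TDF_NEEDRESCHED checks above:

	disable_gintr();	/* CLGI: software state now sampled atomically */
	if (pending_host_work()) {
		enable_gintr();	/* STGI before breaking out of the loop */
		/* record the exitcode and break */
	} else {
		/* vmrun sets GIF; any interrupt held pending while GIF
		   was clear is delivered now and #VMEXITs immediately */
		svm_launch(vmcb_pa, gctx, hctx);
		enable_gintr();	/* #VMEXIT left GIF clear */
	}

Either way the checks and the guest entry form one atomic unit: nothing
the host does between disable_gintr() and "vmrun" can be missed, only
deferred into an immediate #VMEXIT.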