Restructure the VMX code to enter and exit the guest. In large part this change
hides the setjmp/longjmp semantics of VM enter/exit: vmx_enter_guest() is used
to enter guest context and vmx_exit_guest() is used to transition back into
host context.

Fix a longstanding race where a vcpu interrupt notification could be ignored
if it arrived after vmx_inject_interrupts() but before host interrupts were
disabled in vmx_resume/vmx_launch. vmx_inject_interrupts() is now called with
host interrupts disabled to prevent this.

Suggested by:	grehan@
commit 0492757c70
parent e1b5472915
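The core of the race fix, condensed into a C sketch from the vmx_run() changes
in the diff below (illustration only; kernel context assumed, not standalone
code):

	do {
		/*
		 * Disable host interrupts before checking for an AST and
		 * before injecting guest events.  An IPI_AST, a posted
		 * interrupt, or a pmap_invalidate_ept() IPI arriving after
		 * this point is held pending by the host APIC and forces a
		 * VM exit as soon as guest state is loaded, so the
		 * notification can no longer be lost.
		 */
		disable_intr();
		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
			enable_intr();
			handled = vmx_exit_astpending(vmx, vcpu, vmexit);
			break;
		}
		vmx_inject_interrupts(vmx, vcpu, vlapic); /* interrupts still off */
		rc = vmx_enter_guest(vmxctx, launched);   /* vmlaunch/vmresume */
		enable_intr();
		/* process the exit: vmx_exit_process() or vmx_exit_inst_error() */
	} while (handled);

Previously interrupts were disabled only inside vmx_resume/vmx_launch, leaving
a window after vmx_inject_interrupts() in which a notification could arrive
and be missed.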
@@ -303,9 +303,19 @@ struct vm_exit {
 		 * exitcode to represent the VM-exit.
 		 */
 		struct {
-			int		error;		/* vmx inst error */
+			int		status;		/* vmx inst status */
+			/*
+			 * 'exit_reason' and 'exit_qualification' are valid
+			 * only if 'status' is zero.
+			 */
 			uint32_t	exit_reason;
 			uint64_t	exit_qualification;
+			/*
+			 * 'inst_error' and 'inst_type' are valid
+			 * only if 'status' is non-zero.
+			 */
+			int		inst_type;
+			int		inst_error;
 		} vmx;
 		struct {
 			uint32_t	code;		/* ecx value */
@@ -288,82 +288,6 @@ exit_reason_to_str(int reason)
 		return (reasonbuf);
 	}
 }
-
-#ifdef SETJMP_TRACE
-static const char *
-vmx_setjmp_rc2str(int rc)
-{
-	switch (rc) {
-	case VMX_RETURN_DIRECT:
-		return "direct";
-	case VMX_RETURN_LONGJMP:
-		return "longjmp";
-	case VMX_RETURN_VMRESUME:
-		return "vmresume";
-	case VMX_RETURN_VMLAUNCH:
-		return "vmlaunch";
-	case VMX_RETURN_AST:
-		return "ast";
-	default:
-		return "unknown";
-	}
-}
-
-#define	SETJMP_TRACE(vmx, vcpu, vmxctx, regname)			\
-	VCPU_CTR1((vmx)->vm, (vcpu), "setjmp trace " #regname " 0x%016lx", \
-		 (vmxctx)->regname)
-
-static void
-vmx_setjmp_trace(struct vmx *vmx, int vcpu, struct vmxctx *vmxctx, int rc)
-{
-	uint64_t host_rip, host_rsp;
-
-	if (vmxctx != &vmx->ctx[vcpu])
-		panic("vmx_setjmp_trace: invalid vmxctx %p; should be %p",
-			vmxctx, &vmx->ctx[vcpu]);
-
-	VCPU_CTR1((vmx)->vm, (vcpu), "vmxctx = %p", vmxctx);
-	VCPU_CTR2((vmx)->vm, (vcpu), "setjmp return code %s(%d)",
-		 vmx_setjmp_rc2str(rc), rc);
-
-	host_rip = vmcs_read(VMCS_HOST_RIP);
-	host_rsp = vmcs_read(VMCS_HOST_RSP);
-	VCPU_CTR2((vmx)->vm, (vcpu), "vmcs host_rip 0x%016lx, host_rsp %#lx",
-		 host_rip, host_rsp);
-
-	SETJMP_TRACE(vmx, vcpu, vmxctx, host_r15);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, host_r14);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, host_r13);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, host_r12);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, host_rbp);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, host_rsp);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, host_rbx);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, host_rip);
-
-	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rdi);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rsi);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rdx);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rcx);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r8);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r9);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rax);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rbx);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rbp);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r10);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r11);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r12);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r13);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r14);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r15);
-	SETJMP_TRACE(vmx, vcpu, vmxctx, guest_cr2);
-}
-#endif
-#else
-static void __inline
-vmx_setjmp_trace(struct vmx *vmx, int vcpu, struct vmxctx *vmxctx, int rc)
-{
-	return;
-}
 #endif	/* KTR */
 
 u_long
@@ -827,7 +751,7 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
 		}
 
 		error = vmcs_set_defaults(&vmx->vmcs[i],
-					  (u_long)vmx_longjmp,
+					  (u_long)vmx_exit_guest,
 					  (u_long)&vmx->ctx[i],
 					  vmx->eptp,
 					  pinbased_ctls,
@@ -1257,15 +1181,20 @@ static int
 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
 {
 	int error, handled;
-	struct vmcs *vmcs;
 	struct vmxctx *vmxctx;
 	uint32_t eax, ecx, edx, idtvec_info, idtvec_err, reason;
 	uint64_t qual, gpa;
 	bool retu;
 
 	handled = 0;
-	vmcs = &vmx->vmcs[vcpu];
 	vmxctx = &vmx->ctx[vcpu];
 
+	/* Collect some information for VM exit processing */
+	vmexit->rip = vmcs_guest_rip();
+	vmexit->inst_length = vmexit_instruction_length();
+	vmexit->u.vmx.exit_reason = vmcs_exit_reason();
+	vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
+
 	qual = vmexit->u.vmx.exit_qualification;
 	reason = vmexit->u.vmx.exit_reason;
 	vmexit->exitcode = VM_EXITCODE_BOGUS;
@@ -1442,7 +1371,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
 		 * treat it as a generic VMX exit.
 		 */
 		vmexit->exitcode = VM_EXITCODE_VMX;
-		vmexit->u.vmx.error = 0;
+		vmexit->u.vmx.status = VM_SUCCESS;
 	} else {
 		/*
 		 * The exitcode and collateral have been populated.
@@ -1453,11 +1382,53 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
 	return (handled);
 }
 
-static int
-vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap)
+static __inline int
+vmx_exit_astpending(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
 {
-	int vie, rc, handled, astpending;
-	uint32_t exit_reason;
+	vmexit->rip = vmcs_guest_rip();
+	vmexit->inst_length = 0;
+	vmexit->exitcode = VM_EXITCODE_BOGUS;
+	vmx_astpending_trace(vmx, vcpu, vmexit->rip);
+	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_ASTPENDING, 1);
+
+	return (HANDLED);
+}
+
+static __inline int
+vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
+{
+
+	KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
+	    ("vmx_exit_inst_error: invalid inst_fail_status %d",
+	    vmxctx->inst_fail_status));
+
+	vmexit->rip = vmcs_guest_rip();
+	vmexit->inst_length = 0;
+
+	vmexit->exitcode = VM_EXITCODE_VMX;
+	vmexit->u.vmx.status = vmxctx->inst_fail_status;
+	vmexit->u.vmx.inst_error = vmcs_instruction_error();
+	vmexit->u.vmx.exit_reason = ~0;
+	vmexit->u.vmx.exit_qualification = ~0;
+
+	switch (rc) {
+	case VMX_VMRESUME_ERROR:
+	case VMX_VMLAUNCH_ERROR:
+	case VMX_INVEPT_ERROR:
+		vmexit->u.vmx.inst_type = rc;
+		break;
+	default:
+		panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
+	}
+
+	return (UNHANDLED);
+}
+
+static int
+vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap)
+{
+	int rc, handled, launched;
 	struct vmx *vmx;
 	struct vmxctx *vmxctx;
 	struct vmcs *vmcs;
@@ -1467,20 +1438,15 @@ vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap)
 	vmx = arg;
 	vmcs = &vmx->vmcs[vcpu];
 	vmxctx = &vmx->ctx[vcpu];
-	vmxctx->launched = 0;
 	vlapic = vm_lapic(vmx->vm, vcpu);
-
-	astpending = 0;
 	vmexit = vm_exitinfo(vmx->vm, vcpu);
+	launched = 0;
 
 	KASSERT(vmxctx->pmap == pmap,
 	    ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
 	KASSERT(vmxctx->eptp == vmx->eptp,
 	    ("eptp %p different than ctx eptp %#lx", eptp, vmxctx->eptp));
 
-	/*
-	 * XXX Can we avoid doing this every time we do a vm run?
-	 */
 	VMPTRLD(vmcs);
 
 	/*
@@ -1492,76 +1458,48 @@ vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap)
 	 * of a single process we could do this once in vmcs_set_defaults().
 	 */
 	vmcs_write(VMCS_HOST_CR3, rcr3());
-	vmcs_write(VMCS_GUEST_RIP, rip);
-	vmx_set_pcpu_defaults(vmx, vcpu);
 
+	vmcs_write(VMCS_GUEST_RIP, startrip);
+	vmx_set_pcpu_defaults(vmx, vcpu);
 	do {
+		/*
+		 * Interrupts are disabled from this point on until the
+		 * guest starts executing. This is done for the following
+		 * reasons:
+		 *
+		 * If an AST is asserted on this thread after the check below,
+		 * then the IPI_AST notification will not be lost, because it
+		 * will cause a VM exit due to external interrupt as soon as
+		 * the guest state is loaded.
+		 *
+		 * A posted interrupt after 'vmx_inject_interrupts()' will
+		 * not be "lost" because it will be held pending in the host
+		 * APIC because interrupts are disabled. The pending interrupt
+		 * will be recognized as soon as the guest state is loaded.
+		 *
+		 * The same reasoning applies to the IPI generated by
+		 * pmap_invalidate_ept().
+		 */
+		disable_intr();
+		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
+			enable_intr();
+			handled = vmx_exit_astpending(vmx, vcpu, vmexit);
+			break;
+		}
+
 		vmx_inject_interrupts(vmx, vcpu, vlapic);
 		vmx_run_trace(vmx, vcpu);
-		rc = vmx_setjmp(vmxctx);
-#ifdef SETJMP_TRACE
-		vmx_setjmp_trace(vmx, vcpu, vmxctx, rc);
-#endif
-		switch (rc) {
-		case VMX_RETURN_DIRECT:
-			if (vmxctx->launched == 0) {
-				vmxctx->launched = 1;
-				vmx_launch(vmxctx);
-			} else
-				vmx_resume(vmxctx);
-			panic("vmx_launch/resume should not return");
-			break;
-		case VMX_RETURN_LONGJMP:
-			break;			/* vm exit */
-		case VMX_RETURN_AST:
-			astpending = 1;
-			break;
-		case VMX_RETURN_VMRESUME:
-			vie = vmcs_instruction_error();
-			if (vmxctx->launch_error == VM_FAIL_INVALID ||
-			    vie != VMRESUME_WITH_NON_LAUNCHED_VMCS) {
-				printf("vmresume error %d vmcs inst error %d\n",
-					vmxctx->launch_error, vie);
-				goto err_exit;
-			}
-			vmx_launch(vmxctx);	/* try to launch the guest */
-			panic("vmx_launch should not return");
-			break;
-		case VMX_RETURN_VMLAUNCH:
-			vie = vmcs_instruction_error();
-#if 1
-			printf("vmlaunch error %d vmcs inst error %d\n",
-				vmxctx->launch_error, vie);
-#endif
-			goto err_exit;
-		case VMX_RETURN_INVEPT:
-			panic("vm %s:%d invept error %d",
-			      vm_name(vmx->vm), vcpu, vmxctx->launch_error);
-		default:
-			panic("vmx_setjmp returned %d", rc);
-		}
-
-		/* enable interrupts */
+		rc = vmx_enter_guest(vmxctx, launched);
 		enable_intr();
 
-		/* collect some basic information for VM exit processing */
-		vmexit->rip = rip = vmcs_guest_rip();
-		vmexit->inst_length = vmexit_instruction_length();
-		vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
-		vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
-
-		if (astpending) {
-			handled = 1;
-			vmexit->inst_length = 0;
-			vmexit->exitcode = VM_EXITCODE_BOGUS;
-			vmx_astpending_trace(vmx, vcpu, rip);
-			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_ASTPENDING, 1);
-			break;
+		if (rc == VMX_GUEST_VMEXIT) {
+			launched = 1;
+			handled = vmx_exit_process(vmx, vcpu, vmexit);
+		} else {
+			handled = vmx_exit_inst_error(vmxctx, rc, vmexit);
 		}
 
-		handled = vmx_exit_process(vmx, vcpu, vmexit);
-		vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
-
+		vmx_exit_trace(vmx, vcpu, vmexit->rip,
+		    vmexit->u.vmx.exit_reason, handled);
 	} while (handled);
 
 	/*
@@ -1577,26 +1515,11 @@ vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap)
 	if (!handled)
 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_USERSPACE, 1);
 
-	VCPU_CTR1(vmx->vm, vcpu, "goto userland: exitcode %d",vmexit->exitcode);
+	VCPU_CTR1(vmx->vm, vcpu, "returning from vmx_run: exitcode %d",
+	    vmexit->exitcode);
 
-	/*
-	 * XXX
-	 * We need to do this to ensure that any VMCS state cached by the
-	 * processor is flushed to memory. We need to do this in case the
-	 * VM moves to a different cpu the next time it runs.
-	 *
-	 * Can we avoid doing this?
-	 */
 	VMCLEAR(vmcs);
 	return (0);
-
-err_exit:
-	vmexit->exitcode = VM_EXITCODE_VMX;
-	vmexit->u.vmx.exit_reason = (uint32_t)-1;
-	vmexit->u.vmx.exit_qualification = (uint32_t)-1;
-	vmexit->u.vmx.error = vie;
-	VMCLEAR(vmcs);
-	return (ENOEXEC);
 }
 
 static void
@@ -36,9 +36,6 @@ struct pmap;
 #define	GUEST_MSR_MAX_ENTRIES	64	/* arbitrary */
 
 struct vmxctx {
-	register_t	tmpstk[32];	/* vmx_return() stack */
-	register_t	tmpstktop;
-
 	register_t	guest_rdi;	/* Guest state */
 	register_t	guest_rsi;
 	register_t	guest_rdx;
@@ -68,8 +65,7 @@ struct vmxctx {
 	 * XXX todo debug registers and fpu state
 	 */
 
-	int		launched;	/* vmcs launch state */
-	int		launch_error;
+	int		inst_fail_status;
 
 	long		eptgen[MAXCPU];	/* cached pmap->pm_eptgen */
 
@@ -113,25 +109,12 @@ CTASSERT((offsetof(struct vmx, vmcs) & PAGE_MASK) == 0);
 CTASSERT((offsetof(struct vmx, msr_bitmap) & PAGE_MASK) == 0);
 CTASSERT((offsetof(struct vmx, guest_msrs) & 15) == 0);
 
-#define	VMX_RETURN_DIRECT	0
-#define	VMX_RETURN_LONGJMP	1
-#define	VMX_RETURN_VMRESUME	2
-#define	VMX_RETURN_VMLAUNCH	3
-#define	VMX_RETURN_AST		4
-#define	VMX_RETURN_INVEPT	5
-/*
- * vmx_setjmp() returns:
- * - 0 when it returns directly
- * - 1 when it returns from vmx_longjmp
- * - 2 when it returns from vmx_resume (which would only be in the error case)
- * - 3 when it returns from vmx_launch (which would only be in the error case)
- * - 4 when it returns from vmx_resume or vmx_launch because of AST pending
- * - 5 when it returns from vmx_launch/vmx_resume because of invept error
- */
-int	vmx_setjmp(struct vmxctx *ctx);
-void	vmx_longjmp(void);			/* returns via vmx_setjmp */
-void	vmx_launch(struct vmxctx *ctx) __dead2;	/* may return via vmx_setjmp */
-void	vmx_resume(struct vmxctx *ctx) __dead2;	/* may return via vmx_setjmp */
+#define	VMX_GUEST_VMEXIT	0
+#define	VMX_VMRESUME_ERROR	1
+#define	VMX_VMLAUNCH_ERROR	2
+#define	VMX_INVEPT_ERROR	3
+int	vmx_enter_guest(struct vmxctx *ctx, int launched);
+void	vmx_exit_guest(void);
 
 u_long	vmx_fix_cr0(u_long cr0);
 u_long	vmx_fix_cr4(u_long cr4);
@@ -31,7 +31,6 @@ __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/systm.h>
-#include <sys/malloc.h>
 #include <sys/proc.h>
 #include <sys/assym.h>
 
@@ -42,7 +41,6 @@ __FBSDID("$FreeBSD$");
 #include "vmx_cpufunc.h"
 #include "vmx.h"
 
-ASSYM(VMXCTX_TMPSTKTOP, offsetof(struct vmxctx, tmpstktop));
 ASSYM(VMXCTX_GUEST_RDI, offsetof(struct vmxctx, guest_rdi));
 ASSYM(VMXCTX_GUEST_RSI, offsetof(struct vmxctx, guest_rsi));
 ASSYM(VMXCTX_GUEST_RDX, offsetof(struct vmxctx, guest_rdx));
@@ -69,27 +67,19 @@ ASSYM(VMXCTX_HOST_RSP, offsetof(struct vmxctx, host_rsp));
 ASSYM(VMXCTX_HOST_RBX, offsetof(struct vmxctx, host_rbx));
 ASSYM(VMXCTX_HOST_RIP, offsetof(struct vmxctx, host_rip));
 
-ASSYM(VMXCTX_LAUNCH_ERROR, offsetof(struct vmxctx, launch_error));
+ASSYM(VMXCTX_INST_FAIL_STATUS, offsetof(struct vmxctx, inst_fail_status));
 ASSYM(VMXCTX_EPTGEN, offsetof(struct vmxctx, eptgen));
 
 ASSYM(VMXCTX_PMAP, offsetof(struct vmxctx, pmap));
 ASSYM(VMXCTX_EPTP, offsetof(struct vmxctx, eptp));
 
-ASSYM(VM_SUCCESS, VM_SUCCESS);
 ASSYM(VM_FAIL_INVALID, VM_FAIL_INVALID);
 ASSYM(VM_FAIL_VALID, VM_FAIL_VALID);
+ASSYM(VMX_GUEST_VMEXIT, VMX_GUEST_VMEXIT);
+ASSYM(VMX_VMRESUME_ERROR, VMX_VMRESUME_ERROR);
+ASSYM(VMX_VMLAUNCH_ERROR, VMX_VMLAUNCH_ERROR);
+ASSYM(VMX_INVEPT_ERROR, VMX_INVEPT_ERROR);
 
-ASSYM(VMX_RETURN_DIRECT, VMX_RETURN_DIRECT);
-ASSYM(VMX_RETURN_LONGJMP, VMX_RETURN_LONGJMP);
-ASSYM(VMX_RETURN_VMRESUME, VMX_RETURN_VMRESUME);
-ASSYM(VMX_RETURN_VMLAUNCH, VMX_RETURN_VMLAUNCH);
-ASSYM(VMX_RETURN_AST, VMX_RETURN_AST);
-ASSYM(VMX_RETURN_INVEPT, VMX_RETURN_INVEPT);
-
-ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
-ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
-ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
-ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
 ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid));
 
 ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
@@ -1,5 +1,6 @@
 /*-
  * Copyright (c) 2011 NetApp, Inc.
+ * Copyright (c) 2013 Neel Natu <neel@freebsd.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -36,32 +37,6 @@
 #define	LK
 #endif
 
-/*
- * Disable interrupts before updating %rsp in VMX_CHECK_AST or
- * VMX_GUEST_RESTORE.
- *
- * The location that %rsp points to is a 'vmxctx' and not a
- * real stack so we don't want an interrupt handler to trash it
- */
-#define	VMX_DISABLE_INTERRUPTS		cli
-
-/*
- * If the thread hosting the vcpu has an ast pending then take care of it
- * by returning from vmx_setjmp() with a return value of VMX_RETURN_AST.
- *
- * Assumes that %rdi holds a pointer to the 'vmxctx' and that interrupts
- * are disabled.
- */
-#define	VMX_CHECK_AST							\
-	movq	PCPU(CURTHREAD),%rax;					\
-	testl	$TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax);	\
-	je	9f;							\
-	movq	$VMX_RETURN_AST,%rsi;					\
-	movq	%rdi,%rsp;						\
-	addq	$VMXCTX_TMPSTKTOP,%rsp;					\
-	callq	vmx_return;						\
-9:
-
 /*
  * Assumes that %rdi holds a pointer to the 'vmxctx'.
  *
@@ -93,132 +68,132 @@
 	movq	VMXCTX_GUEST_RDI(%rdi),%rdi;	/* restore rdi the last */
 
 /*
- * Check for an error after executing a VMX instruction.
- * 'errreg' will be zero on success and non-zero otherwise.
- * 'ctxreg' points to the 'struct vmxctx' associated with the vcpu.
+ * Save and restore the host context.
+ *
+ * Assumes that %rdi holds a pointer to the 'vmxctx'.
  */
-#define	VM_INSTRUCTION_ERROR(errreg, ctxreg)				\
-	jnc	1f;							\
-	movl	$VM_FAIL_INVALID,errreg;	/* CF is set */		\
-	jmp	3f;							\
-1:	jnz	2f;							\
-	movl	$VM_FAIL_VALID,errreg;		/* ZF is set */		\
-	jmp	3f;							\
-2:	movl	$VM_SUCCESS,errreg;					\
-3:	movl	errreg,VMXCTX_LAUNCH_ERROR(ctxreg)
+#define	VMX_HOST_SAVE(tmpreg)						\
+	movq	(%rsp), tmpreg;			/* return address */	\
+	movq	%r15, VMXCTX_HOST_R15(%rdi);				\
+	movq	%r14, VMXCTX_HOST_R14(%rdi);				\
+	movq	%r13, VMXCTX_HOST_R13(%rdi);				\
+	movq	%r12, VMXCTX_HOST_R12(%rdi);				\
+	movq	%rbp, VMXCTX_HOST_RBP(%rdi);				\
+	movq	%rsp, VMXCTX_HOST_RSP(%rdi);				\
+	movq	%rbx, VMXCTX_HOST_RBX(%rdi);				\
+	movq	tmpreg, VMXCTX_HOST_RIP(%rdi)
+
+#define	VMX_HOST_RESTORE(tmpreg)					\
+	movq	VMXCTX_HOST_R15(%rdi), %r15;				\
+	movq	VMXCTX_HOST_R14(%rdi), %r14;				\
+	movq	VMXCTX_HOST_R13(%rdi), %r13;				\
+	movq	VMXCTX_HOST_R12(%rdi), %r12;				\
+	movq	VMXCTX_HOST_RBP(%rdi), %rbp;				\
+	movq	VMXCTX_HOST_RSP(%rdi), %rsp;				\
+	movq	VMXCTX_HOST_RBX(%rdi), %rbx;				\
+	movq	VMXCTX_HOST_RIP(%rdi), tmpreg;				\
+	movq	tmpreg, (%rsp)			/* return address */
 
 /*
- * set or clear the appropriate bit in 'pm_active'
- * %rdi = vmxctx
- * %rax, %r11 = scratch registers
+ * vmx_enter_guest(struct vmxctx *vmxctx, int launched)
+ * %rdi: pointer to the 'vmxctx'
+ * %esi: launch state of the VMCS
+ * Interrupts must be disabled on entry.
  */
-#define	VMX_SET_PM_ACTIVE						\
-	movq	VMXCTX_PMAP(%rdi), %r11;				\
-	movl	PCPU(CPUID), %eax;					\
+ENTRY(vmx_enter_guest)
+	/*
+	 * Save host state before doing anything else.
+	 */
+	VMX_HOST_SAVE(%r10)
+
+	/*
+	 * Activate guest pmap on this cpu.
+	 */
+	movq	VMXCTX_PMAP(%rdi), %r11
+	movl	PCPU(CPUID), %eax
 	LK btsl	%eax, PM_ACTIVE(%r11)
 
-#define	VMX_CLEAR_PM_ACTIVE						\
-	movq	VMXCTX_PMAP(%rdi), %r11;				\
-	movl	PCPU(CPUID), %eax;					\
-	LK btrl	%eax, PM_ACTIVE(%r11)
-
-/*
- * If 'vmxctx->eptgen[curcpu]' is not identical to 'pmap->pm_eptgen'
- * then we must invalidate all mappings associated with this eptp.
- *
- * %rdi = vmxctx
- * %rax, %rbx, %r11 = scratch registers
- */
-#define	VMX_CHECK_EPTGEN						\
-	movl	PCPU(CPUID), %ebx;					\
-	movq	VMXCTX_PMAP(%rdi), %r11;				\
-	movq	PM_EPTGEN(%r11), %rax;					\
-	cmpq	%rax, VMXCTX_EPTGEN(%rdi, %rbx, 8);			\
-	je	9f;							\
-									\
-	/* Refresh 'vmxctx->eptgen[curcpu]' */				\
-	movq	%rax, VMXCTX_EPTGEN(%rdi, %rbx, 8);			\
-									\
-	/* Setup the invept descriptor at the top of tmpstk */		\
-	mov	%rdi, %r11;						\
-	addq	$VMXCTX_TMPSTKTOP, %r11;				\
-	movq	VMXCTX_EPTP(%rdi), %rax;				\
-	movq	%rax, -16(%r11);					\
-	movq	$0x0, -8(%r11);						\
-	mov	$0x1, %eax;		/* Single context invalidate */	\
-	invept	-16(%r11), %rax;					\
-									\
-	/* Check for invept error */					\
-	VM_INSTRUCTION_ERROR(%eax, %rdi);				\
-	testl	%eax, %eax;						\
-	jz	9f;							\
-									\
-	/* Return via vmx_setjmp with retval of VMX_RETURN_INVEPT */	\
-	movq	$VMX_RETURN_INVEPT, %rsi;				\
-	movq	%rdi,%rsp;						\
-	addq	$VMXCTX_TMPSTKTOP, %rsp;				\
-	callq	vmx_return;						\
-9:	;
-
-.text
-/*
- * int vmx_setjmp(ctxp)
- * %rdi = ctxp
- *
- * Return value is '0' when it returns directly from here.
- * Return value is '1' when it returns after a vm exit through vmx_longjmp.
- */
-ENTRY(vmx_setjmp)
-	movq	(%rsp),%rax			/* return address */
-	movq	%r15,VMXCTX_HOST_R15(%rdi)
-	movq	%r14,VMXCTX_HOST_R14(%rdi)
-	movq	%r13,VMXCTX_HOST_R13(%rdi)
-	movq	%r12,VMXCTX_HOST_R12(%rdi)
-	movq	%rbp,VMXCTX_HOST_RBP(%rdi)
-	movq	%rsp,VMXCTX_HOST_RSP(%rdi)
-	movq	%rbx,VMXCTX_HOST_RBX(%rdi)
-	movq	%rax,VMXCTX_HOST_RIP(%rdi)
+	/*
+	 * If 'vmxctx->eptgen[curcpu]' is not identical to 'pmap->pm_eptgen'
+	 * then we must invalidate all mappings associated with this EPTP.
+	 */
+	movq	PM_EPTGEN(%r11), %r10
+	cmpq	%r10, VMXCTX_EPTGEN(%rdi, %rax, 8)
+	je	guest_restore
+
+	/* Refresh 'vmxctx->eptgen[curcpu]' */
+	movq	%r10, VMXCTX_EPTGEN(%rdi, %rax, 8)
+
+	/* Setup the invept descriptor on the host stack */
+	mov	%rsp, %r11
+	movq	VMXCTX_EPTP(%rdi), %rax
+	movq	%rax, -16(%r11)
+	movq	$0x0, -8(%r11)
+	mov	$0x1, %eax		/* Single context invalidate */
+	invept	-16(%r11), %rax
+	jbe	invept_error		/* Check invept instruction error */
+
+guest_restore:
+	cmpl	$0, %esi
+	je	do_launch
+
+	VMX_GUEST_RESTORE
+	vmresume
+	/*
+	 * In the common case 'vmresume' returns back to the host through
+	 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
+	 *
+	 * If there is an error we return VMX_VMRESUME_ERROR to the caller.
+	 */
+	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
+	movl	$VMX_VMRESUME_ERROR, %eax
+	jmp	decode_inst_error
+
+do_launch:
+	VMX_GUEST_RESTORE
+	vmlaunch
+	/*
+	 * In the common case 'vmlaunch' returns back to the host through
+	 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
+	 *
+	 * If there is an error we return VMX_VMLAUNCH_ERROR to the caller.
+	 */
+	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
+	movl	$VMX_VMLAUNCH_ERROR, %eax
+	jmp	decode_inst_error
+
+invept_error:
+	movl	$VMX_INVEPT_ERROR, %eax
+	jmp	decode_inst_error
+
+decode_inst_error:
+	movl	$VM_FAIL_VALID, %r11d
+	jz	inst_error
+	movl	$VM_FAIL_INVALID, %r11d
+inst_error:
+	movl	%r11d, VMXCTX_INST_FAIL_STATUS(%rdi)
 
 	/*
-	 * XXX save host debug registers
+	 * The return value is already populated in %eax so we cannot use
+	 * it as a scratch register beyond this point.
 	 */
-	movl	$VMX_RETURN_DIRECT,%eax
-	ret
-END(vmx_setjmp)
-
-/*
- * void vmx_return(struct vmxctx *ctxp, int retval)
- * %rdi = ctxp
- * %rsi = retval
- * Return to vmm context through vmx_setjmp() with a value of 'retval'.
- */
-ENTRY(vmx_return)
-	/* The pmap is no longer active on the host cpu */
-	VMX_CLEAR_PM_ACTIVE
-
-	/* Restore host context. */
-	movq	VMXCTX_HOST_R15(%rdi),%r15
-	movq	VMXCTX_HOST_R14(%rdi),%r14
-	movq	VMXCTX_HOST_R13(%rdi),%r13
-	movq	VMXCTX_HOST_R12(%rdi),%r12
-	movq	VMXCTX_HOST_RBP(%rdi),%rbp
-	movq	VMXCTX_HOST_RSP(%rdi),%rsp
-	movq	VMXCTX_HOST_RBX(%rdi),%rbx
-	movq	VMXCTX_HOST_RIP(%rdi),%rax
-	movq	%rax,(%rsp)			/* return address */
 
 	/*
-	 * XXX restore host debug registers
+	 * Deactivate guest pmap from this cpu.
 	 */
-	movl	%esi,%eax
+	movq	VMXCTX_PMAP(%rdi), %r11
+	movl	PCPU(CPUID), %r10d
+	LK btrl	%r10d, PM_ACTIVE(%r11)
+
+	VMX_HOST_RESTORE(%r10)
 	ret
-END(vmx_return)
+END(vmx_execute_guest)
 
 /*
- * void vmx_longjmp(void)
+ * void vmx_exit_guest(void)
  * %rsp points to the struct vmxctx
  */
-ENTRY(vmx_longjmp)
+ENTRY(vmx_exit_guest)
 	/*
 	 * Save guest state that is not automatically saved in the vmcs.
 	 */
@@ -242,80 +217,20 @@ ENTRY(vmx_longjmp)
 	movq	%rdi,VMXCTX_GUEST_CR2(%rsp)
 
 	movq	%rsp,%rdi
-	movq	$VMX_RETURN_LONGJMP,%rsi
-
-	addq	$VMXCTX_TMPSTKTOP,%rsp
-	callq	vmx_return
-END(vmx_longjmp)
-
-/*
- * void vmx_resume(struct vmxctx *ctxp)
- * %rdi = ctxp
- *
- * Although the return type is a 'void' this function may return indirectly
- * through vmx_setjmp() with a return value of 2.
- */
-ENTRY(vmx_resume)
-	VMX_DISABLE_INTERRUPTS
-
-	VMX_CHECK_AST
-
-	VMX_SET_PM_ACTIVE	/* This vcpu is now active on the host cpu */
-
-	VMX_CHECK_EPTGEN	/* Check if we have to invalidate TLB */
 
 	/*
-	 * Restore guest state that is not automatically loaded from the vmcs.
+	 * Deactivate guest pmap from this cpu.
 	 */
-	VMX_GUEST_RESTORE
+	movq	VMXCTX_PMAP(%rdi), %r11
+	movl	PCPU(CPUID), %r10d
+	LK btrl	%r10d, PM_ACTIVE(%r11)
 
-	vmresume
+	VMX_HOST_RESTORE(%r10)
 
 	/*
-	 * Capture the reason why vmresume failed.
+	 * This will return to the caller of 'vmx_enter_guest()' with a return
+	 * value of VMX_GUEST_VMEXIT.
 	 */
-	VM_INSTRUCTION_ERROR(%eax, %rsp)
-
-	/* Return via vmx_setjmp with return value of VMX_RETURN_VMRESUME */
-	movq	%rsp,%rdi
-	movq	$VMX_RETURN_VMRESUME,%rsi
-
-	addq	$VMXCTX_TMPSTKTOP,%rsp
-	callq	vmx_return
-END(vmx_resume)
-
-/*
- * void vmx_launch(struct vmxctx *ctxp)
- * %rdi = ctxp
- *
- * Although the return type is a 'void' this function may return indirectly
- * through vmx_setjmp() with a return value of 3.
- */
-ENTRY(vmx_launch)
-	VMX_DISABLE_INTERRUPTS
-
-	VMX_CHECK_AST
-
-	VMX_SET_PM_ACTIVE	/* This vcpu is now active on the host cpu */
-
-	VMX_CHECK_EPTGEN	/* Check if we have to invalidate TLB */
-
-	/*
-	 * Restore guest state that is not automatically loaded from the vmcs.
-	 */
-	VMX_GUEST_RESTORE
-
-	vmlaunch
-
-	/*
-	 * Capture the reason why vmlaunch failed.
-	 */
-	VM_INSTRUCTION_ERROR(%eax, %rsp)
-
-	/* Return via vmx_setjmp with return value of VMX_RETURN_VMLAUNCH */
-	movq	%rsp,%rdi
-	movq	$VMX_RETURN_VMLAUNCH,%rsi
-
-	addq	$VMXCTX_TMPSTKTOP,%rsp
-	callq	vmx_return
-END(vmx_launch)
+	movl	$VMX_GUEST_VMEXIT, %eax
+	ret
+END(vmx_exit_guest)
@@ -388,10 +388,12 @@ vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
 	fprintf(stderr, "\treason\t\tVMX\n");
 	fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
 	fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
-	fprintf(stderr, "\terror\t\t%d\n", vmexit->u.vmx.error);
+	fprintf(stderr, "\tstatus\t\t%d\n", vmexit->u.vmx.status);
 	fprintf(stderr, "\texit_reason\t%u\n", vmexit->u.vmx.exit_reason);
 	fprintf(stderr, "\tqualification\t0x%016lx\n",
 	    vmexit->u.vmx.exit_qualification);
+	fprintf(stderr, "\tinst_type\t\t%d\n", vmexit->u.vmx.inst_type);
+	fprintf(stderr, "\tinst_error\t\t%d\n", vmexit->u.vmx.inst_error);
 
 	return (VMEXIT_ABORT);
 }
@@ -273,11 +273,13 @@ dump_vm_run_exitcode(struct vm_exit *vmexit, int vcpu)
 		break;
 	case VM_EXITCODE_VMX:
 		printf("\treason\t\tVMX\n");
-		printf("\terror\t\t%d\n", vmexit->u.vmx.error);
+		printf("\tstatus\t\t%d\n", vmexit->u.vmx.status);
 		printf("\texit_reason\t0x%08x (%u)\n",
 		    vmexit->u.vmx.exit_reason, vmexit->u.vmx.exit_reason);
 		printf("\tqualification\t0x%016lx\n",
 		    vmexit->u.vmx.exit_qualification);
+		printf("\tinst_type\t\t%d\n", vmexit->u.vmx.inst_type);
+		printf("\tinst_error\t\t%d\n", vmexit->u.vmx.inst_error);
 		break;
 	default:
 		printf("*** unknown vm run exitcode %d\n", vmexit->exitcode);