freebsd-skq/sys/amd64/vmm/intel/vmx_support.S
Neel Natu ad54f37429 Fix a long standing bug in VMXCTX_GUEST_RESTORE().
There was an assumption by the "callers" of this macro that on "return" the
%rsp will be pointing to the 'vmxctx'. The macro was not doing this and thus
when trying to restore host state on an error from "vmlaunch" or "vmresume"
we were treating the memory locations on the host stack as 'struct vmxctx'.
This led to all sorts of weird bugs like double faults or invalid instruction
faults.

This bug is exposed by the -O2 option used to compile the kernel module. With
the -O2 flag the compiler will optimize the following piece of code:

	int loopstart = 1;
	...
	if (loopstart) {
		loopstart = 0;
		vmx_launch();
	} else
		vmx_resume();

into this:

	vmx_launch();

Since vmx_launch() and vmx_resume() are declared to be __dead2 functions the
compiler is free to do this. The compiler has no way to know that the
functions return indirectly through vmx_setjmp(). This optimization in turn
leads us to trigger the bug in VMXCTX_GUEST_RESTORE().

With this change we can boot an 8.1 guest on a 9.0 host.

Reported by: jhb@
2011-05-20 03:23:09 +00:00

219 lines
6.1 KiB
x86-64 Assembly (AT&T syntax)

/*-
* Copyright (c) 2011 NetApp, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asmacros.h>
#include "vmx_assym.s"
/*
* Assumes that %rdi holds a pointer to the 'vmxctx'.
*
* On "return" all registers are updated to reflect guest state. The two
* exceptions are %rip and %rsp. These registers are atomically switched
* by hardware from the guest area of the vmcs.
*
* We modify %rsp to point to the 'vmxctx' so we can use it to restore
* host context in case of an error with 'vmlaunch' or 'vmresume'.
*/
#define VMX_GUEST_RESTORE \
/* \
* Load the guest's general purpose registers and %cr2 from the \
* 'vmxctx' pointed to by %rdi. %rip and %rsp are not loaded \
* here: the CPU switches them atomically from the guest area \
* of the VMCS when 'vmlaunch' or 'vmresume' succeeds. \
* \
* Disable interrupts before updating %rsp. The location that \
* %rsp points to is a 'vmxctx' and not a real stack so we \
* don't want an interrupt handler to trash it. \
*/ \
cli; \
/* \
* Leave %rsp pointing at the 'vmxctx' so that the error path \
* after a failed 'vmlaunch'/'vmresume' can locate the context \
* again. The "callers" of this macro rely on this invariant. \
*/ \
movq %rdi,%rsp; \
/* %cr2 cannot be loaded from memory; stage it through %rsi. */ \
movq VMXCTX_GUEST_CR2(%rdi),%rsi; \
movq %rsi,%cr2; \
movq VMXCTX_GUEST_RSI(%rdi),%rsi; \
movq VMXCTX_GUEST_RDX(%rdi),%rdx; \
movq VMXCTX_GUEST_RCX(%rdi),%rcx; \
movq VMXCTX_GUEST_R8(%rdi),%r8; \
movq VMXCTX_GUEST_R9(%rdi),%r9; \
movq VMXCTX_GUEST_RAX(%rdi),%rax; \
movq VMXCTX_GUEST_RBX(%rdi),%rbx; \
movq VMXCTX_GUEST_RBP(%rdi),%rbp; \
movq VMXCTX_GUEST_R10(%rdi),%r10; \
movq VMXCTX_GUEST_R11(%rdi),%r11; \
movq VMXCTX_GUEST_R12(%rdi),%r12; \
movq VMXCTX_GUEST_R13(%rdi),%r13; \
movq VMXCTX_GUEST_R14(%rdi),%r14; \
movq VMXCTX_GUEST_R15(%rdi),%r15; \
movq VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore rdi the last */
/* \
* Decode the outcome of a 'vmlaunch'/'vmresume' into 'reg' per \
* the VMX instruction conventions: CF set means VMfailInvalid, \
* ZF set means VMfailValid, neither set means success. \
* Assumes %rsp points at the 'vmxctx' (see VMX_GUEST_RESTORE) \
* and records the result in vmxctx->launch_error. \
*/ \
#define VM_INSTRUCTION_ERROR(reg) \
jnc 1f; \
movl $VM_FAIL_INVALID,reg; /* CF is set */ \
jmp 3f; \
1: jnz 2f; \
movl $VM_FAIL_VALID,reg; /* ZF is set */ \
jmp 3f; \
2: movl $VM_SUCCESS,reg; \
3: movl reg,VMXCTX_LAUNCH_ERROR(%rsp)
.text
/*
* int vmx_setjmp(ctxp)
* %rdi = ctxp
*
* Return value is '0' when it returns directly from here.
* Return value is '1' when it returns after a vm exit through vmx_longjmp.
*/
ENTRY(vmx_setjmp)
/*
* Stash the callee-saved host registers, %rsp and the return
* address into the 'vmxctx' so that vmx_return() can later make
* execution continue at our caller, as if vmx_setjmp() had
* returned a second time. Caller-saved registers need no
* preservation: the C caller already assumes this call clobbers
* them.
*/
movq (%rsp),%rax /* return address */
movq %r15,VMXCTX_HOST_R15(%rdi)
movq %r14,VMXCTX_HOST_R14(%rdi)
movq %r13,VMXCTX_HOST_R13(%rdi)
movq %r12,VMXCTX_HOST_R12(%rdi)
movq %rbp,VMXCTX_HOST_RBP(%rdi)
movq %rsp,VMXCTX_HOST_RSP(%rdi)
movq %rbx,VMXCTX_HOST_RBX(%rdi)
movq %rax,VMXCTX_HOST_RIP(%rdi)
/*
* XXX save host debug registers
*/
/* Direct (first) return: report VMX_RETURN_DIRECT to the caller. */
movl $VMX_RETURN_DIRECT,%eax
ret
END(vmx_setjmp)
/*
* void vmx_return(struct vmxctx *ctxp, int retval)
* %rdi = ctxp
* %rsi = retval
* Return to vmm context through vmx_setjmp() with a value of 'retval'.
*/
ENTRY(vmx_return)
/* Restore host context saved previously by vmx_setjmp(). */
movq VMXCTX_HOST_R15(%rdi),%r15
movq VMXCTX_HOST_R14(%rdi),%r14
movq VMXCTX_HOST_R13(%rdi),%r13
movq VMXCTX_HOST_R12(%rdi),%r12
movq VMXCTX_HOST_RBP(%rdi),%rbp
movq VMXCTX_HOST_RSP(%rdi),%rsp
movq VMXCTX_HOST_RBX(%rdi),%rbx
/*
* Overwrite the word at the top of the just-restored host stack
* with the %rip saved by vmx_setjmp() so that 'ret' below
* resumes execution at vmx_setjmp()'s caller.
*/
movq VMXCTX_HOST_RIP(%rdi),%rax
movq %rax,(%rsp) /* return address */
/*
* XXX restore host debug registers
*/
/* Make 'retval' appear as the return value of vmx_setjmp(). */
movl %esi,%eax
ret
END(vmx_return)
/*
* void vmx_longjmp(void)
* %rsp points to the struct vmxctx
*/
ENTRY(vmx_longjmp)
/*
* Save guest state that is not automatically saved in the vmcs.
* On entry %rsp points to the 'vmxctx' (not a real stack), so
* every store below fills in the guest register area of the
* context. %rdi is saved first because it is clobbered below to
* read %cr2.
*/
movq %rdi,VMXCTX_GUEST_RDI(%rsp)
movq %rsi,VMXCTX_GUEST_RSI(%rsp)
movq %rdx,VMXCTX_GUEST_RDX(%rsp)
movq %rcx,VMXCTX_GUEST_RCX(%rsp)
movq %r8,VMXCTX_GUEST_R8(%rsp)
movq %r9,VMXCTX_GUEST_R9(%rsp)
movq %rax,VMXCTX_GUEST_RAX(%rsp)
movq %rbx,VMXCTX_GUEST_RBX(%rsp)
movq %rbp,VMXCTX_GUEST_RBP(%rsp)
movq %r10,VMXCTX_GUEST_R10(%rsp)
movq %r11,VMXCTX_GUEST_R11(%rsp)
movq %r12,VMXCTX_GUEST_R12(%rsp)
movq %r13,VMXCTX_GUEST_R13(%rsp)
movq %r14,VMXCTX_GUEST_R14(%rsp)
movq %r15,VMXCTX_GUEST_R15(%rsp)
/* %cr2 cannot be stored to memory directly; stage it via %rdi. */
movq %cr2,%rdi
movq %rdi,VMXCTX_GUEST_CR2(%rsp)
/*
* Switch to the temporary stack inside the 'vmxctx' before making
* a real function call, then return to the host context saved by
* vmx_setjmp() with a retval of VMX_RETURN_LONGJMP.
*/
movq %rsp,%rdi
movq $VMX_RETURN_LONGJMP,%rsi
addq $VMXCTX_TMPSTKTOP,%rsp
callq vmx_return
END(vmx_longjmp)
/*
* void vmx_resume(struct vmxctx *ctxp)
* %rdi = ctxp
*
* Although the return type is a 'void' this function may return indirectly
* through vmx_setjmp() with a return value of 2.
*/
ENTRY(vmx_resume)
/*
* Restore guest state that is not automatically loaded from the vmcs.
* VMX_GUEST_RESTORE disables interrupts and leaves %rsp pointing
* at the 'vmxctx'; the error path below depends on that.
*/
VMX_GUEST_RESTORE
vmresume
/*
* Capture the reason why vmresume failed. Control only falls
* through to this point when the instruction itself fails; a
* successful vmresume enters the guest instead.
*/
VM_INSTRUCTION_ERROR(%eax)
/* Return via vmx_setjmp with return value of VMX_RETURN_VMRESUME */
movq %rsp,%rdi /* %rsp still points at the 'vmxctx' */
movq $VMX_RETURN_VMRESUME,%rsi
addq $VMXCTX_TMPSTKTOP,%rsp /* switch to the tmp stack in 'vmxctx' */
callq vmx_return
END(vmx_resume)
/*
* void vmx_launch(struct vmxctx *ctxp)
* %rdi = ctxp
*
* Although the return type is a 'void' this function may return indirectly
* through vmx_setjmp() with a return value of 3.
*/
ENTRY(vmx_launch)
/*
* Restore guest state that is not automatically loaded from the vmcs.
* VMX_GUEST_RESTORE disables interrupts and leaves %rsp pointing
* at the 'vmxctx'; the error path below depends on that.
*/
VMX_GUEST_RESTORE
vmlaunch
/*
* Capture the reason why vmlaunch failed. Control only falls
* through to this point when the instruction itself fails; a
* successful vmlaunch enters the guest instead.
*/
VM_INSTRUCTION_ERROR(%eax)
/* Return via vmx_setjmp with return value of VMX_RETURN_VMLAUNCH */
movq %rsp,%rdi /* %rsp still points at the 'vmxctx' */
movq $VMX_RETURN_VMLAUNCH,%rsi
addq $VMXCTX_TMPSTKTOP,%rsp /* switch to the tmp stack in 'vmxctx' */
callq vmx_return
END(vmx_launch)