Add virtualized XSAVE support to bhyve which permits guests to use XSAVE and XSAVE-enabled features like AVX.

- Store a per-cpu guest xcr0 register and handle xsetbv VM exits by emulating the instruction (a rough sketch of the validation involved follows this list).
- Only expose XSAVE to guests if XSAVE is enabled in the host. Only expose a subset of XSAVE features currently supported by the guest and for which the proper emulation of xsetbv is known. Currently this includes X87, SSE, AVX, AVX-512, and Intel MPX.
- Add support for injecting hardware exceptions into the guest and use this to trigger exceptions in the guest for invalid xsetbv operations instead of potentially faulting in the host.
- Queue pending exceptions in the 'struct vcpu' instead of directly updating the processor-specific VMCS or VMCB. The pending exception will be delivered right before entering the guest.
- Rename the unused ioctl VM_INJECT_EVENT to VM_INJECT_EXCEPTION and restrict it to only deliver x86 hardware exceptions. This new ioctl is now used to inject a protection fault when the guest accesses an unimplemented MSR.
- Expose a subset of known-safe features from leaf 0 of the structured extended features to guests if they are supported on the host, including RDFSBASE/RDGSBASE, BMI1/2, AVX2, AVX-512, HLE, ERMS, and RTM. Aside from AVX-512, these features are all new instructions available for use in ring 3 with no additional hypervisor changes needed.
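A minimal sketch of the check an xsetbv emulation has to make, assuming hypothetical XFEATURE_* bit names and a hypothetical xsetbv_value_is_valid() helper (neither is taken from the bhyve sources): the guest-supplied XCR0 value is compared against the mask of state components the host is willing to expose, and a failed check leads to a #GP being injected into the guest rather than the instruction faulting on the host. The allowed mask corresponds to the xcr0_allowed field of struct xsave_limits declared in vmm_host.h below.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative XCR0 state-component bits (architecturally defined). */
#define	XFEATURE_X87	(1UL << 0)
#define	XFEATURE_SSE	(1UL << 1)
#define	XFEATURE_AVX	(1UL << 2)

/*
 * Return true if a guest-supplied XCR0 value may be accepted.
 * 'xcr0_allowed' is the mask of state components exposed to the guest.
 */
static bool
xsetbv_value_is_valid(uint64_t newval, uint64_t xcr0_allowed)
{
	/* Reject state components the guest was never offered. */
	if ((newval & ~xcr0_allowed) != 0)
		return (false);

	/* The x87 bit must always be set in XCR0. */
	if ((newval & XFEATURE_X87) == 0)
		return (false);

	/* AVX state requires SSE state to be enabled as well. */
	if ((newval & XFEATURE_AVX) != 0 && (newval & XFEATURE_SSE) == 0)
		return (false);

	return (true);
}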
/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _VMM_HOST_H_
#define	_VMM_HOST_H_

#ifndef	_KERNEL
#error "no user-servicable parts inside"
#endif

/*
 * Limits of the host's XSAVE support, used to bound what is exposed
 * to guests.
 */
struct xsave_limits {
	int		xsave_enabled;	/* non-zero if XSAVE is enabled on the host */
	uint64_t	xcr0_allowed;	/* XCR0 bits that may be offered to guests */
	uint32_t	xsave_max_size;	/* maximum size of the XSAVE state area */
};

void vmm_host_state_init(void);

uint64_t vmm_get_host_pat(void);
uint64_t vmm_get_host_efer(void);
uint64_t vmm_get_host_cr0(void);
uint64_t vmm_get_host_cr4(void);
uint64_t vmm_get_host_xcr0(void);
uint64_t vmm_get_host_datasel(void);
uint64_t vmm_get_host_codesel(void);
uint64_t vmm_get_host_tsssel(void);
uint64_t vmm_get_host_fsbase(void);
uint64_t vmm_get_host_idtrbase(void);
const struct xsave_limits *vmm_get_xsave_limits(void);

/*
 * Inline access to host state that is used on every VM entry
 */
static __inline uint64_t
vmm_get_host_trbase(void)
{

	return ((uint64_t)PCPU_GET(tssp));
}

static __inline uint64_t
vmm_get_host_gdtrbase(void)
{

	return ((uint64_t)&gdt[NGDT * curcpu]);
}

struct pcpu;
extern struct pcpu __pcpu[];

static __inline uint64_t
vmm_get_host_gsbase(void)
{

	return ((uint64_t)&__pcpu[curcpu]);
}

#endif
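As a rough illustration of how the interface above might be consumed, the fragment below sketches a hypothetical helper that narrows a set of requested XCR0 features down to what the host permits. Only struct xsave_limits and vmm_get_xsave_limits() come from the header; the surrounding function and its name are assumptions made for the example.

#include "vmm_host.h"

/*
 * Hypothetical consumer: compute the XCR0 bits that may be offered to a
 * guest, given the features the caller would like to expose.
 */
static uint64_t
guest_allowed_xcr0(uint64_t features_wanted)
{
	const struct xsave_limits *limits;

	limits = vmm_get_xsave_limits();

	/* If the host did not enable XSAVE, expose nothing. */
	if (!limits->xsave_enabled)
		return (0);

	/* Offer only features that are both requested and host-permitted. */
	return (features_wanted & limits->xcr0_allowed);
}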