Allow more VMCB fields to be cached:

- CR2
- CR0, CR3, CR4 and EFER
- GDT/IDT base/limit fields
- CS/DS/ES/SS selector/base/limit/attrib fields

The caching can be further restricted via the tunable 'hw.vmm.svm.vmcb_clean'.

Restructure the code such that the fields above are only modified in a single
place. This makes it easy to invalidate the VMCB cache when any of these fields
is modified.
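
As a rough illustration of the mechanism this change centralizes: svm_set_dirty()
accumulates clean-bits for every VMCB field that is modified, and svm_vmrun()
advertises as clean only the fields that are both allowed by the vmcb_clean mask and
untouched since the last VMRUN. The sketch below is standalone example code, not part
of the commit, and the VMCB_CACHE_* bit positions are assumed for illustration only
(the real definitions live in vmcb.h).

#include <stdint.h>
#include <stdio.h>

/* Assumed bit positions, for this sketch only. */
#define VMCB_CACHE_CR	(1U << 5)	/* CR0, CR3, CR4, EFER */
#define VMCB_CACHE_SEG	(1U << 8)	/* CS/DS/ES/SS state */

int
main(void)
{
	uint32_t vmcb_clean = 0xffffffffU;	/* fields the tunable allows to be cached */
	uint32_t dirty = 0;

	/* Register/descriptor writes mark what they touch, as in svm_set_dirty(). */
	dirty |= VMCB_CACHE_CR;
	dirty |= VMCB_CACHE_SEG;

	/* At VMRUN, only cacheable *and* unmodified fields are advertised clean. */
	printf("vmcb_clean advertised to hardware: %#x\n", vmcb_clean & ~dirty);
	dirty = 0;				/* start accumulating again for the next run */
	return (0);
}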
neel 2014-09-21 23:42:54 +00:00
parent 5834276187
commit 35aaa3ac11
5 changed files with 347 additions and 325 deletions


@@ -89,16 +89,22 @@ SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL);
VMCB_CACHE_IOPM | \
VMCB_CACHE_I | \
VMCB_CACHE_TPR | \
VMCB_CACHE_CR2 | \
VMCB_CACHE_CR | \
VMCB_CACHE_DT | \
VMCB_CACHE_SEG | \
VMCB_CACHE_NP)
static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
0, NULL);
MALLOC_DEFINE(M_SVM, "svm", "svm");
MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");
/* Per-CPU context area. */
extern struct pcpu __pcpu[];
static int svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc);
static uint32_t svm_feature; /* AMD SVM features. */
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RD, &svm_feature, 0,
"SVM features advertised by CPUID.8000000AH:EDX");
@@ -129,6 +135,8 @@ static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
/*
* Common function to enable or disabled SVM for a CPU.
*/
@@ -292,6 +300,8 @@ svm_init(int ipinum)
if (err)
return (err);
vmcb_clean &= VMCB_CACHE_DEFAULT;
for (cpu = 0; cpu < MAXCPU; cpu++) {
/*
* Initialize the host ASIDs to their "highest" valid values.
@@ -410,16 +420,6 @@ svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
return svm_msr_perm(perm_bitmap, msr, true, false);
}
static __inline void
vcpu_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits)
{
struct svm_vcpu *vcpustate;
vcpustate = svm_get_vcpu(sc, vcpu);
vcpustate->dirty |= dirtybits;
}
static __inline int
svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
{
@@ -449,7 +449,7 @@ svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
ctrl->intercept[idx] &= ~bitmask;
if (ctrl->intercept[idx] != oldval) {
vcpu_set_dirty(sc, vcpu, VMCB_CACHE_I);
svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
"from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
}
@@ -592,6 +592,10 @@ svm_vminit(struct vm *vm, pmap_t pmap)
svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);
svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);
/*
* Intercept writes to make sure that the EFER_SVM bit is not cleared.
*/
svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);
/* Intercept access to all I/O ports. */
@@ -627,18 +631,22 @@ svm_cpl(struct vmcb_state *state)
static enum vm_cpu_mode
svm_vcpu_mode(struct vmcb *vmcb)
{
struct vmcb_segment *seg;
struct vmcb_segment seg;
struct vmcb_state *state;
int error;
state = &vmcb->state;
if (state->efer & EFER_LMA) {
seg = vmcb_seg(vmcb, VM_REG_GUEST_CS);
error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
error));
/*
* Section 4.8.1 for APM2, check if Code Segment has
* Long attribute set in descriptor.
*/
if (seg->attrib & VMCB_CS_ATTRIB_L)
if (seg.attrib & VMCB_CS_ATTRIB_L)
return (CPU_MODE_64BIT);
else
return (CPU_MODE_COMPATIBILITY);
@@ -700,7 +708,7 @@ svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
vis->seg_name = vm_segment_name(s);
}
error = svm_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
}
@@ -824,10 +832,10 @@ static void
svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
{
struct vm_guest_paging *paging;
struct vmcb_segment *seg;
struct vmcb_segment seg;
struct vmcb_ctrl *ctrl;
char *inst_bytes;
int inst_len;
int error, inst_len;
ctrl = &vmcb->ctrl;
paging = &vmexit->u.inst_emul.paging;
@@ -837,14 +845,16 @@ svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
svm_paging_info(vmcb, paging);
seg = vmcb_seg(vmcb, VM_REG_GUEST_CS);
error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));
switch(paging->cpu_mode) {
case CPU_MODE_PROTECTED:
case CPU_MODE_COMPATIBILITY:
/*
* Section 4.8.1 of APM2, Default Operand Size or D bit.
*/
vmexit->u.inst_emul.cs_d = (seg->attrib & VMCB_CS_ATTRIB_D) ?
vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
1 : 0;
break;
default:
@@ -865,28 +875,6 @@ svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
}
/*
* Intercept access to MSR_EFER to prevent the guest from clearing the
* SVM enable bit.
*/
static int
svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t val)
{
struct vmcb_state *state;
uint64_t oldval;
state = svm_get_vmcb_state(sc, vcpu);
oldval = state->efer;
state->efer = val | EFER_SVM;
if (state->efer != oldval) {
VCPU_CTR2(sc->vm, vcpu, "Guest EFER changed from %#lx to %#lx",
oldval, state->efer);
vcpu_set_dirty(sc, vcpu, VMCB_CACHE_CR);
}
return (0);
}
#ifdef KTR
static const char *
intrtype_to_str(int intr_type)
@@ -1028,7 +1016,7 @@ enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
ctrl->v_irq = 1;
ctrl->v_ign_tpr = 1;
ctrl->v_intr_vector = 0;
vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}
@@ -1053,7 +1041,7 @@ disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
#endif
ctrl->v_irq = 0;
ctrl->v_intr_vector = 0;
vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}
@@ -1144,7 +1132,7 @@ emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
if (lapic_msr(num))
error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
else if (num == MSR_EFER)
error = svm_write_efer(sc, vcpu, val);
error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, val);
else
error = svm_wrmsr(sc, vcpu, num, val, retu);
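
With the dedicated svm_write_efer() helper removed, a guest WRMSR to EFER now flows
through svm_setreg() into vmcb_write(), which unconditionally keeps EFER_SVM set and
marks the CR clean-bit dirty. Below is a standalone sketch of the forced-bit
behaviour, using a hypothetical guest value that is not taken from the commit:

#include <stdint.h>
#include <stdio.h>

#define EFER_SVM	(1ULL << 12)	/* EFER.SVME */

int
main(void)
{
	uint64_t guest_val = 0x500;			/* hypothetical write: LME|LMA, SVME clear */
	uint64_t efer = guest_val | EFER_SVM;		/* vmcb_write() never lets SVME be cleared */

	printf("EFER stored in VMCB: 0x%llx\n", (unsigned long long)efer);	/* 0x1500 */
	return (0);
}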
@@ -1622,7 +1610,7 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x",
ctrl->v_tpr, v_tpr);
ctrl->v_tpr = v_tpr;
vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
}
if (pending_apic_vector) {
@@ -1638,7 +1626,7 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
ctrl->v_ign_tpr = 0;
ctrl->v_intr_vector = pending_apic_vector;
ctrl->v_intr_prio = pending_apic_vector >> 4;
vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
} else if (need_intr_window) {
/*
* We use V_IRQ in conjunction with the VINTR intercept to
@@ -1764,7 +1752,7 @@ check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
vcpustate->asid.num = asid[thiscpu].num;
ctrl->asid = vcpustate->asid.num;
vcpu_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
/*
* If this cpu supports "flush-by-asid" then the TLB
* was not flushed after the generation bump. The TLB
@@ -1830,7 +1818,7 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
/*
* Invalidate the VMCB state cache by marking all fields dirty.
*/
vcpu_set_dirty(svm_sc, vcpu, 0xffffffff);
svm_set_dirty(svm_sc, vcpu, 0xffffffff);
/*
* XXX
@@ -1891,7 +1879,7 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
*/
check_asid(svm_sc, vcpu, pmap, thiscpu);
ctrl->vmcb_clean = VMCB_CACHE_DEFAULT & ~vcpustate->dirty;
ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
vcpustate->dirty = 0;
VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);
@@ -2001,17 +1989,15 @@ static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
struct svm_softc *svm_sc;
struct vmcb *vmcb;
register_t *reg;
svm_sc = arg;
vmcb = svm_get_vmcb(svm_sc, vcpu);
if (ident == VM_REG_GUEST_INTR_SHADOW) {
return (svm_get_intr_shadow(svm_sc, vcpu, val));
}
if (vmcb_read(vmcb, ident, val) == 0) {
if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
return (0);
}
@@ -2034,17 +2020,15 @@ static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
struct svm_softc *svm_sc;
struct vmcb *vmcb;
register_t *reg;
svm_sc = arg;
vmcb = svm_get_vmcb(svm_sc, vcpu);
if (ident == VM_REG_GUEST_INTR_SHADOW) {
return (svm_modify_intr_shadow(svm_sc, vcpu, val));
}
if (vmcb_write(vmcb, ident, val) == 0) {
if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
return (0);
}
@@ -2065,81 +2049,6 @@ svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
return (EINVAL);
}
/*
* Inteface to set various descriptors.
*/
static int
svm_setdesc(void *arg, int vcpu, int type, struct seg_desc *desc)
{
struct svm_softc *svm_sc;
struct vmcb *vmcb;
struct vmcb_segment *seg;
uint16_t attrib;
svm_sc = arg;
vmcb = svm_get_vmcb(svm_sc, vcpu);
VCPU_CTR1(svm_sc->vm, vcpu, "SVM:set_desc: Type%d\n", type);
seg = vmcb_seg(vmcb, type);
if (seg == NULL) {
ERR("SVM_ERR:Unsupported segment type%d\n", type);
return (EINVAL);
}
/* Map seg_desc access to VMCB attribute format.*/
attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
VCPU_CTR3(svm_sc->vm, vcpu, "SVM:[sel %d attribute 0x%x limit:0x%x]\n",
type, desc->access, desc->limit);
seg->attrib = attrib;
seg->base = desc->base;
seg->limit = desc->limit;
return (0);
}
/*
* Interface to get guest descriptor.
*/
static int
svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc)
{
struct svm_softc *svm_sc;
struct vmcb_segment *seg;
svm_sc = arg;
VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_desc: Type%d\n", type);
seg = vmcb_seg(svm_get_vmcb(svm_sc, vcpu), type);
if (!seg) {
ERR("SVM_ERR:Unsupported segment type%d\n", type);
return (EINVAL);
}
/* Map seg_desc access to VMCB attribute format.*/
desc->access = ((seg->attrib & 0xF00) << 4) | (seg->attrib & 0xFF);
desc->base = seg->base;
desc->limit = seg->limit;
/*
* VT-x uses bit 16 (Unusable) to indicate a segment that has been
* loaded with a NULL segment selector. The 'desc->access' field is
* interpreted in the VT-x format by the processor-independent code.
*
* SVM uses the 'P' bit to convey the same information so convert it
* into the VT-x format. For more details refer to section
* "Segment State in the VMCB" in APMv2.
*/
if (type == VM_REG_GUEST_CS && type == VM_REG_GUEST_TR)
desc->access |= 0x80; /* CS and TS always present */
if (!(desc->access & 0x80))
desc->access |= 0x10000; /* Unusable segment */
return (0);
}
static int
svm_setcap(void *arg, int vcpu, int type, int val)
{
@@ -2231,8 +2140,8 @@ struct vmm_ops vmm_ops_amd = {
svm_vmcleanup,
svm_getreg,
svm_setreg,
svm_getdesc,
svm_setdesc,
vmcb_getdesc,
vmcb_setdesc,
svm_getcap,
svm_setcap,
svm_npt_alloc,


@@ -92,25 +92,4 @@ enable_gintr(void)
__asm __volatile("stgi" : : :);
}
static __inline void
save_cr2(uint64_t *cr2)
{
__asm __volatile(
"mov %%cr2, %%rax; movq %%rax, %0"
:"=m"(*cr2)
:
:"rax", "memory");
}
static __inline void
load_cr2(uint64_t *cr2)
{
__asm __volatile(
"movq %0, %%rax; movq %%rax, %%cr2"
:
:"m"(*cr2)
:"rax");
}
#endif /* _SVM_H_ */


@@ -116,5 +116,14 @@ svm_get_guest_regctx(struct svm_softc *sc, int vcpu)
return (&(sc->vcpu[vcpu].swctx));
}
void svm_dump_vmcb(struct svm_softc *svm_sc, int vcpu);
static __inline void
svm_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits)
{
struct svm_vcpu *vcpustate;
vcpustate = svm_get_vcpu(sc, vcpu);
vcpustate->dirty |= dirtybits;
}
#endif /* _SVM_SOFTC_H_ */


@@ -35,8 +35,11 @@ __FBSDID("$FreeBSD$");
#include <machine/specialreg.h>
#include <machine/vmm.h>
#include "vmm_ktr.h"
#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"
/*
* The VMCB aka Virtual Machine Control Block is a 4KB aligned page
@@ -48,175 +51,11 @@ __FBSDID("$FreeBSD$");
* - guest processor state (e.g. general purpose registers)
*/
/*
* Read from segment selector, control and general purpose register of VMCB.
*/
int
vmcb_read(struct vmcb *vmcb, int ident, uint64_t *retval)
{
struct vmcb_state *state;
struct vmcb_segment *seg;
int err;
state = &vmcb->state;
err = 0;
switch (ident) {
case VM_REG_GUEST_CR0:
*retval = state->cr0;
break;
case VM_REG_GUEST_CR2:
*retval = state->cr2;
break;
case VM_REG_GUEST_CR3:
*retval = state->cr3;
break;
case VM_REG_GUEST_CR4:
*retval = state->cr4;
break;
case VM_REG_GUEST_DR7:
*retval = state->dr7;
break;
case VM_REG_GUEST_EFER:
*retval = state->efer;
break;
case VM_REG_GUEST_RAX:
*retval = state->rax;
break;
case VM_REG_GUEST_RFLAGS:
*retval = state->rflags;
break;
case VM_REG_GUEST_RIP:
*retval = state->rip;
break;
case VM_REG_GUEST_RSP:
*retval = state->rsp;
break;
case VM_REG_GUEST_CS:
case VM_REG_GUEST_DS:
case VM_REG_GUEST_ES:
case VM_REG_GUEST_FS:
case VM_REG_GUEST_GS:
case VM_REG_GUEST_SS:
case VM_REG_GUEST_GDTR:
case VM_REG_GUEST_IDTR:
case VM_REG_GUEST_LDTR:
case VM_REG_GUEST_TR:
seg = vmcb_seg(vmcb, ident);
if (seg == NULL) {
ERR("Invalid seg type %d\n", ident);
err = EINVAL;
break;
}
*retval = seg->selector;
break;
default:
err = EINVAL;
break;
}
return (err);
}
/*
* Write to segment selector, control and general purpose register of VMCB.
*/
int
vmcb_write(struct vmcb *vmcb, int ident, uint64_t val)
{
struct vmcb_state *state;
struct vmcb_segment *seg;
int err;
state = &vmcb->state;
err = 0;
switch (ident) {
case VM_REG_GUEST_CR0:
state->cr0 = val;
break;
case VM_REG_GUEST_CR2:
state->cr2 = val;
break;
case VM_REG_GUEST_CR3:
state->cr3 = val;
break;
case VM_REG_GUEST_CR4:
state->cr4 = val;
break;
case VM_REG_GUEST_DR7:
state->dr7 = val;
break;
case VM_REG_GUEST_EFER:
/* EFER_SVM must always be set when the guest is executing */
state->efer = val | EFER_SVM;
break;
case VM_REG_GUEST_RAX:
state->rax = val;
break;
case VM_REG_GUEST_RFLAGS:
state->rflags = val;
break;
case VM_REG_GUEST_RIP:
state->rip = val;
break;
case VM_REG_GUEST_RSP:
state->rsp = val;
break;
case VM_REG_GUEST_CS:
case VM_REG_GUEST_DS:
case VM_REG_GUEST_ES:
case VM_REG_GUEST_FS:
case VM_REG_GUEST_GS:
case VM_REG_GUEST_SS:
case VM_REG_GUEST_GDTR:
case VM_REG_GUEST_IDTR:
case VM_REG_GUEST_LDTR:
case VM_REG_GUEST_TR:
seg = vmcb_seg(vmcb, ident);
if (seg == NULL) {
ERR("Invalid segment type %d\n", ident);
err = EINVAL;
break;
}
seg->selector = val;
break;
default:
err = EINVAL;
}
return (err);
}
/*
* Return VMCB segment area.
*/
struct vmcb_segment *
vmcb_seg(struct vmcb *vmcb, int type)
static struct vmcb_segment *
vmcb_segptr(struct vmcb *vmcb, int type)
{
struct vmcb_state *state;
struct vmcb_segment *seg;
@@ -271,3 +110,285 @@ vmcb_seg(struct vmcb *vmcb, int type)
return (seg);
}
/*
* Read from segment selector, control and general purpose register of VMCB.
*/
int
vmcb_read(struct svm_softc *sc, int vcpu, int ident, uint64_t *retval)
{
struct vmcb *vmcb;
struct vmcb_state *state;
struct vmcb_segment *seg;
int err;
vmcb = svm_get_vmcb(sc, vcpu);
state = &vmcb->state;
err = 0;
switch (ident) {
case VM_REG_GUEST_CR0:
*retval = state->cr0;
break;
case VM_REG_GUEST_CR2:
*retval = state->cr2;
break;
case VM_REG_GUEST_CR3:
*retval = state->cr3;
break;
case VM_REG_GUEST_CR4:
*retval = state->cr4;
break;
case VM_REG_GUEST_DR7:
*retval = state->dr7;
break;
case VM_REG_GUEST_EFER:
*retval = state->efer;
break;
case VM_REG_GUEST_RAX:
*retval = state->rax;
break;
case VM_REG_GUEST_RFLAGS:
*retval = state->rflags;
break;
case VM_REG_GUEST_RIP:
*retval = state->rip;
break;
case VM_REG_GUEST_RSP:
*retval = state->rsp;
break;
case VM_REG_GUEST_CS:
case VM_REG_GUEST_DS:
case VM_REG_GUEST_ES:
case VM_REG_GUEST_FS:
case VM_REG_GUEST_GS:
case VM_REG_GUEST_SS:
case VM_REG_GUEST_LDTR:
case VM_REG_GUEST_TR:
seg = vmcb_segptr(vmcb, ident);
KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
__func__, ident));
*retval = seg->selector;
break;
case VM_REG_GUEST_GDTR:
case VM_REG_GUEST_IDTR:
/* GDTR and IDTR don't have segment selectors */
err = EINVAL;
break;
default:
err = EINVAL;
break;
}
return (err);
}
/*
* Write to segment selector, control and general purpose register of VMCB.
*/
int
vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
{
struct vmcb *vmcb;
struct vmcb_state *state;
struct vmcb_segment *seg;
int err, dirtyseg;
vmcb = svm_get_vmcb(sc, vcpu);
state = &vmcb->state;
dirtyseg = 0;
err = 0;
switch (ident) {
case VM_REG_GUEST_CR0:
state->cr0 = val;
svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
break;
case VM_REG_GUEST_CR2:
state->cr2 = val;
svm_set_dirty(sc, vcpu, VMCB_CACHE_CR2);
break;
case VM_REG_GUEST_CR3:
state->cr3 = val;
svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
break;
case VM_REG_GUEST_CR4:
state->cr4 = val;
svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
break;
case VM_REG_GUEST_DR7:
state->dr7 = val;
break;
case VM_REG_GUEST_EFER:
/* EFER_SVM must always be set when the guest is executing */
state->efer = val | EFER_SVM;
svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
break;
case VM_REG_GUEST_RAX:
state->rax = val;
break;
case VM_REG_GUEST_RFLAGS:
state->rflags = val;
break;
case VM_REG_GUEST_RIP:
state->rip = val;
break;
case VM_REG_GUEST_RSP:
state->rsp = val;
break;
case VM_REG_GUEST_CS:
case VM_REG_GUEST_DS:
case VM_REG_GUEST_ES:
case VM_REG_GUEST_SS:
dirtyseg = 1; /* FALLTHROUGH */
case VM_REG_GUEST_FS:
case VM_REG_GUEST_GS:
case VM_REG_GUEST_LDTR:
case VM_REG_GUEST_TR:
seg = vmcb_segptr(vmcb, ident);
KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
__func__, ident));
seg->selector = val;
if (dirtyseg)
svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
break;
case VM_REG_GUEST_GDTR:
case VM_REG_GUEST_IDTR:
/* GDTR and IDTR don't have segment selectors */
err = EINVAL;
break;
default:
err = EINVAL;
break;
}
return (err);
}
int
vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg2)
{
struct vmcb_segment *seg;
seg = vmcb_segptr(vmcb, ident);
if (seg != NULL) {
bcopy(seg, seg2, sizeof(struct vmcb_segment));
return (0);
} else {
return (EINVAL);
}
}
int
vmcb_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
struct vmcb *vmcb;
struct svm_softc *sc;
struct vmcb_segment *seg;
uint16_t attrib;
sc = arg;
vmcb = svm_get_vmcb(sc, vcpu);
seg = vmcb_segptr(vmcb, reg);
KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
__func__, reg));
seg->base = desc->base;
seg->limit = desc->limit;
if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
/*
* Map seg_desc access to VMCB attribute format.
*
* SVM uses the 'P' bit in the segment attributes to indicate a
* NULL segment so clear it if the segment is marked unusable.
*/
attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
if (SEG_DESC_UNUSABLE(desc->access)) {
attrib &= ~0x80;
}
seg->attrib = attrib;
}
VCPU_CTR4(sc->vm, vcpu, "Setting desc %d: base (%#lx), limit (%#x), "
"attrib (%#x)", reg, seg->base, seg->limit, seg->attrib);
switch (reg) {
case VM_REG_GUEST_CS:
case VM_REG_GUEST_DS:
case VM_REG_GUEST_ES:
case VM_REG_GUEST_SS:
svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
case VM_REG_GUEST_GDTR:
case VM_REG_GUEST_IDTR:
svm_set_dirty(sc, vcpu, VMCB_CACHE_DT);
break;
default:
break;
}
return (0);
}
int
vmcb_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
{
struct vmcb *vmcb;
struct svm_softc *sc;
struct vmcb_segment *seg;
sc = arg;
vmcb = svm_get_vmcb(sc, vcpu);
seg = vmcb_segptr(vmcb, reg);
KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
__func__, reg));
desc->base = seg->base;
desc->limit = seg->limit;
desc->access = 0;
if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
/* Map seg_desc access to VMCB attribute format */
desc->access = ((seg->attrib & 0xF00) << 4) |
(seg->attrib & 0xFF);
/*
* VT-x uses bit 16 to indicate a segment that has been loaded
* with a NULL selector (aka unusable). The 'desc->access'
* field is interpreted in the VT-x format by the
* processor-independent code.
*
* SVM uses the 'P' bit to convey the same information so
* convert it into the VT-x format. For more details refer to
* section "Segment State in the VMCB" in APMv2.
*/
if (reg != VM_REG_GUEST_CS && reg != VM_REG_GUEST_TR) {
if ((desc->access & 0x80) == 0)
desc->access |= 0x10000; /* Unusable segment */
}
}
return (0);
}
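
To make the attribute conversion in vmcb_setdesc()/vmcb_getdesc() concrete, here is a
standalone sketch (not part of the commit) using a hypothetical 64-bit code segment
whose access word is 0xa09b in the VT-x style layout used by the processor-independent
code:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t access = 0xa09b;	/* hypothetical: present code segment, L=1, G=1 */

	/* vmcb_setdesc(): fold access bits 15:12 down next to bits 7:0. */
	uint16_t attrib = ((access & 0xF000) >> 4) | (access & 0xFF);
	printf("VMCB attrib:     %#x\n", attrib);	/* 0xa9b */

	/* vmcb_getdesc(): the inverse expansion back to the VT-x style layout. */
	uint32_t back = ((attrib & 0xF00) << 4) | (attrib & 0xFF);
	printf("seg_desc access: %#x\n", back);		/* 0xa09b */
	return (0);
}

The unusable flag is handled separately because SVM has no equivalent of VT-x bit 16:
vmcb_setdesc() clears the P bit for unusable segments, and vmcb_getdesc() synthesizes
bit 16 when P is clear (except for CS and TR).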


@@ -29,6 +29,8 @@
#ifndef _VMCB_H_
#define _VMCB_H_
struct svm_softc;
/*
* Secure Virtual Machine: AMD64 Programmer's Manual Vol2, Chapter 15
* Layout of VMCB: AMD64 Programmer's Manual Vol2, Appendix B
@@ -279,8 +281,10 @@ struct vmcb {
CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
CTASSERT(offsetof(struct vmcb, state) == 0x400);
int vmcb_read(struct vmcb *vmcb, int ident, uint64_t *retval);
int vmcb_write(struct vmcb *vmcb, int ident, uint64_t val);
struct vmcb_segment *vmcb_seg(struct vmcb *vmcb, int type);
int vmcb_read(struct svm_softc *sc, int vcpu, int ident, uint64_t *retval);
int vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val);
int vmcb_setdesc(void *arg, int vcpu, int ident, struct seg_desc *desc);
int vmcb_getdesc(void *arg, int vcpu, int ident, struct seg_desc *desc);
int vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg);
#endif /* _VMCB_H_ */