Support guest accesses to %cr8.

Reviewed by:	neel
This commit is contained in:
Tycho Nightingale 2014-06-06 18:23:49 +00:00
parent 997b054cf1
commit 594db0024e
3 changed files with 200 additions and 87 deletions

View File

@ -83,7 +83,9 @@ __FBSDID("$FreeBSD$");
(PROCBASED_SECONDARY_CONTROLS | \
PROCBASED_IO_EXITING | \
PROCBASED_MSR_BITMAPS | \
PROCBASED_CTLS_WINDOW_SETTING)
PROCBASED_CTLS_WINDOW_SETTING | \
PROCBASED_CR8_LOAD_EXITING | \
PROCBASED_CR8_STORE_EXITING)
#define PROCBASED_CTLS_ZERO_SETTING \
(PROCBASED_CR3_LOAD_EXITING | \
PROCBASED_CR3_STORE_EXITING | \
@ -713,6 +715,13 @@ vmx_init(int ipinum)
procbased_ctls2 |= procbased2_vid_bits;
procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;
/*
* No need to emulate accesses to %CR8 if virtual
* interrupt delivery is enabled.
*/
procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;
/*
* Check for Posted Interrupts only if Virtual Interrupt
* Delivery is enabled.
@ -1426,97 +1435,130 @@ vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
return (HANDLED);
}
static int
vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
/*
 * Return the value of the guest general-purpose register selected by
 * 'ident', the register encoding from the VM-exit qualification
 * (0 = %rax ... 15 = %r15).
 *
 * %rsp (ident 4) is special: it is kept in the VMCS rather than in the
 * software register context, so it must be read with vmcs_read().
 */
static uint64_t
vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
{
	const struct vmxctx *ctx;

	if (ident == 4)
		return (vmcs_read(VMCS_GUEST_RSP));

	ctx = &vmx->ctx[vcpu];
	switch (ident) {
	case 0:
		return (ctx->guest_rax);
	case 1:
		return (ctx->guest_rcx);
	case 2:
		return (ctx->guest_rdx);
	case 3:
		return (ctx->guest_rbx);
	case 5:
		return (ctx->guest_rbp);
	case 6:
		return (ctx->guest_rsi);
	case 7:
		return (ctx->guest_rdi);
	case 8:
		return (ctx->guest_r8);
	case 9:
		return (ctx->guest_r9);
	case 10:
		return (ctx->guest_r10);
	case 11:
		return (ctx->guest_r11);
	case 12:
		return (ctx->guest_r12);
	case 13:
		return (ctx->guest_r13);
	case 14:
		return (ctx->guest_r14);
	case 15:
		return (ctx->guest_r15);
	default:
		panic("invalid vmx register %d", ident);
	}
}
/*
 * Store 'regval' into the guest general-purpose register selected by
 * 'ident' (same encoding as vmx_get_guest_reg).
 *
 * %rsp (ident 4) lives in the VMCS, not the software register context,
 * so it is written with vmcs_write().
 */
static void
vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
{
	struct vmxctx *ctx;
	uint64_t *slot;

	if (ident == 4) {
		vmcs_write(VMCS_GUEST_RSP, regval);
		return;
	}

	ctx = &vmx->ctx[vcpu];
	switch (ident) {
	case 0:
		slot = &ctx->guest_rax;
		break;
	case 1:
		slot = &ctx->guest_rcx;
		break;
	case 2:
		slot = &ctx->guest_rdx;
		break;
	case 3:
		slot = &ctx->guest_rbx;
		break;
	case 5:
		slot = &ctx->guest_rbp;
		break;
	case 6:
		slot = &ctx->guest_rsi;
		break;
	case 7:
		slot = &ctx->guest_rdi;
		break;
	case 8:
		slot = &ctx->guest_r8;
		break;
	case 9:
		slot = &ctx->guest_r9;
		break;
	case 10:
		slot = &ctx->guest_r10;
		break;
	case 11:
		slot = &ctx->guest_r11;
		break;
	case 12:
		slot = &ctx->guest_r12;
		break;
	case 13:
		slot = &ctx->guest_r13;
		break;
	case 14:
		slot = &ctx->guest_r14;
		break;
	case 15:
		slot = &ctx->guest_r15;
		break;
	default:
		panic("invalid vmx register %d", ident);
	}
	*slot = regval;
}
static int
vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
uint64_t crval, regval;
/* We only handle mov to %cr0 at this time */
if ((exitqual & 0xf0) != 0x00)
return (UNHANDLED);
cr = exitqual & 0xf;
if (cr != 0 && cr != 4)
return (UNHANDLED);
regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
regval = 0; /* silence gcc */
vmxctx = &vmx->ctx[vcpu];
vmcs_write(VMCS_CR0_SHADOW, regval);
/*
* We must use vmcs_write() directly here because vmcs_setreg() will
* call vmclear(vmcs) as a side-effect which we certainly don't want.
*/
switch ((exitqual >> 8) & 0xf) {
case 0:
regval = vmxctx->guest_rax;
break;
case 1:
regval = vmxctx->guest_rcx;
break;
case 2:
regval = vmxctx->guest_rdx;
break;
case 3:
regval = vmxctx->guest_rbx;
break;
case 4:
regval = vmcs_read(VMCS_GUEST_RSP);
break;
case 5:
regval = vmxctx->guest_rbp;
break;
case 6:
regval = vmxctx->guest_rsi;
break;
case 7:
regval = vmxctx->guest_rdi;
break;
case 8:
regval = vmxctx->guest_r8;
break;
case 9:
regval = vmxctx->guest_r9;
break;
case 10:
regval = vmxctx->guest_r10;
break;
case 11:
regval = vmxctx->guest_r11;
break;
case 12:
regval = vmxctx->guest_r12;
break;
case 13:
regval = vmxctx->guest_r13;
break;
case 14:
regval = vmxctx->guest_r14;
break;
case 15:
regval = vmxctx->guest_r15;
break;
}
crval = regval | cr0_ones_mask;
crval &= ~cr0_zeros_mask;
vmcs_write(VMCS_GUEST_CR0, crval);
if (cr == 0) {
ones_mask = cr0_ones_mask;
zeros_mask = cr0_zeros_mask;
vmcs_guest_cr = VMCS_GUEST_CR0;
vmcs_shadow_cr = VMCS_CR0_SHADOW;
} else {
ones_mask = cr4_ones_mask;
zeros_mask = cr4_zeros_mask;
vmcs_guest_cr = VMCS_GUEST_CR4;
vmcs_shadow_cr = VMCS_CR4_SHADOW;
}
vmcs_write(vmcs_shadow_cr, regval);
crval = regval | ones_mask;
crval &= ~zeros_mask;
vmcs_write(vmcs_guest_cr, crval);
if (cr == 0 && regval & CR0_PG) {
if (regval & CR0_PG) {
uint64_t efer, entry_ctls;
/*
@ -1537,6 +1579,48 @@ vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
return (HANDLED);
}
/*
 * Emulate a guest "mov to %cr4": apply the VMX fixed-bit masks to the
 * value the guest wrote, update the real guest %cr4 in the VMCS, and
 * keep the read shadow equal to the guest's unmodified value.
 */
static int
vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t newval;

	/* Only access type 0 ("mov to CR") is handled at this time. */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	newval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	/* The shadow is what the guest reads back on "mov from %cr4". */
	vmcs_write(VMCS_CR4_SHADOW, newval);

	/* Force the bits that VMX operation requires set or clear. */
	vmcs_write(VMCS_GUEST_CR4, (newval | cr4_ones_mask) & ~cr4_zeros_mask);

	return (HANDLED);
}
/*
 * Emulate guest accesses to %cr8 by mapping them onto the virtual
 * local APIC's TPR.  Architecturally %cr8 holds the task priority
 * class, i.e. bits 7:4 of the TPR, hence the 4-bit shifts.
 */
static int
vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	struct vlapic *vlapic;
	int regident;

	/* Only register-form mov to/from %cr8 is handled at this time. */
	if ((exitqual & 0xe0) != 0x00) {
		return (UNHANDLED);
	}

	vlapic = vm_lapic(vmx->vm, vcpu);
	regident = (exitqual >> 8) & 0xf;

	if (exitqual & 0x10) {
		/* mov from %cr8: expose TPR bits 7:4 to the guest. */
		vmx_set_guest_reg(vmx, vcpu, regident,
		    vlapic_get_tpr(vlapic) >> 4);
	} else {
		/* mov to %cr8: scale the value up into TPR bits 7:4. */
		vlapic_set_tpr(vlapic,
		    vmx_get_guest_reg(vmx, vcpu, regident) << 4);
	}

	return (HANDLED);
}
/*
* From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
*/
@ -1929,7 +2013,17 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
switch (reason) {
case EXIT_REASON_CR_ACCESS:
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
handled = vmx_emulate_cr_access(vmx, vcpu, qual);
switch (qual & 0xf) {
case 0:
handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
break;
case 4:
handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
break;
case 8:
handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
break;
}
break;
case EXIT_REASON_RDMSR:
vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);

View File

@ -1184,7 +1184,7 @@ vlapic_read(struct vlapic *vlapic, int mmio_access, uint64_t offset,
*data = lapic->version;
break;
case APIC_OFFSET_TPR:
*data = lapic->tpr;
*data = vlapic_get_tpr(vlapic);
break;
case APIC_OFFSET_APR:
*data = lapic->apr;
@ -1305,8 +1305,7 @@ vlapic_write(struct vlapic *vlapic, int mmio_access, uint64_t offset,
vlapic_id_write_handler(vlapic);
break;
case APIC_OFFSET_TPR:
lapic->tpr = data & 0xff;
vlapic_update_ppr(vlapic);
vlapic_set_tpr(vlapic, data & 0xff);
break;
case APIC_OFFSET_EOI:
vlapic_process_eoi(vlapic);
@ -1611,3 +1610,20 @@ vlapic_set_tmr_level(struct vlapic *vlapic, uint32_t dest, bool phys,
VLAPIC_CTR1(vlapic, "vector %d set to level-triggered", vector);
vlapic_set_tmr(vlapic, vector, true);
}
/*
 * Set the Task Priority Register and recompute the derived processor
 * priority so interrupt acceptance reflects the new TPR immediately.
 */
void
vlapic_set_tpr(struct vlapic *vlapic, uint8_t val)
{

	vlapic->apic_page->tpr = val;
	vlapic_update_ppr(vlapic);
}
/*
 * Return the current Task Priority Register value from the virtual
 * APIC page.
 */
uint8_t
vlapic_get_tpr(struct vlapic *vlapic)
{

	return (vlapic->apic_page->tpr);
}

View File

@ -92,6 +92,9 @@ void vlapic_reset_tmr(struct vlapic *vlapic);
void vlapic_set_tmr_level(struct vlapic *vlapic, uint32_t dest, bool phys,
int delmode, int vector);
void vlapic_set_tpr(struct vlapic *vlapic, uint8_t val);
uint8_t vlapic_get_tpr(struct vlapic *vlapic);
/* APIC write handlers */
void vlapic_id_write_handler(struct vlapic *vlapic);
void vlapic_ldr_write_handler(struct vlapic *vlapic);