From 277bdd9950b1e588bdc579e550c0a2503e78f7c0 Mon Sep 17 00:00:00 2001
From: Tycho Nightingale
Date: Tue, 9 Jun 2015 00:14:47 +0000
Subject: [PATCH] Support guest writes to the TSC by enabling the "use TSC
 offsetting" execution control and writing the difference between the host
 TSC and the guest TSC into the TSC offset in the VMCS upon encountering a
 write.

Reviewed by:	neel
---
 sys/amd64/vmm/intel/vmx.c     | 25 +++++++++++++++++++++----
 sys/amd64/vmm/intel/vmx.h     |  2 ++
 sys/amd64/vmm/intel/vmx_msr.c |  3 +++
 3 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 7ee50d4fb30e..f590586c44b3 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -856,10 +856,11 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
 	 * VM exit and entry respectively. It is also restored from the
 	 * host VMCS area on a VM exit.
 	 *
-	 * The TSC MSR is exposed read-only. Writes are disallowed as that
-	 * will impact the host TSC.
-	 * XXX Writes would be implemented with a wrmsr trap, and
-	 * then modifying the TSC offset in the VMCS.
+	 * The TSC MSR is exposed read-only. Writes are disallowed as
+	 * that will impact the host TSC. If the guest does a write
+	 * the "use TSC offsetting" execution control is enabled and the
+	 * difference between the host TSC and the guest TSC is written
+	 * into the TSC offset in the VMCS.
 	 */
 	if (guest_msr_rw(vmx, MSR_GSBASE) ||
 	    guest_msr_rw(vmx, MSR_FSBASE) ||
@@ -1130,6 +1131,22 @@ vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
 	VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
 }
 
+int
+vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset)
+{
+	int error;
+
+	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET) == 0) {
+		vmx->cap[vcpu].proc_ctls |= PROCBASED_TSC_OFFSET;
+		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
+		VCPU_CTR0(vmx->vm, vcpu, "Enabling TSC offsetting");
+	}
+
+	error = vmwrite(VMCS_TSC_OFFSET, offset);
+
+	return (error);
+}
+
 #define	NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING |		\
 			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
 #define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |		\
diff --git a/sys/amd64/vmm/intel/vmx.h b/sys/amd64/vmm/intel/vmx.h
index bc488614ff74..57f5b28461f6 100644
--- a/sys/amd64/vmm/intel/vmx.h
+++ b/sys/amd64/vmm/intel/vmx.h
@@ -135,6 +135,8 @@ void	vmx_call_isr(uintptr_t entry);
 u_long	vmx_fix_cr0(u_long cr0);
 u_long	vmx_fix_cr4(u_long cr4);
 
+int	vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset);
+
 extern char	vmx_exit_guest[];
 
 #endif
diff --git a/sys/amd64/vmm/intel/vmx_msr.c b/sys/amd64/vmm/intel/vmx_msr.c
index 3091f687d2c0..91b2c01b24e2 100644
--- a/sys/amd64/vmm/intel/vmx_msr.c
+++ b/sys/amd64/vmm/intel/vmx_msr.c
@@ -474,6 +474,9 @@ vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
 		else
 			vm_inject_gp(vmx->vm, vcpuid);
 		break;
+	case MSR_TSC:
+		error = vmx_set_tsc_offset(vmx, vcpuid, val - rdtsc());
+		break;
 	default:
 		error = EINVAL;
 		break;
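
A minimal, self-contained sketch of the offset arithmetic the patch relies on
(a note for readers, not part of the patch itself): the value the guest writes
to MSR_TSC minus the host's current TSC becomes the VMCS TSC offset, so a later
guest RDTSC (host TSC + offset) reads back roughly the value the guest wrote.
The helper name and the use of the compiler's __rdtsc() intrinsic below are
illustrative assumptions, not bhyve code.

#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

/*
 * Hypothetical helper: compute the TSC offset to program into the VMCS
 * when the guest writes 'guest_val' to MSR_TSC.
 */
static uint64_t
tsc_offset_for_guest_write(uint64_t guest_val)
{
	/* Unsigned subtraction wraps correctly even if guest_val is small. */
	return (guest_val - __rdtsc());
}

int
main(void)
{
	uint64_t want = 1000;	/* pretend the guest wrote 1000 to the TSC */
	uint64_t off = tsc_offset_for_guest_write(want);

	/* The guest's view of the TSC is host TSC + offset, i.e. ~want. */
	printf("offset %llu, guest view ~%llu\n",
	    (unsigned long long)off, (unsigned long long)(__rdtsc() + off));
	return (0);
}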