vmm: Pass vcpu instead of vm and vcpuid to APIs used from CPU backends.

Reviewed by:	corvink, markj
Differential Revision:	https://reviews.freebsd.org/D37162
John Baldwin 2022-11-18 10:03:05 -08:00
parent d3956e4673
commit 80cb5d845b
16 changed files with 199 additions and 280 deletions
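The change is mechanical but wide: vmm interfaces consumed by the SVM and VMX CPU backends now take the struct vcpu pointer the backend already holds, instead of a (struct vm *, int vcpuid) pair that each callee had to re-resolve and re-validate. A minimal sketch of the call-site pattern, distilled from the hunks below (the helper name nmi_check_sketch is hypothetical; the 'vcpu' back pointer in struct svm_vcpu does appear in the diff):

    /* before: int vm_nmi_pending(struct vm *vm, int vcpuid); */
    int vm_nmi_pending(struct vcpu *vcpu);

    /*
     * Backend call sites change accordingly; the backend's per-vcpu
     * structure keeps a back pointer ('vcpu') to the generic vcpu.
     */
    static int
    nmi_check_sketch(struct svm_vcpu *vcpu)
    {
        /* was: vm_nmi_pending(sc->vm, vcpu->vcpuid) */
        return (vm_nmi_pending(vcpu->vcpu));
    }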

sys/amd64/include/vmm.h:

@@ -258,7 +258,7 @@ void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len,
 void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len,
     int prot, void **cookie);
 void vm_gpa_release(void *cookie);
-bool vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa);
+bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa);
 int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
 int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
@@ -269,11 +269,11 @@ int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
 int vm_run(struct vm *vm, struct vm_run *vmrun);
 int vm_suspend(struct vm *vm, enum vm_suspend_how how);
 int vm_inject_nmi(struct vm *vm, int vcpu);
-int vm_nmi_pending(struct vm *vm, int vcpuid);
-void vm_nmi_clear(struct vm *vm, int vcpuid);
+int vm_nmi_pending(struct vcpu *vcpu);
+void vm_nmi_clear(struct vcpu *vcpu);
 int vm_inject_extint(struct vm *vm, int vcpu);
-int vm_extint_pending(struct vm *vm, int vcpuid);
-void vm_extint_clear(struct vm *vm, int vcpuid);
+int vm_extint_pending(struct vcpu *vcpu);
+void vm_extint_clear(struct vcpu *vcpu);
 int vcpu_vcpuid(struct vcpu *vcpu);
 struct vm *vcpu_vm(struct vcpu *vcpu);
 struct vcpu *vm_vcpu(struct vm *vm, int cpu);
@@ -289,12 +289,12 @@ int vm_activate_cpu(struct vm *vm, int vcpu);
 int vm_suspend_cpu(struct vm *vm, int vcpu);
 int vm_resume_cpu(struct vm *vm, int vcpu);
 int vm_restart_instruction(struct vcpu *vcpu);
-struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
-void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
-void vm_exit_debug(struct vm *vm, int vcpuid, uint64_t rip);
-void vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip);
-void vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip);
-void vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip);
+struct vm_exit *vm_exitinfo(struct vcpu *vcpu);
+void vm_exit_suspended(struct vcpu *vcpu, uint64_t rip);
+void vm_exit_debug(struct vcpu *vcpu, uint64_t rip);
+void vm_exit_rendezvous(struct vcpu *vcpu, uint64_t rip);
+void vm_exit_astpending(struct vcpu *vcpu, uint64_t rip);
+void vm_exit_reqidle(struct vcpu *vcpu, uint64_t rip);
 int vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta);
 int vm_restore_time(struct vm *vm);
@@ -342,7 +342,7 @@ vcpu_reqidle(struct vm_eventinfo *info)
     return (*info->iptr);
 }

-int vcpu_debugged(struct vm *vm, int vcpuid);
+int vcpu_debugged(struct vcpu *vcpu);

 /*
  * Return true if device indicated by bus/slot/func is supposed to be a
@@ -366,14 +366,14 @@ int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
 enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu);

 static int __inline
-vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
+vcpu_is_running(struct vcpu *vcpu, int *hostcpu)
 {
-    return (vcpu_get_state(vm_vcpu(vm, vcpu), hostcpu) == VCPU_RUNNING);
+    return (vcpu_get_state(vcpu, hostcpu) == VCPU_RUNNING);
 }

 #ifdef _SYS_PROC_H_
 static int __inline
-vcpu_should_yield(struct vm *vm, int vcpu)
+vcpu_should_yield(struct vcpu *vcpu)
 {
     struct thread *td;
@@ -417,7 +417,7 @@ int vm_inject_exception(struct vcpu *vcpu, int vector, int err_valid,
  *
  * Return value is 0 on success and non-zero on failure.
  */
-int vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t intinfo);
+int vm_exit_intinfo(struct vcpu *vcpu, uint64_t intinfo);
@@ -427,7 +427,7 @@ int vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t intinfo);
 * Returns 0 if there are no events that need to be injected into the guest
 * and non-zero otherwise.
 */
-int vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *info);
+int vm_entry_intinfo(struct vcpu *vcpu, uint64_t *info);

 int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2);
@@ -435,10 +435,8 @@ int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2);
 * Function used to keep track of the guest's TSC offset. The
 * offset is used by the virutalization extensions to provide a consistent
 * value for the Time Stamp Counter to the guest.
- *
- * Return value is 0 on success and non-zero on failure.
 */
-int vm_set_tsc_offset(struct vm *vm, int vcpu_id, uint64_t offset);
+void vm_set_tsc_offset(struct vcpu *vcpu, uint64_t offset);

 enum vm_reg_name vm_segment_name(int seg_encoding);
@@ -470,8 +468,8 @@ void vm_copy_teardown(struct vm_copyinfo *copyinfo, int num_copyinfo);
 void vm_copyin(struct vm_copyinfo *copyinfo, void *kaddr, size_t len);
 void vm_copyout(const void *kaddr, struct vm_copyinfo *copyinfo, size_t len);

-int vcpu_trace_exceptions(struct vm *vm, int vcpuid);
-int vcpu_trap_wbinvd(struct vm *vm, int vcpuid);
+int vcpu_trace_exceptions(struct vcpu *vcpu);
+int vcpu_trap_wbinvd(struct vcpu *vcpu);
 #endif /* KERNEL */

 #ifdef _KERNEL
@@ -790,27 +788,27 @@ void vm_inject_fault(void *vm, int vcpuid, int vector, int errcode_valid,
     int errcode);

 static __inline void
-vm_inject_ud(void *vm, int vcpuid)
+vm_inject_ud(struct vcpu *vcpu)
 {
-    vm_inject_fault(vm, vcpuid, IDT_UD, 0, 0);
+    vm_inject_fault(vcpu, IDT_UD, 0, 0);
 }

 static __inline void
-vm_inject_gp(void *vm, int vcpuid)
+vm_inject_gp(struct vcpu *vcpu)
 {
-    vm_inject_fault(vm, vcpuid, IDT_GP, 1, 0);
+    vm_inject_fault(vcpu, IDT_GP, 1, 0);
 }

 static __inline void
-vm_inject_ac(void *vm, int vcpuid, int errcode)
+vm_inject_ac(struct vcpu *vcpu, int errcode)
 {
-    vm_inject_fault(vm, vcpuid, IDT_AC, 1, errcode);
+    vm_inject_fault(vcpu, IDT_AC, 1, errcode);
 }

 static __inline void
-vm_inject_ss(void *vm, int vcpuid, int errcode)
+vm_inject_ss(struct vcpu *vcpu, int errcode)
 {
-    vm_inject_fault(vm, vcpuid, IDT_SS, 1, errcode);
+    vm_inject_fault(vcpu, IDT_SS, 1, errcode);
 }

 void vm_inject_pf(void *vm, int vcpuid, int error_code, uint64_t cr2);
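Note that vmm.h still exports the index-based accessors (vm_vcpu(), vcpu_vcpuid(), vcpu_vm()), so code that tracks vcpus by id converts once at the boundary and then stays in pointer terms. A hedged usage sketch of the inlines above (the locals here are hypothetical):

    struct vcpu *vcpu;
    int hostcpu;

    vcpu = vm_vcpu(vm, vcpuid);        /* translate the id exactly once */
    if (vcpu_is_running(vcpu, &hostcpu) && hostcpu != curcpu)
        printf("%s: vcpu %d is running\n", vm_name(vcpu_vm(vcpu)),
            vcpu_vcpuid(vcpu));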

sys/amd64/vmm/amd/svm.c:

@@ -282,10 +282,9 @@ svm_modresume(void)
 }

 #ifdef BHYVE_SNAPSHOT
-int
-svm_set_tsc_offset(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t offset)
+void
+svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset)
 {
-    int error;
     struct vmcb_ctrl *ctrl;

     ctrl = svm_get_vmcb_ctrl(vcpu);
@@ -294,9 +293,7 @@ svm_set_tsc_offset(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t offset)
     svm_set_dirty(vcpu, VMCB_CACHE_I);
     SVM_CTR1(vcpu, "tsc offset changed to %#lx", offset);

-    error = vm_set_tsc_offset(sc->vm, vcpu->vcpuid, offset);
-
-    return (error);
+    vm_set_tsc_offset(vcpu->vcpu, offset);
 }
 #endif
@@ -464,7 +461,7 @@ vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa,
      * Intercept everything when tracing guest exceptions otherwise
      * just intercept machine check exception.
      */
-    if (vcpu_trace_exceptions(sc->vm, vcpu->vcpuid)) {
+    if (vcpu_trace_exceptions(vcpu->vcpu)) {
         for (n = 0; n < 32; n++) {
             /*
              * Skip unimplemented vectors in the exception bitmap.
@@ -504,7 +501,7 @@ vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa,
     svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
     svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
     svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
-    if (vcpu_trap_wbinvd(sc->vm, vcpu->vcpuid)) {
+    if (vcpu_trap_wbinvd(vcpu->vcpu)) {
         svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT,
             VMCB_INTCPT_WBINVD);
     }
@@ -992,9 +989,7 @@ svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
 {
     struct vmcb_ctrl *ctrl;
     uint64_t intinfo;
-    int vcpuid;

-    vcpuid = vcpu->vcpuid;
     ctrl = svm_get_vmcb_ctrl(vcpu);
     intinfo = ctrl->exitintinfo;
     if (!VMCB_EXITINTINFO_VALID(intinfo))
@@ -1009,7 +1004,7 @@ svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
     SVM_CTR2(vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", intinfo,
         VMCB_EXITINTINFO_VECTOR(intinfo));
     vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1);
-    vm_exit_intinfo(svm_sc->vm, vcpuid, intinfo);
+    vm_exit_intinfo(vcpu->vcpu, intinfo);
 }

 #ifdef INVARIANTS
@@ -1149,10 +1144,9 @@ svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval,
     struct vm_exit *vme;
     struct vmcb_state *state;
     uint64_t changed, lma, oldval;
-    int error __diagused, vcpuid;
+    int error __diagused;

     state = svm_get_vmcb_state(vcpu);
-    vcpuid = vcpu->vcpuid;

     oldval = state->efer;
     SVM_CTR2(vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);
@@ -1179,7 +1173,7 @@ svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval,
         goto gpf;

     if (newval & EFER_NXE) {
-        if (!vm_cpuid_capability(sc->vm, vcpuid, VCC_NO_EXECUTE))
+        if (!vm_cpuid_capability(vcpu->vcpu, VCC_NO_EXECUTE))
             goto gpf;
     }
@@ -1188,19 +1182,19 @@ svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval,
      * this is fixed flag guest attempt to set EFER_LMSLE as an error.
      */
     if (newval & EFER_LMSLE) {
-        vme = vm_exitinfo(sc->vm, vcpuid);
+        vme = vm_exitinfo(vcpu->vcpu);
         vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0);
         *retu = true;
         return (0);
     }

     if (newval & EFER_FFXSR) {
-        if (!vm_cpuid_capability(sc->vm, vcpuid, VCC_FFXSR))
+        if (!vm_cpuid_capability(vcpu->vcpu, VCC_FFXSR))
             goto gpf;
     }

     if (newval & EFER_TCE) {
-        if (!vm_cpuid_capability(sc->vm, vcpuid, VCC_TCE))
+        if (!vm_cpuid_capability(vcpu->vcpu, VCC_TCE))
             goto gpf;
     }
@@ -1219,18 +1213,17 @@ emulate_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
     int error;

     if (lapic_msr(num))
-        error = lapic_wrmsr(sc->vm, vcpu->vcpuid, num, val, retu);
+        error = lapic_wrmsr(vcpu->vcpu, num, val, retu);
     else if (num == MSR_EFER)
         error = svm_write_efer(sc, vcpu, val, retu);
     else
-        error = svm_wrmsr(sc, vcpu, num, val, retu);
+        error = svm_wrmsr(vcpu, num, val, retu);

     return (error);
 }

 static int
-emulate_rdmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
-    bool *retu)
+emulate_rdmsr(struct svm_vcpu *vcpu, u_int num, bool *retu)
 {
     struct vmcb_state *state;
     struct svm_regctx *ctx;
@@ -1238,9 +1231,9 @@ emulate_rdmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
     int error;

     if (lapic_msr(num))
-        error = lapic_rdmsr(sc->vm, vcpu->vcpuid, num, &result, retu);
+        error = lapic_rdmsr(vcpu->vcpu, num, &result, retu);
     else
-        error = svm_rdmsr(sc, vcpu, num, &result, retu);
+        error = svm_rdmsr(vcpu, num, &result, retu);

     if (error == 0) {
         state = svm_get_vmcb_state(vcpu);
@@ -1335,14 +1328,12 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
     uint64_t code, info1, info2, val;
     uint32_t eax, ecx, edx;
     int error __diagused, errcode_valid, handled, idtvec, reflect;
-    int vcpuid;
     bool retu;

     ctx = svm_get_guest_regctx(vcpu);
     vmcb = svm_get_vmcb(vcpu);
     state = &vmcb->state;
     ctrl = &vmcb->ctrl;
-    vcpuid = vcpu->vcpuid;

     handled = 0;
     code = ctrl->exitcode;
@@ -1487,7 +1478,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
         } else {
             SVM_CTR1(vcpu, "rdmsr %#x", ecx);
             vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1);
-            if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
+            if (emulate_rdmsr(vcpu, ecx, &retu)) {
                 vmexit->exitcode = VM_EXITCODE_RDMSR;
                 vmexit->u.msr.code = ecx;
             } else if (!retu) {
@@ -1504,8 +1495,9 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
         break;
     case VMCB_EXIT_CPUID:
         vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1);
-        handled = x86_emulate_cpuid(svm_sc->vm, vcpuid, &state->rax,
-            &ctx->sctx_rbx, &ctx->sctx_rcx, &ctx->sctx_rdx);
+        handled = x86_emulate_cpuid(vcpu->vcpu,
+            &state->rax, &ctx->sctx_rbx, &ctx->sctx_rcx,
+            &ctx->sctx_rdx);
         break;
     case VMCB_EXIT_HLT:
         vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1);
@@ -1522,7 +1514,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
             SVM_CTR2(vcpu, "nested page fault with "
                 "reserved bits set: info1(%#lx) info2(%#lx)",
                 info1, info2);
-        } else if (vm_mem_allocated(svm_sc->vm, vcpuid, info2)) {
+        } else if (vm_mem_allocated(vcpu->vcpu, info2)) {
             vmexit->exitcode = VM_EXITCODE_PAGING;
             vmexit->u.paging.gpa = info2;
             vmexit->u.paging.fault_type = npf_fault_type(info1);
@@ -1596,9 +1588,8 @@ static void
 svm_inj_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
 {
     uint64_t intinfo;
-    int vcpuid = vcpu->vcpuid;

-    if (!vm_entry_intinfo(svm_sc->vm, vcpuid, &intinfo))
+    if (!vm_entry_intinfo(vcpu->vcpu, &intinfo))
         return;

     KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
@@ -1624,7 +1615,6 @@ svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,
     uint8_t v_tpr;
     int vector, need_intr_window;
     int extint_pending;
-    int vcpuid = vcpu->vcpuid;

     state = svm_get_vmcb_state(vcpu);
     ctrl = svm_get_vmcb_ctrl(vcpu);
@@ -1650,7 +1640,7 @@ svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,
     svm_inj_intinfo(sc, vcpu);

     /* NMI event has priority over interrupts. */
-    if (vm_nmi_pending(sc->vm, vcpuid)) {
+    if (vm_nmi_pending(vcpu->vcpu)) {
         if (nmi_blocked(vcpu)) {
             /*
              * Can't inject another NMI if the guest has not
@@ -1686,7 +1676,7 @@ svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,
              */
             ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */
         } else {
-            vm_nmi_clear(sc->vm, vcpuid);
+            vm_nmi_clear(vcpu->vcpu);

             /* Inject NMI, vector number is not used */
             svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_NMI,
@@ -1699,7 +1689,7 @@ svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,
         }
     }

-    extint_pending = vm_extint_pending(sc->vm, vcpuid);
+    extint_pending = vm_extint_pending(vcpu->vcpu);
     if (!extint_pending) {
         if (!vlapic_pending_intr(vlapic, &vector))
             goto done;
@@ -1742,7 +1732,7 @@ svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,
     if (!extint_pending) {
         vlapic_intr_accepted(vlapic, vector);
     } else {
-        vm_extint_clear(sc->vm, vcpuid);
+        vm_extint_clear(vcpu->vcpu);
         vatpic_intr_accepted(sc->vm, vector);
     }
@@ -2003,18 +1993,15 @@ svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
     struct vmcb_ctrl *ctrl;
     struct vm_exit *vmexit;
     struct vlapic *vlapic;
-    struct vm *vm;
     uint64_t vmcb_pa;
-    int handled, vcpuid;
+    int handled;
     uint16_t ldt_sel;

     vcpu = vcpui;
-    vcpuid = vcpu->vcpuid;
     svm_sc = vcpu->sc;
-    vm = svm_sc->vm;
     state = svm_get_vmcb_state(vcpu);
     ctrl = svm_get_vmcb_ctrl(vcpu);
-    vmexit = vm_exitinfo(vm, vcpuid);
+    vmexit = vm_exitinfo(vcpu->vcpu);
     vlapic = vm_lapic(vcpu->vcpu);

     gctx = svm_get_guest_regctx(vcpu);
@@ -2045,7 +2032,7 @@ svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
         vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1);
     }

-    svm_msr_guest_enter(svm_sc, vcpu);
+    svm_msr_guest_enter(vcpu);

     /* Update Guest RIP */
     state->rip = rip;
@@ -2062,32 +2049,32 @@ svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
         if (vcpu_suspended(evinfo)) {
             enable_gintr();
-            vm_exit_suspended(vm, vcpuid, state->rip);
+            vm_exit_suspended(vcpu->vcpu, state->rip);
             break;
         }

         if (vcpu_rendezvous_pending(evinfo)) {
             enable_gintr();
-            vm_exit_rendezvous(vm, vcpuid, state->rip);
+            vm_exit_rendezvous(vcpu->vcpu, state->rip);
             break;
         }

         if (vcpu_reqidle(evinfo)) {
             enable_gintr();
-            vm_exit_reqidle(vm, vcpuid, state->rip);
+            vm_exit_reqidle(vcpu->vcpu, state->rip);
             break;
         }

         /* We are asked to give the cpu by scheduler. */
-        if (vcpu_should_yield(vm, vcpuid)) {
+        if (vcpu_should_yield(vcpu->vcpu)) {
             enable_gintr();
-            vm_exit_astpending(vm, vcpuid, state->rip);
+            vm_exit_astpending(vcpu->vcpu, state->rip);
             break;
         }

-        if (vcpu_debugged(vm, vcpuid)) {
+        if (vcpu_debugged(vcpu->vcpu)) {
             enable_gintr();
-            vm_exit_debug(vm, vcpuid, state->rip);
+            vm_exit_debug(vcpu->vcpu, state->rip);
             break;
         }
@@ -2140,7 +2127,7 @@ svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
         handled = svm_vmexit(svm_sc, vcpu, vmexit);
     } while (handled);

-    svm_msr_guest_exit(svm_sc, vcpu);
+    svm_msr_guest_exit(vcpu);

     return (0);
 }
@@ -2446,7 +2433,7 @@ svm_vcpu_snapshot(void *vcpui, struct vm_snapshot_meta *meta)
     vcpu = vcpui;
     err = 0;

-    running = vcpu_is_running(vcpu->sc->vm, vcpu->vcpuid, &hostcpu);
+    running = vcpu_is_running(vcpu->vcpu, &hostcpu);
     if (running && hostcpu != curcpu) {
         printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm),
             vcpu->vcpuid);
@@ -2642,11 +2629,10 @@ static int
 svm_restore_tsc(void *vcpui, uint64_t offset)
 {
     struct svm_vcpu *vcpu = vcpui;
-    int err;

-    err = svm_set_tsc_offset(vcpu->sc, vcpu, offset);
+    svm_set_tsc_offset(vcpu, offset);

-    return (err);
+    return (0);
 }
 #endif

sys/amd64/vmm/amd/svm.h:

@@ -69,8 +69,7 @@ struct svm_regctx {
 void svm_launch(uint64_t pa, struct svm_regctx *gctx, struct pcpu *pcpu);

 #ifdef BHYVE_SNAPSHOT
-int svm_set_tsc_offset(struct svm_softc *sc, struct svm_vcpu *vcpu,
-    uint64_t offset);
+void svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset);
 #endif

 #endif /* _SVM_H_ */

sys/amd64/vmm/amd/svm_msr.c:

@@ -86,7 +86,7 @@ svm_msr_guest_init(struct svm_softc *sc, struct svm_vcpu *vcpu)
 }

 void
-svm_msr_guest_enter(struct svm_softc *sc, struct svm_vcpu *vcpu)
+svm_msr_guest_enter(struct svm_vcpu *vcpu)
 {
     /*
      * Save host MSRs (if any) and restore guest MSRs (if any).
@@ -94,7 +94,7 @@ svm_msr_guest_enter(struct svm_softc *sc, struct svm_vcpu *vcpu)
 }

 void
-svm_msr_guest_exit(struct svm_softc *sc, struct svm_vcpu *vcpu)
+svm_msr_guest_exit(struct svm_vcpu *vcpu)
 {
     /*
      * Save guest MSRs (if any) and restore host MSRs.
@@ -108,8 +108,7 @@ svm_msr_guest_exit(struct svm_softc *sc, struct svm_vcpu *vcpu)
 }

 int
-svm_rdmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
-    uint64_t *result, bool *retu)
+svm_rdmsr(struct svm_vcpu *vcpu, u_int num, uint64_t *result, bool *retu)
 {
     int error = 0;
@@ -142,8 +141,7 @@ svm_rdmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
 }

 int
-svm_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num, uint64_t val,
-    bool *retu)
+svm_wrmsr(struct svm_vcpu *vcpu, u_int num, uint64_t val, bool *retu)
 {
     int error = 0;
@@ -175,7 +173,7 @@ svm_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num, uint64_t val,
         break;
 #ifdef BHYVE_SNAPSHOT
     case MSR_TSC:
-        error = svm_set_tsc_offset(sc, vcpu, val - rdtsc());
+        svm_set_tsc_offset(vcpu, val - rdtsc());
         break;
 #endif
     case MSR_EXTFEATURES:

sys/amd64/vmm/amd/svm_msr.h:

@@ -36,12 +36,10 @@ struct svm_vcpu;

 void svm_msr_init(void);
 void svm_msr_guest_init(struct svm_softc *sc, struct svm_vcpu *vcpu);
-void svm_msr_guest_enter(struct svm_softc *sc, struct svm_vcpu *vcpu);
-void svm_msr_guest_exit(struct svm_softc *sc, struct svm_vcpu *vcpu);
+void svm_msr_guest_enter(struct svm_vcpu *vcpu);
+void svm_msr_guest_exit(struct svm_vcpu *vcpu);

-int svm_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
-    uint64_t val, bool *retu);
-int svm_rdmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
-    uint64_t *result, bool *retu);
+int svm_wrmsr(struct svm_vcpu *vcpu, u_int num, uint64_t val, bool *retu);
+int svm_rdmsr(struct svm_vcpu *vcpu, u_int num, uint64_t *result, bool *retu);

 #endif /* _SVM_MSR_H_ */

sys/amd64/vmm/intel/vmx.c:

@@ -1148,7 +1148,7 @@ vmx_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid)
     error += vmwrite(VMCS_EPTP, vmx->eptp);
     error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
     error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
-    if (vcpu_trap_wbinvd(vmx->vm, vcpuid)) {
+    if (vcpu_trap_wbinvd(vcpu->vcpu)) {
         KASSERT(cap_wbinvd_exit, ("WBINVD trap not available"));
         procbased_ctls2 |= PROCBASED2_WBINVD_EXITING;
     }
@@ -1168,7 +1168,7 @@ vmx_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid)
     }

     /* exception bitmap */
-    if (vcpu_trace_exceptions(vmx->vm, vcpuid))
+    if (vcpu_trace_exceptions(vcpu->vcpu))
         exc_bitmap = 0xffffffff;
     else
         exc_bitmap = 1 << IDT_MC;
@@ -1226,11 +1226,11 @@ vmx_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid)
 }

 static int
-vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
+vmx_handle_cpuid(struct vmx_vcpu *vcpu, struct vmxctx *vmxctx)
 {
     int handled;

-    handled = x86_emulate_cpuid(vm, vcpu, (uint64_t *)&vmxctx->guest_rax,
+    handled = x86_emulate_cpuid(vcpu->vcpu, (uint64_t *)&vmxctx->guest_rax,
         (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx,
         (uint64_t *)&vmxctx->guest_rdx);
     return (handled);
@@ -1395,7 +1395,7 @@ vmx_clear_nmi_window_exiting(struct vmx_vcpu *vcpu)
 }

 int
-vmx_set_tsc_offset(struct vmx *vmx, struct vmx_vcpu *vcpu, uint64_t offset)
+vmx_set_tsc_offset(struct vmx_vcpu *vcpu, uint64_t offset)
 {
     int error;
@@ -1408,7 +1408,7 @@ vmx_set_tsc_offset(struct vmx *vmx, struct vmx_vcpu *vcpu, uint64_t offset)
     error = vmwrite(VMCS_TSC_OFFSET, offset);
 #ifdef BHYVE_SNAPSHOT
     if (error == 0)
-        error = vm_set_tsc_offset(vmx->vm, vcpu->vcpuid, offset);
+        vm_set_tsc_offset(vcpu->vcpu, offset);
 #endif
     return (error);
 }
@@ -1419,7 +1419,7 @@ vmx_set_tsc_offset(struct vmx *vmx, struct vmx_vcpu *vcpu, uint64_t offset)
     VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)

 static void
-vmx_inject_nmi(struct vmx *vmx, struct vmx_vcpu *vcpu)
+vmx_inject_nmi(struct vmx_vcpu *vcpu)
 {
     uint32_t gi __diagused, info;
@@ -1441,12 +1441,12 @@ vmx_inject_nmi(struct vmx *vmx, struct vmx_vcpu *vcpu)
     VMX_CTR0(vcpu, "Injecting vNMI");

     /* Clear the request */
-    vm_nmi_clear(vmx->vm, vcpu->vcpuid);
+    vm_nmi_clear(vcpu->vcpu);
 }

 static void
-vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
-    struct vlapic *vlapic, uint64_t guestrip)
+vmx_inject_interrupts(struct vmx_vcpu *vcpu, struct vlapic *vlapic,
+    uint64_t guestrip)
 {
     int vector, need_nmi_exiting, extint_pending;
     uint64_t rflags, entryinfo;
@@ -1463,7 +1463,7 @@ vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
         }
     }

-    if (vm_entry_intinfo(vmx->vm, vcpu->vcpuid, &entryinfo)) {
+    if (vm_entry_intinfo(vcpu->vcpu, &entryinfo)) {
         KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
             "intinfo is not valid: %#lx", __func__, entryinfo));
@@ -1488,7 +1488,7 @@ vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
         vmcs_write(VMCS_ENTRY_INTR_INFO, info);
     }

-    if (vm_nmi_pending(vmx->vm, vcpu->vcpuid)) {
+    if (vm_nmi_pending(vcpu->vcpu)) {
         /*
          * If there are no conditions blocking NMI injection then
          * inject it directly here otherwise enable "NMI window
@@ -1505,7 +1505,7 @@ vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
         if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
             info = vmcs_read(VMCS_ENTRY_INTR_INFO);
             if ((info & VMCS_INTR_VALID) == 0) {
-                vmx_inject_nmi(vmx, vcpu);
+                vmx_inject_nmi(vcpu);
                 need_nmi_exiting = 0;
             } else {
                 VMX_CTR1(vcpu, "Cannot inject NMI "
@@ -1520,7 +1520,7 @@ vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
             vmx_set_nmi_window_exiting(vcpu);
     }

-    extint_pending = vm_extint_pending(vmx->vm, vcpu->vcpuid);
+    extint_pending = vm_extint_pending(vcpu->vcpu);

     if (!extint_pending && virtual_interrupt_delivery) {
         vmx_inject_pir(vlapic);
@@ -1553,7 +1553,7 @@ vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
             ("invalid vector %d from local APIC", vector));
     } else {
         /* Ask the legacy pic for a vector to inject */
-        vatpic_pending_intr(vmx->vm, &vector);
+        vatpic_pending_intr(vcpu->vmx->vm, &vector);

         /*
          * From the Intel SDM, Volume 3, Section "Maskable
@@ -1603,8 +1603,8 @@ vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
         /* Update the Local APIC ISR */
         vlapic_intr_accepted(vlapic, vector);
     } else {
-        vm_extint_clear(vmx->vm, vcpu->vcpuid);
-        vatpic_intr_accepted(vmx->vm, vector);
+        vm_extint_clear(vcpu->vcpu);
+        vatpic_intr_accepted(vcpu->vmx->vm, vector);

         /*
          * After we accepted the current ExtINT the PIC may
@@ -2319,21 +2319,20 @@ vmx_task_switch_reason(uint64_t qual)
 }

 static int
-emulate_wrmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, uint64_t val,
-    bool *retu)
+emulate_wrmsr(struct vmx_vcpu *vcpu, u_int num, uint64_t val, bool *retu)
 {
     int error;

     if (lapic_msr(num))
-        error = lapic_wrmsr(vmx->vm, vcpu->vcpuid, num, val, retu);
+        error = lapic_wrmsr(vcpu->vcpu, num, val, retu);
     else
-        error = vmx_wrmsr(vmx, vcpu, num, val, retu);
+        error = vmx_wrmsr(vcpu, num, val, retu);

     return (error);
 }

 static int
-emulate_rdmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, bool *retu)
+emulate_rdmsr(struct vmx_vcpu *vcpu, u_int num, bool *retu)
 {
     struct vmxctx *vmxctx;
     uint64_t result;
@@ -2341,9 +2340,9 @@ emulate_rdmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, bool *retu)
     int error;

     if (lapic_msr(num))
-        error = lapic_rdmsr(vmx->vm, vcpu->vcpuid, num, &result, retu);
+        error = lapic_rdmsr(vcpu->vcpu, num, &result, retu);
     else
-        error = vmx_rdmsr(vmx, vcpu, num, &result, retu);
+        error = vmx_rdmsr(vcpu, num, &result, retu);

     if (error == 0) {
         eax = result;
@@ -2415,7 +2414,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
             idtvec_err = vmcs_idt_vectoring_err();
             exitintinfo |= (uint64_t)idtvec_err << 32;
         }
-        error = vm_exit_intinfo(vmx->vm, vcpuid, exitintinfo);
+        error = vm_exit_intinfo(vcpu->vcpu, exitintinfo);
         KASSERT(error == 0, ("%s: vm_set_intinfo error %d",
             __func__, error));
@@ -2515,7 +2514,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
         ecx = vmxctx->guest_rcx;
         VMX_CTR1(vcpu, "rdmsr 0x%08x", ecx);
         SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpuid, vmexit, ecx);
-        error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
+        error = emulate_rdmsr(vcpu, ecx, &retu);
         if (error) {
             vmexit->exitcode = VM_EXITCODE_RDMSR;
             vmexit->u.msr.code = ecx;
@@ -2537,8 +2536,8 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
             ecx, (uint64_t)edx << 32 | eax);
         SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpuid, ecx,
             (uint64_t)edx << 32 | eax);
-        error = emulate_wrmsr(vmx, vcpu, ecx,
-            (uint64_t)edx << 32 | eax, &retu);
+        error = emulate_wrmsr(vcpu, ecx, (uint64_t)edx << 32 | eax,
+            &retu);
         if (error) {
             vmexit->exitcode = VM_EXITCODE_WRMSR;
             vmexit->u.msr.code = ecx;
@@ -2612,8 +2611,8 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
     case EXIT_REASON_NMI_WINDOW:
         SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpuid, vmexit);
         /* Exit to allow the pending virtual NMI to be injected */
-        if (vm_nmi_pending(vmx->vm, vcpuid))
-            vmx_inject_nmi(vmx, vcpu);
+        if (vm_nmi_pending(vcpu->vcpu))
+            vmx_inject_nmi(vcpu);
         vmx_clear_nmi_window_exiting(vcpu);
         vmm_stat_incr(vcpu->vcpu, VMEXIT_NMI_WINDOW, 1);
         return (1);
@@ -2643,7 +2642,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
     case EXIT_REASON_CPUID:
         vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1);
         SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpuid, vmexit);
-        handled = vmx_handle_cpuid(vmx->vm, vcpuid, vmxctx);
+        handled = vmx_handle_cpuid(vcpu, vmxctx);
         break;
     case EXIT_REASON_EXCEPTION:
         vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1);
@@ -2734,7 +2733,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
          * this must be an instruction that accesses MMIO space.
          */
         gpa = vmcs_gpa();
-        if (vm_mem_allocated(vmx->vm, vcpuid, gpa) ||
+        if (vm_mem_allocated(vcpu->vcpu, gpa) ||
             apic_access_fault(vcpu, gpa)) {
             vmexit->exitcode = VM_EXITCODE_PAGING;
             vmexit->inst_length = 0;
@@ -3012,10 +3011,9 @@ vmx_pmap_deactivate(struct vmx *vmx, pmap_t pmap)
 static int
 vmx_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
 {
-    int rc, handled, launched, vcpuid;
+    int rc, handled, launched;
     struct vmx *vmx;
     struct vmx_vcpu *vcpu;
-    struct vm *vm;
     struct vmxctx *vmxctx;
     struct vmcs *vmcs;
     struct vm_exit *vmexit;
@@ -3026,18 +3024,16 @@ vmx_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
     vcpu = vcpui;
     vmx = vcpu->vmx;
-    vm = vmx->vm;
-    vcpuid = vcpu->vcpuid;
     vmcs = vcpu->vmcs;
     vmxctx = &vcpu->ctx;
     vlapic = vm_lapic(vcpu->vcpu);
-    vmexit = vm_exitinfo(vm, vcpuid);
+    vmexit = vm_exitinfo(vcpu->vcpu);
     launched = 0;

     KASSERT(vmxctx->pmap == pmap,
         ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));

-    vmx_msr_guest_enter(vmx, vcpu);
+    vmx_msr_guest_enter(vcpu);

     VMPTRLD(vmcs);
@@ -3077,7 +3073,7 @@ vmx_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
          * pmap_invalidate_ept().
          */
         disable_intr();
-        vmx_inject_interrupts(vmx, vcpu, vlapic, rip);
+        vmx_inject_interrupts(vcpu, vlapic, rip);

         /*
          * Check for vcpu suspension after injecting events because
@@ -3086,33 +3082,33 @@ vmx_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
          */
         if (vcpu_suspended(evinfo)) {
             enable_intr();
-            vm_exit_suspended(vmx->vm, vcpuid, rip);
+            vm_exit_suspended(vcpu->vcpu, rip);
             break;
         }

         if (vcpu_rendezvous_pending(evinfo)) {
             enable_intr();
-            vm_exit_rendezvous(vmx->vm, vcpuid, rip);
+            vm_exit_rendezvous(vcpu->vcpu, rip);
             break;
         }

         if (vcpu_reqidle(evinfo)) {
             enable_intr();
-            vm_exit_reqidle(vmx->vm, vcpuid, rip);
+            vm_exit_reqidle(vcpu->vcpu, rip);
             break;
         }

-        if (vcpu_should_yield(vm, vcpuid)) {
+        if (vcpu_should_yield(vcpu->vcpu)) {
             enable_intr();
-            vm_exit_astpending(vmx->vm, vcpuid, rip);
+            vm_exit_astpending(vcpu->vcpu, rip);
             vmx_astpending_trace(vcpu, rip);
             handled = HANDLED;
             break;
         }

-        if (vcpu_debugged(vm, vcpuid)) {
+        if (vcpu_debugged(vcpu->vcpu)) {
             enable_intr();
-            vm_exit_debug(vmx->vm, vcpuid, rip);
+            vm_exit_debug(vcpu->vcpu, rip);
             break;
         }
@@ -3214,7 +3210,7 @@ vmx_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
         vmexit->exitcode);

     VMCLEAR(vmcs);
-    vmx_msr_guest_exit(vmx, vcpu);
+    vmx_msr_guest_exit(vcpu);

     return (0);
 }
@@ -3390,7 +3386,7 @@ vmx_getreg(void *vcpui, int reg, uint64_t *retval)
     struct vmx_vcpu *vcpu = vcpui;
     struct vmx *vmx = vcpu->vmx;

-    running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
+    running = vcpu_is_running(vcpu->vcpu, &hostcpu);
     if (running && hostcpu != curcpu)
         panic("vmx_getreg: %s%d is running", vm_name(vmx->vm),
             vcpu->vcpuid);
@@ -3413,7 +3409,7 @@ vmx_setreg(void *vcpui, int reg, uint64_t val)
     struct vmx_vcpu *vcpu = vcpui;
     struct vmx *vmx = vcpu->vmx;

-    running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
+    running = vcpu_is_running(vcpu->vcpu, &hostcpu);
     if (running && hostcpu != curcpu)
         panic("vmx_setreg: %s%d is running", vm_name(vmx->vm),
             vcpu->vcpuid);
@@ -3480,7 +3476,7 @@ vmx_getdesc(void *vcpui, int reg, struct seg_desc *desc)
     struct vmx_vcpu *vcpu = vcpui;
     struct vmx *vmx = vcpu->vmx;

-    running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
+    running = vcpu_is_running(vcpu->vcpu, &hostcpu);
     if (running && hostcpu != curcpu)
         panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm),
             vcpu->vcpuid);
@@ -3495,7 +3491,7 @@ vmx_setdesc(void *vcpui, int reg, struct seg_desc *desc)
     struct vmx_vcpu *vcpu = vcpui;
     struct vmx *vmx = vcpu->vmx;

-    running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
+    running = vcpu_is_running(vcpu->vcpu, &hostcpu);
     if (running && hostcpu != curcpu)
         panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm),
             vcpu->vcpuid);
@@ -3806,7 +3802,7 @@ vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
     struct vm_exit *vmexit;
     uint8_t rvi, ppr;

-    vmexit = vm_exitinfo(vlapic->vm, vlapic->vcpuid);
+    vmexit = vm_exitinfo(vlapic->vcpu);
     KASSERT(vmexit->exitcode == VM_EXITCODE_HLT,
         ("vmx_pending_intr: exitcode not 'HLT'"));
     rvi = vmexit->u.hlt.intr_status & APIC_TPR_INT;
@@ -3875,7 +3871,7 @@ vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
     uint64_t mask, val;

     KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
-    KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
+    KASSERT(!vcpu_is_running(vlapic->vcpu, NULL),
         ("vmx_set_tmr: vcpu cannot be running"));

     vlapic_vtx = (struct vlapic_vtx *)vlapic;
@@ -4132,7 +4128,7 @@ vmx_vcpu_snapshot(void *vcpui, struct vm_snapshot_meta *meta)
     vmx = vcpu->vmx;
     vmcs = vcpu->vmcs;

-    run = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
+    run = vcpu_is_running(vcpu->vcpu, &hostcpu);
     if (run && hostcpu != curcpu) {
         printf("%s: %s%d is running", __func__, vm_name(vmx->vm),
             vcpu->vcpuid);
@@ -4235,7 +4231,7 @@ vmx_restore_tsc(void *vcpui, uint64_t offset)
     vmx = vcpu->vmx;
     vmcs = vcpu->vmcs;

-    running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
+    running = vcpu_is_running(vcpu->vcpu, &hostcpu);
     if (running && hostcpu != curcpu) {
         printf("%s: %s%d is running", __func__, vm_name(vmx->vm),
             vcpu->vcpuid);
@@ -4245,7 +4241,7 @@ vmx_restore_tsc(void *vcpui, uint64_t offset)
     if (!running)
         VMPTRLD(vmcs);

-    error = vmx_set_tsc_offset(vmx, vcpu, offset);
+    error = vmx_set_tsc_offset(vcpu, offset);

     if (!running)
         VMCLEAR(vmcs);

sys/amd64/vmm/intel/vmx.h:

@@ -176,8 +176,7 @@ void vmx_call_isr(uintptr_t entry);
 u_long vmx_fix_cr0(u_long cr0);
 u_long vmx_fix_cr4(u_long cr4);

-int vmx_set_tsc_offset(struct vmx *vmx, struct vmx_vcpu *vcpu,
-    uint64_t offset);
+int vmx_set_tsc_offset(struct vmx_vcpu *vcpu, uint64_t offset);

 extern char vmx_exit_guest[];
 extern char vmx_exit_guest_flush_rsb[];

sys/amd64/vmm/intel/vmx_msr.c:

@@ -344,7 +344,7 @@ vmx_msr_guest_init(struct vmx *vmx, struct vmx_vcpu *vcpu)
 }

 void
-vmx_msr_guest_enter(struct vmx *vmx, struct vmx_vcpu *vcpu)
+vmx_msr_guest_enter(struct vmx_vcpu *vcpu)
 {

     /* Save host MSRs (in particular, KGSBASE) and restore guest MSRs */
@@ -367,7 +367,7 @@ vmx_msr_guest_enter_tsc_aux(struct vmx *vmx, struct vmx_vcpu *vcpu)
 }

 void
-vmx_msr_guest_exit(struct vmx *vmx, struct vmx_vcpu *vcpu)
+vmx_msr_guest_exit(struct vmx_vcpu *vcpu)
 {

     /* Save guest MSRs */
@@ -404,8 +404,7 @@ vmx_msr_guest_exit_tsc_aux(struct vmx *vmx, struct vmx_vcpu *vcpu)
 }

 int
-vmx_rdmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, uint64_t *val,
-    bool *retu)
+vmx_rdmsr(struct vmx_vcpu *vcpu, u_int num, uint64_t *val, bool *retu)
 {
     int error;
@@ -447,8 +446,7 @@ vmx_rdmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, uint64_t *val,
 }

 int
-vmx_wrmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, uint64_t val,
-    bool *retu)
+vmx_wrmsr(struct vmx_vcpu *vcpu, u_int num, uint64_t val, bool *retu)
 {
     uint64_t changed;
     int error;
@@ -496,7 +494,7 @@ vmx_wrmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, uint64_t val,
         vm_inject_gp(vcpu->vcpu);
         break;
     case MSR_TSC:
-        error = vmx_set_tsc_offset(vmx, vcpu, val - rdtsc());
+        error = vmx_set_tsc_offset(vcpu, val - rdtsc());
         break;
     case MSR_TSC_AUX:
         if (vmx_have_msr_tsc_aux)

sys/amd64/vmm/intel/vmx_msr.h:

@@ -36,13 +36,11 @@ struct vmx;

 void vmx_msr_init(void);
 void vmx_msr_guest_init(struct vmx *vmx, struct vmx_vcpu *vcpu);
 void vmx_msr_guest_enter_tsc_aux(struct vmx *vmx, struct vmx_vcpu *vcpu);
-void vmx_msr_guest_enter(struct vmx *vmx, struct vmx_vcpu *vcpu);
-void vmx_msr_guest_exit(struct vmx *vmx, struct vmx_vcpu *vcpu);
+void vmx_msr_guest_enter(struct vmx_vcpu *vcpu);
+void vmx_msr_guest_exit(struct vmx_vcpu *vcpu);
 void vmx_msr_guest_exit_tsc_aux(struct vmx *vmx, struct vmx_vcpu *vcpu);
-int vmx_rdmsr(struct vmx *, struct vmx_vcpu *vcpu, u_int num, uint64_t *val,
-    bool *retu);
-int vmx_wrmsr(struct vmx *, struct vmx_vcpu *vcpu, u_int num, uint64_t val,
-    bool *retu);
+int vmx_rdmsr(struct vmx_vcpu *vcpu, u_int num, uint64_t *val, bool *retu);
+int vmx_wrmsr(struct vmx_vcpu *vcpu, u_int num, uint64_t val, bool *retu);

 uint32_t vmx_revision(void);

sys/amd64/vmm/io/vlapic.c:

@@ -1160,7 +1160,7 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
                 break;

             vlapic2->boot_state = BS_RUNNING;
-            vmexit = vm_exitinfo(vlapic->vm, vlapic->vcpuid);
+            vmexit = vm_exitinfo(vlapic->vcpu);
             vmexit->exitcode = VM_EXITCODE_SPINUP_AP;
             vmexit->u.spinup_ap.vcpu = i;
             vmexit->u.spinup_ap.rip = vec << PAGE_SHIFT;
@@ -1187,7 +1187,7 @@ vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
     }

     if (!CPU_EMPTY(&ipimask)) {
-        vmexit = vm_exitinfo(vlapic->vm, vlapic->vcpuid);
+        vmexit = vm_exitinfo(vlapic->vcpu);
         vmexit->exitcode = VM_EXITCODE_IPI;
         vmexit->u.ipi.mode = mode;
         vmexit->u.ipi.vector = vec;

sys/amd64/vmm/vmm.c:

@@ -366,28 +366,21 @@ vcpu_init(struct vm *vm, int vcpu_id, bool create)
 }

 int
-vcpu_trace_exceptions(struct vm *vm, int vcpuid)
+vcpu_trace_exceptions(struct vcpu *vcpu)
 {
     return (trace_guest_exceptions);
 }

 int
-vcpu_trap_wbinvd(struct vm *vm, int vcpuid)
+vcpu_trap_wbinvd(struct vcpu *vcpu)
 {
     return (trap_wbinvd);
 }

 struct vm_exit *
-vm_exitinfo(struct vm *vm, int cpuid)
+vm_exitinfo(struct vcpu *vcpu)
 {
-    struct vcpu *vcpu;
-
-    if (cpuid < 0 || cpuid >= vm->maxcpus)
-        panic("vm_exitinfo: invalid cpuid %d", cpuid);
-
-    vcpu = &vm->vcpu[cpuid];
-
     return (&vcpu->exitinfo);
 }
@@ -680,14 +673,15 @@ vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
  * an implicit lock on 'vm->mem_maps[]'.
  */
 bool
-vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa)
+vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa)
 {
+    struct vm *vm = vcpu->vm;
     struct mem_map *mm;
     int i;

 #ifdef INVARIANTS
     int hostcpu, state;
-    state = vcpu_get_state(vm_vcpu(vm, vcpuid), &hostcpu);
+    state = vcpu_get_state(vcpu, &hostcpu);
     KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
         ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
 #endif
@@ -1410,20 +1404,20 @@ vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
          */
         if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
             break;
-        if (vm_nmi_pending(vm, vcpuid))
+        if (vm_nmi_pending(vcpu))
             break;
         if (!intr_disabled) {
-            if (vm_extint_pending(vm, vcpuid) ||
+            if (vm_extint_pending(vcpu) ||
                 vlapic_pending_intr(vcpu->vlapic, NULL)) {
                 break;
             }
         }

         /* Don't go to sleep if the vcpu thread needs to yield */
-        if (vcpu_should_yield(vm, vcpuid))
+        if (vcpu_should_yield(vcpu))
             break;

-        if (vcpu_debugged(vm, vcpuid))
+        if (vcpu_debugged(vcpu))
             break;

         /*
@@ -1701,14 +1695,15 @@ vm_suspend(struct vm *vm, enum vm_suspend_how how)
 }

 void
-vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
+vm_exit_suspended(struct vcpu *vcpu, uint64_t rip)
 {
+    struct vm *vm = vcpu->vm;
     struct vm_exit *vmexit;

     KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
         ("vm_exit_suspended: invalid suspend type %d", vm->suspend));

-    vmexit = vm_exitinfo(vm, vcpuid);
+    vmexit = vm_exitinfo(vcpu);
     vmexit->rip = rip;
     vmexit->inst_length = 0;
     vmexit->exitcode = VM_EXITCODE_SUSPENDED;
@@ -1716,52 +1711,53 @@ vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
 }

 void
-vm_exit_debug(struct vm *vm, int vcpuid, uint64_t rip)
+vm_exit_debug(struct vcpu *vcpu, uint64_t rip)
 {
     struct vm_exit *vmexit;

-    vmexit = vm_exitinfo(vm, vcpuid);
+    vmexit = vm_exitinfo(vcpu);
     vmexit->rip = rip;
     vmexit->inst_length = 0;
     vmexit->exitcode = VM_EXITCODE_DEBUG;
 }

 void
-vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
+vm_exit_rendezvous(struct vcpu *vcpu, uint64_t rip)
 {
     struct vm_exit *vmexit;

-    KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));
+    KASSERT(vcpu->vm->rendezvous_func != NULL,
+        ("rendezvous not in progress"));

-    vmexit = vm_exitinfo(vm, vcpuid);
+    vmexit = vm_exitinfo(vcpu);
     vmexit->rip = rip;
     vmexit->inst_length = 0;
     vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
-    vmm_stat_incr(vm_vcpu(vm, vcpuid), VMEXIT_RENDEZVOUS, 1);
+    vmm_stat_incr(vcpu, VMEXIT_RENDEZVOUS, 1);
 }

 void
-vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip)
+vm_exit_reqidle(struct vcpu *vcpu, uint64_t rip)
 {
     struct vm_exit *vmexit;

-    vmexit = vm_exitinfo(vm, vcpuid);
+    vmexit = vm_exitinfo(vcpu);
     vmexit->rip = rip;
     vmexit->inst_length = 0;
     vmexit->exitcode = VM_EXITCODE_REQIDLE;
-    vmm_stat_incr(vm_vcpu(vm, vcpuid), VMEXIT_REQIDLE, 1);
+    vmm_stat_incr(vcpu, VMEXIT_REQIDLE, 1);
 }

 void
-vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
+vm_exit_astpending(struct vcpu *vcpu, uint64_t rip)
 {
     struct vm_exit *vmexit;

-    vmexit = vm_exitinfo(vm, vcpuid);
+    vmexit = vm_exitinfo(vcpu);
     vmexit->rip = rip;
     vmexit->inst_length = 0;
     vmexit->exitcode = VM_EXITCODE_BOGUS;
-    vmm_stat_incr(vm_vcpu(vm, vcpuid), VMEXIT_ASTPENDING, 1);
+    vmm_stat_incr(vcpu, VMEXIT_ASTPENDING, 1);
 }

 int
@@ -1915,16 +1911,10 @@ vm_restart_instruction(struct vcpu *vcpu)
 }

 int
-vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
+vm_exit_intinfo(struct vcpu *vcpu, uint64_t info)
 {
-    struct vcpu *vcpu;
     int type, vector;

-    if (vcpuid < 0 || vcpuid >= vm->maxcpus)
-        return (EINVAL);
-
-    vcpu = &vm->vcpu[vcpuid];
-
     if (info & VM_INTINFO_VALID) {
         type = info & VM_INTINFO_TYPE;
         vector = info & 0xff;
@@ -1937,7 +1927,7 @@ vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
     } else {
         info = 0;
     }
-    VCPU_CTR2(vm, vcpuid, "%s: info1(%#lx)", __func__, info);
+    VMM_CTR2(vcpu, "%s: info1(%#lx)", __func__, info);
     vcpu->exitintinfo = info;
     return (0);
 }
@@ -1997,7 +1987,7 @@ exception_class(uint64_t info)
 }

 static int
-nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
+nested_fault(struct vcpu *vcpu, uint64_t info1, uint64_t info2,
     uint64_t *retinfo)
 {
     enum exc_class exc1, exc2;
@@ -2013,9 +2003,9 @@ nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
     type1 = info1 & VM_INTINFO_TYPE;
     vector1 = info1 & 0xff;
     if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
-        VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)",
+        VMM_CTR2(vcpu, "triple fault: info1(%#lx), info2(%#lx)",
             info1, info2);
-        vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
+        vm_suspend(vcpu->vm, VM_SUSPEND_TRIPLEFAULT);
         *retinfo = 0;
         return (0);
     }
@@ -2055,17 +2045,11 @@ vcpu_exception_intinfo(struct vcpu *vcpu)
 }

 int
-vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
+vm_entry_intinfo(struct vcpu *vcpu, uint64_t *retinfo)
 {
-    struct vcpu *vcpu;
     uint64_t info1, info2;
     int valid;

-    KASSERT(vcpuid >= 0 &&
-        vcpuid < vm->maxcpus, ("invalid vcpu %d", vcpuid));
-
-    vcpu = &vm->vcpu[vcpuid];
-
     info1 = vcpu->exitintinfo;
     vcpu->exitintinfo = 0;
@@ -2073,12 +2057,12 @@ vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
     if (vcpu->exception_pending) {
         info2 = vcpu_exception_intinfo(vcpu);
         vcpu->exception_pending = 0;
-        VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx",
+        VMM_CTR2(vcpu, "Exception %d delivered: %#lx",
             vcpu->exc_vector, info2);
     }

     if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
-        valid = nested_fault(vm, vcpuid, info1, info2, retinfo);
+        valid = nested_fault(vcpu, info1, info2, retinfo);
     } else if (info1 & VM_INTINFO_VALID) {
         *retinfo = info1;
         valid = 1;
@@ -2210,28 +2194,14 @@ vm_inject_nmi(struct vm *vm, int vcpuid)
 }

 int
-vm_nmi_pending(struct vm *vm, int vcpuid)
+vm_nmi_pending(struct vcpu *vcpu)
 {
-    struct vcpu *vcpu;
-
-    if (vcpuid < 0 || vcpuid >= vm->maxcpus)
-        panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
-
-    vcpu = &vm->vcpu[vcpuid];
-
     return (vcpu->nmi_pending);
 }

 void
-vm_nmi_clear(struct vm *vm, int vcpuid)
+vm_nmi_clear(struct vcpu *vcpu)
 {
-    struct vcpu *vcpu;
-
-    if (vcpuid < 0 || vcpuid >= vm->maxcpus)
-        panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);
-
-    vcpu = &vm->vcpu[vcpuid];
-
     if (vcpu->nmi_pending == 0)
         panic("vm_nmi_clear: inconsistent nmi_pending state");
@@ -2257,28 +2227,14 @@ vm_inject_extint(struct vm *vm, int vcpuid)
 }

 int
-vm_extint_pending(struct vm *vm, int vcpuid)
+vm_extint_pending(struct vcpu *vcpu)
 {
-    struct vcpu *vcpu;
-
-    if (vcpuid < 0 || vcpuid >= vm->maxcpus)
-        panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
-
-    vcpu = &vm->vcpu[vcpuid];
-
     return (vcpu->extint_pending);
 }

 void
-vm_extint_clear(struct vm *vm, int vcpuid)
+vm_extint_clear(struct vcpu *vcpu)
 {
-    struct vcpu *vcpu;
-
-    if (vcpuid < 0 || vcpuid >= vm->maxcpus)
-        panic("vm_extint_pending: invalid vcpuid %d", vcpuid);
-
-    vcpu = &vm->vcpu[vcpuid];
-
     if (vcpu->extint_pending == 0)
         panic("vm_extint_clear: inconsistent extint_pending state");
@@ -2488,10 +2444,10 @@ vm_resume_cpu(struct vm *vm, int vcpuid)
 }

 int
-vcpu_debugged(struct vm *vm, int vcpuid)
+vcpu_debugged(struct vcpu *vcpu)
 {
-    return (CPU_ISSET(vcpuid, &vm->debug_cpus));
+    return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus));
 }

 cpuset_t
@@ -2951,18 +2907,10 @@ vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta)
     return (ret);
 }

-int
-vm_set_tsc_offset(struct vm *vm, int vcpuid, uint64_t offset)
+void
+vm_set_tsc_offset(struct vcpu *vcpu, uint64_t offset)
 {
-    struct vcpu *vcpu;
-
-    if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm))
-        return (EINVAL);
-
-    vcpu = &vm->vcpu[vcpuid];
-
     vcpu->tsc_offset = offset;
-
-    return (0);
 }

 int
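The bodies above also show why several interfaces could lose their return values: the EINVAL/panic paths existed only to validate a raw vcpuid before indexing vm->vcpu[], and a caller that already holds a struct vcpu * cannot pass an invalid id. vm_set_tsc_offset() therefore becomes void, which is what let svm_restore_tsc() in the SVM hunks earlier drop its error plumbing. A sketch of the resulting caller shape (the function name is hypothetical):

    static int
    restore_tsc_sketch(struct vcpu *vcpu, uint64_t offset)
    {
        vm_set_tsc_offset(vcpu, offset);    /* void: no invalid-id case left */
        return (0);
    }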

sys/amd64/vmm/vmm_dev.c:

@@ -857,7 +857,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
         break;
     case VM_SET_INTINFO:
         vmii = (struct vm_intinfo *)data;
-        error = vm_exit_intinfo(sc->vm, vmii->vcpuid, vmii->info1);
+        error = vm_exit_intinfo(vcpu, vmii->info1);
         break;
     case VM_GET_INTINFO:
         vmii = (struct vm_intinfo *)data;

sys/amd64/vmm/vmm_lapic.c:

@@ -156,13 +156,13 @@ lapic_msr(u_int msr)
 }

 int
-lapic_rdmsr(struct vm *vm, int cpu, u_int msr, uint64_t *rval, bool *retu)
+lapic_rdmsr(struct vcpu *vcpu, u_int msr, uint64_t *rval, bool *retu)
 {
     int error;
     u_int offset;
     struct vlapic *vlapic;

-    vlapic = vm_lapic(vm_vcpu(vm, cpu));
+    vlapic = vm_lapic(vcpu);

     if (msr == MSR_APICBASE) {
         *rval = vlapic_get_apicbase(vlapic);
@@ -176,13 +176,13 @@ lapic_rdmsr(struct vm *vm, int cpu, u_int msr, uint64_t *rval, bool *retu)
 }

 int
-lapic_wrmsr(struct vm *vm, int cpu, u_int msr, uint64_t val, bool *retu)
+lapic_wrmsr(struct vcpu *vcpu, u_int msr, uint64_t val, bool *retu)
 {
     int error;
     u_int offset;
     struct vlapic *vlapic;

-    vlapic = vm_lapic(vm_vcpu(vm, cpu));
+    vlapic = vm_lapic(vcpu);

     if (msr == MSR_APICBASE) {
         error = vlapic_set_apicbase(vlapic, val);

sys/amd64/vmm/vmm_lapic.h:

@@ -31,13 +31,12 @@
 #ifndef _VMM_LAPIC_H_
 #define _VMM_LAPIC_H_

+struct vcpu;
 struct vm;

 bool lapic_msr(u_int num);
-int lapic_rdmsr(struct vm *vm, int cpu, u_int msr, uint64_t *rval,
-    bool *retu);
-int lapic_wrmsr(struct vm *vm, int cpu, u_int msr, uint64_t wval,
-    bool *retu);
+int lapic_rdmsr(struct vcpu *vcpu, u_int msr, uint64_t *rval, bool *retu);
+int lapic_wrmsr(struct vcpu *vcpu, u_int msr, uint64_t wval, bool *retu);

 int lapic_mmio_read(struct vcpu *vcpu, uint64_t gpa,
     uint64_t *rval, int size, void *arg);

sys/amd64/vmm/x86.c:

@@ -87,9 +87,11 @@ log2(u_int x)
 }

 int
-x86_emulate_cpuid(struct vm *vm, int vcpu_id, uint64_t *rax, uint64_t *rbx,
+x86_emulate_cpuid(struct vcpu *vcpu, uint64_t *rax, uint64_t *rbx,
     uint64_t *rcx, uint64_t *rdx)
 {
+    struct vm *vm = vcpu_vm(vcpu);
+    int vcpu_id = vcpu_vcpuid(vcpu);
     const struct xsave_limits *limits;
     uint64_t cr4;
     int error, enable_invpcid, enable_rdpid, enable_rdtscp, level,
@@ -349,7 +351,7 @@ x86_emulate_cpuid(struct vm *vm, int vcpu_id, uint64_t *rax, uint64_t *rbx,
              */
             regs[2] &= ~CPUID2_OSXSAVE;
             if (regs[2] & CPUID2_XSAVE) {
-                error = vm_get_register(vm_vcpu(vm, vcpu_id),
+                error = vm_get_register(vcpu,
                     VM_REG_GUEST_CR4, &cr4);
                 if (error)
                     panic("x86_emulate_cpuid: error %d "
@@ -637,7 +639,7 @@ x86_emulate_cpuid(struct vm *vm, int vcpu_id, uint64_t *rax, uint64_t *rbx,
 }

 bool
-vm_cpuid_capability(struct vm *vm, int vcpuid, enum vm_cpuid_capability cap)
+vm_cpuid_capability(struct vcpu *vcpu, enum vm_cpuid_capability cap)
 {
     bool rv;

sys/amd64/vmm/x86.h:

@@ -66,7 +66,7 @@
 */
 #define CPUID_0000_0001_FEAT0_VMX (1<<5)

-int x86_emulate_cpuid(struct vm *vm, int vcpu_id, uint64_t *rax, uint64_t *rbx,
+int x86_emulate_cpuid(struct vcpu *vcpu, uint64_t *rax, uint64_t *rbx,
     uint64_t *rcx, uint64_t *rdx);

 enum vm_cpuid_capability {
@@ -81,7 +81,7 @@ enum vm_cpuid_capability {
 * Return 'true' if the capability 'cap' is enabled in this virtual cpu
 * and 'false' otherwise.
 */
-bool vm_cpuid_capability(struct vm *vm, int vcpuid, enum vm_cpuid_capability);
+bool vm_cpuid_capability(struct vcpu *vcpu, enum vm_cpuid_capability);

 #define VMM_MTRR_VAR_MAX 10
 #define VMM_MTRR_DEF_MASK \
#define VMM_MTRR_DEF_MASK \ #define VMM_MTRR_DEF_MASK \