vmm: Remove the per-vm cookie argument from vmmops taking a vcpu.
This requires storing a reference to the per-vm cookie in the CPU-specific
vCPU structure. Take advantage of this new field to remove no-longer-needed
function arguments in the CPU-specific backends. In particular, stop passing
the per-vm cookie to functions that either don't use it or only use it for
KTR traces.

Reviewed by:	corvink, markj
Differential Revision:	https://reviews.freebsd.org/D37152
commit 869c8d1946 (parent 1aa5150479)
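The whole change follows one mechanical pattern, which the hunks below apply
backend by backend. A minimal sketch of that pattern (illustrative only; the
real definitions are in the svm_softc.h and vmx.c hunks that follow):

    /*
     * Before: every per-vcpu vmmops callback took both cookies.
     *
     *     int svm_setreg(void *vmi, void *vcpui, int ident, uint64_t val);
     *
     * After: the CPU-specific vCPU structure stores a back-pointer to the
     * per-vm softc, so callbacks need only the per-vcpu cookie.
     */
    struct svm_vcpu {
        struct svm_softc *sc;    /* back-pointer, set once at vcpu init */
        int               vcpuid;
        /* ... hardware/software register state ... */
    };

    static int
    svm_setreg(void *vcpui, int ident, uint64_t val)
    {
        struct svm_vcpu *vcpu = vcpui;
        struct svm_softc *sc = vcpu->sc;    /* recovered when needed */
        /* ... */
    }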
sys/amd64/include/vmm.h
@@ -167,29 +167,24 @@ typedef int (*vmm_init_func_t)(int ipinum);
 typedef int (*vmm_cleanup_func_t)(void);
 typedef void (*vmm_resume_func_t)(void);
 typedef void * (*vmi_init_func_t)(struct vm *vm, struct pmap *pmap);
-typedef int (*vmi_run_func_t)(void *vmi, void *vcpui, register_t rip,
+typedef int (*vmi_run_func_t)(void *vcpui, register_t rip,
     struct pmap *pmap, struct vm_eventinfo *info);
 typedef void (*vmi_cleanup_func_t)(void *vmi);
 typedef void * (*vmi_vcpu_init_func_t)(void *vmi, int vcpu_id);
-typedef void (*vmi_vcpu_cleanup_func_t)(void *vmi, void *vcpui);
-typedef int (*vmi_get_register_t)(void *vmi, void *vcpui, int num,
-    uint64_t *retval);
-typedef int (*vmi_set_register_t)(void *vmi, void *vcpui, int num,
-    uint64_t val);
-typedef int (*vmi_get_desc_t)(void *vmi, void *vcpui, int num,
-    struct seg_desc *desc);
-typedef int (*vmi_set_desc_t)(void *vmi, void *vcpui, int num,
-    struct seg_desc *desc);
-typedef int (*vmi_get_cap_t)(void *vmi, void *vcpui, int num, int *retval);
-typedef int (*vmi_set_cap_t)(void *vmi, void *vcpui, int num, int val);
+typedef void (*vmi_vcpu_cleanup_func_t)(void *vcpui);
+typedef int (*vmi_get_register_t)(void *vcpui, int num, uint64_t *retval);
+typedef int (*vmi_set_register_t)(void *vcpui, int num, uint64_t val);
+typedef int (*vmi_get_desc_t)(void *vcpui, int num, struct seg_desc *desc);
+typedef int (*vmi_set_desc_t)(void *vcpui, int num, struct seg_desc *desc);
+typedef int (*vmi_get_cap_t)(void *vcpui, int num, int *retval);
+typedef int (*vmi_set_cap_t)(void *vcpui, int num, int val);
 typedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
 typedef void (*vmi_vmspace_free)(struct vmspace *vmspace);
-typedef struct vlapic * (*vmi_vlapic_init)(void *vmi, void *vcpui);
-typedef void (*vmi_vlapic_cleanup)(void *vmi, struct vlapic *vlapic);
+typedef struct vlapic * (*vmi_vlapic_init)(void *vcpui);
+typedef void (*vmi_vlapic_cleanup)(struct vlapic *vlapic);
 typedef int (*vmi_snapshot_t)(void *vmi, struct vm_snapshot_meta *meta);
-typedef int (*vmi_snapshot_vcpu_t)(void *vmi, struct vm_snapshot_meta *meta,
-    void *vcpui);
-typedef int (*vmi_restore_tsc_t)(void *vmi, void *vcpui, uint64_t now);
+typedef int (*vmi_snapshot_vcpu_t)(void *vcpui, struct vm_snapshot_meta *meta);
+typedef int (*vmi_restore_tsc_t)(void *vcpui, uint64_t now);

 struct vmm_ops {
     vmm_init_func_t modinit;    /* module wide initialization */
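With these signatures, the vcpu-init hook is the one place both cookies still
meet: it receives the per-vm cookie and returns the per-vcpu cookie that now
embeds it. A hedged sketch with hypothetical names (example_softc,
example_vcpu, M_EXAMPLE), mirroring the svm_vcpu_init() hunk further down:

    static void *
    example_vcpu_init(void *vmi, int vcpu_id)
    {
        struct example_softc *sc = vmi;    /* per-vm cookie */
        struct example_vcpu *vcpu;

        vcpu = malloc(sizeof(*vcpu), M_EXAMPLE, M_WAITOK | M_ZERO);
        vcpu->sc = sc;          /* stash the per-vm cookie once */
        vcpu->vcpuid = vcpu_id;
        return (vcpu);          /* handed back to vmm as the vcpui cookie */
    }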
sys/amd64/vmm/amd/svm.c
@@ -132,8 +132,8 @@ static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");

-static int svm_getdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc);
-static int svm_setreg(void *arg, void *vcpui, int ident, uint64_t val);
+static int svm_getdesc(void *vcpui, int reg, struct seg_desc *desc);
+static int svm_setreg(void *vcpui, int ident, uint64_t val);

 static __inline int
 flush_by_asid(void)
@@ -382,8 +382,7 @@ svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
 }

 static __inline int
-svm_get_intercept(struct svm_softc *sc, struct svm_vcpu *vcpu, int idx,
-    uint32_t bitmask)
+svm_get_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask)
 {
     struct vmcb_ctrl *ctrl;

@@ -394,8 +393,7 @@ svm_get_intercept(struct svm_softc *sc, struct svm_vcpu *vcpu, int idx,
 }

 static __inline void
-svm_set_intercept(struct svm_softc *sc, struct svm_vcpu *vcpu, int idx,
-    uint32_t bitmask, int enabled)
+svm_set_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask, int enabled)
 {
     struct vmcb_ctrl *ctrl;
     uint32_t oldval;
@@ -412,25 +410,23 @@ svm_set_intercept(struct svm_softc *sc, struct svm_vcpu *vcpu, int idx,

     if (ctrl->intercept[idx] != oldval) {
         svm_set_dirty(vcpu, VMCB_CACHE_I);
-        VCPU_CTR3(sc->vm, vcpu->vcpuid, "intercept[%d] modified "
+        VCPU_CTR3(vcpu->sc->vm, vcpu->vcpuid, "intercept[%d] modified "
             "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
     }
 }

 static __inline void
-svm_disable_intercept(struct svm_softc *sc, struct svm_vcpu *vcpu, int off,
-    uint32_t bitmask)
+svm_disable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask)
 {

-    svm_set_intercept(sc, vcpu, off, bitmask, 0);
+    svm_set_intercept(vcpu, off, bitmask, 0);
 }

 static __inline void
-svm_enable_intercept(struct svm_softc *sc, struct svm_vcpu *vcpu, int off,
-    uint32_t bitmask)
+svm_enable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask)
 {

-    svm_set_intercept(sc, vcpu, off, bitmask, 1);
+    svm_set_intercept(vcpu, off, bitmask, 1);
 }

 static void
@@ -459,9 +455,9 @@ vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa,
     for (n = 0; n < 16; n++) {
         mask = (BIT(n) << 16) | BIT(n);
         if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
-            svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
+            svm_disable_intercept(vcpu, VMCB_CR_INTCPT, mask);
         else
-            svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
+            svm_enable_intercept(vcpu, VMCB_CR_INTCPT, mask);
     }

     /*
@@ -476,41 +472,40 @@ vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa,
             if (n == 2 || n == 9) {
                 continue;
             }
-            svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
+            svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(n));
         }
     } else {
-        svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
+        svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
     }

     /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
-        VMCB_INTCPT_FERR_FREEZE);
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);
+    svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
+    svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
+    svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
+    svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
+    svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
+    svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
+    svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
+    svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
+    svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_FERR_FREEZE);
+    svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
+    svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);

-    svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);
+    svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
+    svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);

     /*
      * Intercept SVM instructions since AMD enables them in guests otherwise.
      * Non-intercepted VMMCALL causes #UD, skip it.
      */
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
+    svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
+    svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
+    svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
+    svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
+    svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
+    svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
     if (vcpu_trap_wbinvd(sc->vm, vcpu->vcpuid)) {
-        svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT,
+        svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT,
             VMCB_INTCPT_WBINVD);
     }
@@ -518,7 +513,7 @@ vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa,
      * From section "Canonicalization and Consistency Checks" in APMv2
      * the VMRUN intercept bit must be set to pass the consistency check.
      */
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);
+    svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);

     /*
      * The ASID will be set to a non-zero value just before VMRUN.
@@ -614,12 +609,13 @@ svm_init(struct vm *vm, pmap_t pmap)
 }

 static void *
-svm_vcpu_init(void *arg, int vcpuid)
+svm_vcpu_init(void *vmi, int vcpuid)
 {
-    struct svm_softc *sc = arg;
+    struct svm_softc *sc = vmi;
     struct svm_vcpu *vcpu;

     vcpu = malloc(sizeof(*vcpu), M_SVM, M_WAITOK | M_ZERO);
+    vcpu->sc = sc;
     vcpu->vcpuid = vcpuid;
     vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE, M_SVM,
         M_WAITOK | M_ZERO);
@@ -724,8 +720,8 @@ svm_inout_str_count(struct svm_regctx *regs, int rep)
 }

 static void
-svm_inout_str_seginfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
-    int64_t info1, int in, struct vm_inout_str *vis)
+svm_inout_str_seginfo(struct svm_vcpu *vcpu, int64_t info1, int in,
+    struct vm_inout_str *vis)
 {
     int error __diagused, s;

@@ -737,7 +733,7 @@ svm_inout_str_seginfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
         vis->seg_name = vm_segment_name(s);
     }

-    error = svm_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
+    error = svm_getdesc(vcpu, vis->seg_name, &vis->seg_desc);
     KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
 }

@@ -778,8 +774,7 @@ svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
  * Handle guest I/O intercept.
  */
 static int
-svm_handle_io(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
-    struct vm_exit *vmexit)
+svm_handle_io(struct svm_vcpu *vcpu, struct vm_exit *vmexit)
 {
     struct vmcb_ctrl *ctrl;
     struct vmcb_state *state;
@@ -822,8 +817,7 @@ svm_handle_io(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
         vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
         vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
         vis->addrsize = svm_inout_str_addrsize(info1);
-        svm_inout_str_seginfo(svm_sc, vcpu, info1,
-            vmexit->u.inout.in, vis);
+        svm_inout_str_seginfo(vcpu, info1, vmexit->u.inout.in, vis);
     }

     return (UNHANDLED);
@@ -937,8 +931,8 @@ intrtype_to_str(int intr_type)
  * Inject an event to vcpu as described in section 15.20, "Event injection".
  */
 static void
-svm_eventinject(struct svm_softc *sc, struct svm_vcpu *vcpu, int intr_type,
-    int vector, uint32_t error, bool ec_valid)
+svm_eventinject(struct svm_vcpu *vcpu, int intr_type, int vector,
+    uint32_t error, bool ec_valid)
 {
     struct vmcb_ctrl *ctrl;

@@ -967,23 +961,24 @@ svm_eventinject(struct svm_softc *sc, struct svm_vcpu *vcpu, int intr_type,
     if (ec_valid) {
         ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
         ctrl->eventinj |= (uint64_t)error << 32;
-        VCPU_CTR3(sc->vm, vcpu->vcpuid,
+        VCPU_CTR3(vcpu->sc->vm, vcpu->vcpuid,
             "Injecting %s at vector %d errcode %#x",
             intrtype_to_str(intr_type), vector, error);
     } else {
-        VCPU_CTR2(sc->vm, vcpu->vcpuid, "Injecting %s at vector %d",
+        VCPU_CTR2(vcpu->sc->vm, vcpu->vcpuid,
+            "Injecting %s at vector %d",
             intrtype_to_str(intr_type), vector);
     }
 }

 static void
-svm_update_virqinfo(struct svm_softc *sc, struct svm_vcpu *vcpu)
+svm_update_virqinfo(struct svm_vcpu *vcpu)
 {
     struct vm *vm;
     struct vlapic *vlapic;
     struct vmcb_ctrl *ctrl;

-    vm = sc->vm;
+    vm = vcpu->sc->vm;
     vlapic = vm_lapic(vm, vcpu->vcpuid);
     ctrl = svm_get_vmcb_ctrl(vcpu);

@@ -1022,16 +1017,15 @@ svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)

 #ifdef INVARIANTS
 static __inline int
-vintr_intercept_enabled(struct svm_softc *sc, struct svm_vcpu *vcpu)
+vintr_intercept_enabled(struct svm_vcpu *vcpu)
 {

-    return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
-        VMCB_INTCPT_VINTR));
+    return (svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR));
 }
 #endif

 static __inline void
-enable_intr_window_exiting(struct svm_softc *sc, struct svm_vcpu *vcpu)
+enable_intr_window_exiting(struct svm_vcpu *vcpu)
 {
     struct vmcb_ctrl *ctrl;

@@ -1039,42 +1033,41 @@ enable_intr_window_exiting(struct svm_softc *sc, struct svm_vcpu *vcpu)

     if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
         KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
-        KASSERT(vintr_intercept_enabled(sc, vcpu),
+        KASSERT(vintr_intercept_enabled(vcpu),
             ("%s: vintr intercept should be enabled", __func__));
         return;
     }

-    VCPU_CTR0(sc->vm, vcpu->vcpuid, "Enable intr window exiting");
+    VCPU_CTR0(vcpu->sc->vm, vcpu->vcpuid, "Enable intr window exiting");
     ctrl->v_irq = 1;
     ctrl->v_ign_tpr = 1;
     ctrl->v_intr_vector = 0;
     svm_set_dirty(vcpu, VMCB_CACHE_TPR);
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
+    svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
 }

 static __inline void
-disable_intr_window_exiting(struct svm_softc *sc, struct svm_vcpu *vcpu)
+disable_intr_window_exiting(struct svm_vcpu *vcpu)
 {
     struct vmcb_ctrl *ctrl;

     ctrl = svm_get_vmcb_ctrl(vcpu);

     if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
-        KASSERT(!vintr_intercept_enabled(sc, vcpu),
+        KASSERT(!vintr_intercept_enabled(vcpu),
             ("%s: vintr intercept should be disabled", __func__));
         return;
     }

-    VCPU_CTR0(sc->vm, vcpu->vcpuid, "Disable intr window exiting");
+    VCPU_CTR0(vcpu->sc->vm, vcpu->vcpuid, "Disable intr window exiting");
     ctrl->v_irq = 0;
     ctrl->v_intr_vector = 0;
     svm_set_dirty(vcpu, VMCB_CACHE_TPR);
-    svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
+    svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
 }

 static int
-svm_modify_intr_shadow(struct svm_softc *sc, struct svm_vcpu *vcpu,
-    uint64_t val)
+svm_modify_intr_shadow(struct svm_vcpu *vcpu, uint64_t val)
 {
     struct vmcb_ctrl *ctrl;
     int oldval, newval;
@@ -1084,13 +1077,14 @@ svm_modify_intr_shadow(struct svm_softc *sc, struct svm_vcpu *vcpu,
     newval = val ? 1 : 0;
     if (newval != oldval) {
         ctrl->intr_shadow = newval;
-        VCPU_CTR1(sc->vm, vcpu->vcpuid, "Setting intr_shadow to %d", newval);
+        VCPU_CTR1(vcpu->sc->vm, vcpu->vcpuid,
+            "Setting intr_shadow to %d", newval);
     }
     return (0);
 }

 static int
-svm_get_intr_shadow(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t *val)
+svm_get_intr_shadow(struct svm_vcpu *vcpu, uint64_t *val)
 {
     struct vmcb_ctrl *ctrl;

@@ -1105,31 +1099,30 @@ svm_get_intr_shadow(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t *val)
  * to track when the vcpu is done handling the NMI.
  */
 static int
-nmi_blocked(struct svm_softc *sc, struct svm_vcpu *vcpu)
+nmi_blocked(struct svm_vcpu *vcpu)
 {
     int blocked;

-    blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
-        VMCB_INTCPT_IRET);
+    blocked = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
     return (blocked);
 }

 static void
-enable_nmi_blocking(struct svm_softc *sc, struct svm_vcpu *vcpu)
+enable_nmi_blocking(struct svm_vcpu *vcpu)
 {

-    KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
-    VCPU_CTR0(sc->vm, vcpu->vcpuid, "vNMI blocking enabled");
-    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
+    KASSERT(!nmi_blocked(vcpu), ("vNMI already blocked"));
+    VCPU_CTR0(vcpu->sc->vm, vcpu->vcpuid, "vNMI blocking enabled");
+    svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
 }

 static void
-clear_nmi_blocking(struct svm_softc *sc, struct svm_vcpu *vcpu)
+clear_nmi_blocking(struct svm_vcpu *vcpu)
 {
     int error __diagused;

-    KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
-    VCPU_CTR0(sc->vm, vcpu->vcpuid, "vNMI blocking cleared");
+    KASSERT(nmi_blocked(vcpu), ("vNMI already unblocked"));
+    VCPU_CTR0(vcpu->sc->vm, vcpu->vcpuid, "vNMI blocking cleared");
     /*
      * When the IRET intercept is cleared the vcpu will attempt to execute
      * the "iret" when it runs next. However, it is possible to inject
@@ -1141,13 +1134,13 @@ clear_nmi_blocking(struct svm_softc *sc, struct svm_vcpu *vcpu)
      *
      * XXX this needs to be fixed
      */
-    svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
+    svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);

     /*
      * Set 'intr_shadow' to prevent an NMI from being injected on the
      * immediate VMRUN.
      */
-    error = svm_modify_intr_shadow(sc, vcpu, 1);
+    error = svm_modify_intr_shadow(vcpu, 1);
     KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
 }

@@ -1215,7 +1208,7 @@ svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval,
         goto gpf;
     }

-    error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
+    error = svm_setreg(vcpu, VM_REG_GUEST_EFER, newval);
     KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));
     return (0);
 gpf:
@@ -1383,7 +1376,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
         ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)",
         vmexit->inst_length, code, info1, info2));

-    svm_update_virqinfo(svm_sc, vcpu);
+    svm_update_virqinfo(vcpu);
     svm_save_intinfo(svm_sc, vcpu);

     switch (code) {
@@ -1392,7 +1385,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
          * Restart execution at "iret" but with the intercept cleared.
          */
         vmexit->inst_length = 0;
-        clear_nmi_blocking(svm_sc, vcpu);
+        clear_nmi_blocking(vcpu);
         handled = 1;
         break;
     case VMCB_EXIT_VINTR:    /* interrupt window exiting */
@@ -1421,8 +1414,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
             __asm __volatile("int $18");
             break;
         case IDT_PF:
-            error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
-                info2);
+            error = svm_setreg(vcpu, VM_REG_GUEST_CR2, info2);
             KASSERT(error == 0, ("%s: error %d updating cr2",
                 __func__, error));
             /* fallthru */
@@ -1512,7 +1504,7 @@ svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
         }
         break;
     case VMCB_EXIT_IO:
-        handled = svm_handle_io(svm_sc, vcpu, vmexit);
+        handled = svm_handle_io(vcpu, vmexit);
         vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_INOUT, 1);
         break;
     case VMCB_EXIT_CPUID:
@@ -1617,7 +1609,7 @@ svm_inj_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
     KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
         "valid: %#lx", __func__, intinfo));

-    svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
+    svm_eventinject(vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
         VMCB_EXITINTINFO_VECTOR(intinfo),
         VMCB_EXITINTINFO_EC(intinfo),
         VMCB_EXITINTINFO_EC_VALID(intinfo));
@@ -1664,7 +1656,7 @@ svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,

     /* NMI event has priority over interrupts. */
     if (vm_nmi_pending(sc->vm, vcpuid)) {
-        if (nmi_blocked(sc, vcpu)) {
+        if (nmi_blocked(vcpu)) {
             /*
              * Can't inject another NMI if the guest has not
              * yet executed an "iret" after the last NMI.
@@ -1702,11 +1694,11 @@ svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,
             vm_nmi_clear(sc->vm, vcpuid);

             /* Inject NMI, vector number is not used */
-            svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI,
+            svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_NMI,
                 IDT_NMI, 0, false);

             /* virtual NMI blocking is now in effect */
-            enable_nmi_blocking(sc, vcpu);
+            enable_nmi_blocking(vcpu);

             VCPU_CTR0(sc->vm, vcpuid, "Injecting vNMI");
         }
@@ -1750,7 +1742,7 @@ svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,
         goto done;
     }

-    svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);
+    svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);

     if (!extint_pending) {
         vlapic_intr_accepted(vlapic, vector);
@@ -1802,9 +1794,9 @@ svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,
             ("Bogus intr_window_exiting: eventinj (%#lx), "
             "intr_shadow (%u), rflags (%#lx)",
             ctrl->eventinj, ctrl->intr_shadow, state->rflags));
-        enable_intr_window_exiting(sc, vcpu);
+        enable_intr_window_exiting(vcpu);
     } else {
-        disable_intr_window_exiting(sc, vcpu);
+        disable_intr_window_exiting(vcpu);
     }
 }

@@ -1826,7 +1818,7 @@ restore_host_tss(void)
 }

 static void
-svm_pmap_activate(struct svm_softc *sc, struct svm_vcpu *vcpu, pmap_t pmap)
+svm_pmap_activate(struct svm_vcpu *vcpu, pmap_t pmap)
 {
     struct vmcb_ctrl *ctrl;
     long eptgen;
@@ -2007,8 +1999,7 @@ svm_dr_leave_guest(struct svm_regctx *gctx)
  * Start vcpu with specified RIP.
  */
 static int
-svm_run(void *arg, void *vcpui, register_t rip, pmap_t pmap,
-    struct vm_eventinfo *evinfo)
+svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
 {
     struct svm_regctx *gctx;
     struct svm_softc *svm_sc;
@@ -2022,11 +2013,10 @@ svm_run(void *arg, void *vcpui, register_t rip, pmap_t pmap,
     int handled, vcpuid;
     uint16_t ldt_sel;

-    svm_sc = arg;
-    vm = svm_sc->vm;
-
     vcpu = vcpui;
     vcpuid = vcpu->vcpuid;
+    svm_sc = vcpu->sc;
+    vm = svm_sc->vm;
     state = svm_get_vmcb_state(vcpu);
     ctrl = svm_get_vmcb_ctrl(vcpu);
     vmexit = vm_exitinfo(vm, vcpuid);
@@ -2121,7 +2111,7 @@ svm_run(void *arg, void *vcpui, register_t rip, pmap_t pmap,
          * Check the pmap generation and the ASID generation to
          * ensure that the vcpu does not use stale TLB mappings.
          */
-        svm_pmap_activate(svm_sc, vcpu, pmap);
+        svm_pmap_activate(vcpu, pmap);

         ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty;
         vcpu->dirty = 0;
@@ -2161,7 +2151,7 @@ svm_run(void *arg, void *vcpui, register_t rip, pmap_t pmap,
 }

 static void
-svm_vcpu_cleanup(void *arg, void *vcpui)
+svm_vcpu_cleanup(void *vcpui)
 {
     struct svm_vcpu *vcpu = vcpui;

@@ -2170,9 +2160,9 @@ svm_vcpu_cleanup(void *arg, void *vcpui)
 }

 static void
-svm_cleanup(void *arg)
+svm_cleanup(void *vmi)
 {
-    struct svm_softc *sc = arg;
+    struct svm_softc *sc = vmi;

     contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
     contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
@@ -2226,20 +2216,18 @@ swctx_regptr(struct svm_regctx *regctx, int reg)
 }

 static int
-svm_getreg(void *arg, void *vcpui, int ident, uint64_t *val)
+svm_getreg(void *vcpui, int ident, uint64_t *val)
 {
-    struct svm_softc *svm_sc;
     struct svm_vcpu *vcpu;
     register_t *reg;

-    svm_sc = arg;
     vcpu = vcpui;

     if (ident == VM_REG_GUEST_INTR_SHADOW) {
-        return (svm_get_intr_shadow(svm_sc, vcpu, val));
+        return (svm_get_intr_shadow(vcpu, val));
     }

-    if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
+    if (vmcb_read(vcpu, ident, val) == 0) {
         return (0);
     }

@@ -2250,28 +2238,26 @@ svm_getreg(void *arg, void *vcpui, int ident, uint64_t *val)
         return (0);
     }

-    VCPU_CTR1(svm_sc->vm, vcpu->vcpuid, "svm_getreg: unknown register %#x",
-        ident);
+    VCPU_CTR1(vcpu->sc->vm, vcpu->vcpuid,
+        "svm_getreg: unknown register %#x", ident);
     return (EINVAL);
 }

 static int
-svm_setreg(void *arg, void *vcpui, int ident, uint64_t val)
+svm_setreg(void *vcpui, int ident, uint64_t val)
 {
-    struct svm_softc *svm_sc;
     struct svm_vcpu *vcpu;
     register_t *reg;

-    svm_sc = arg;
     vcpu = vcpui;

     if (ident == VM_REG_GUEST_INTR_SHADOW) {
-        return (svm_modify_intr_shadow(svm_sc, vcpu, val));
+        return (svm_modify_intr_shadow(vcpu, val));
     }

     /* Do not permit user write access to VMCB fields by offset. */
     if (!VMCB_ACCESS_OK(ident)) {
-        if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
+        if (vmcb_write(vcpu, ident, val) == 0) {
             return (0);
         }
     }
@@ -2294,33 +2280,32 @@ svm_setreg(void *arg, void *vcpui, int ident, uint64_t val)
      * whether 'running' is true/false.
      */

-    VCPU_CTR1(svm_sc->vm, vcpu->vcpuid, "svm_setreg: unknown register %#x",
-        ident);
+    VCPU_CTR1(vcpu->sc->vm, vcpu->vcpuid,
+        "svm_setreg: unknown register %#x", ident);
     return (EINVAL);
 }

 static int
-svm_getdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc)
+svm_getdesc(void *vcpui, int reg, struct seg_desc *desc)
 {
-    return (vmcb_getdesc(arg, vcpui, reg, desc));
+    return (vmcb_getdesc(vcpui, reg, desc));
 }

 static int
-svm_setdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc)
+svm_setdesc(void *vcpui, int reg, struct seg_desc *desc)
 {
-    return (vmcb_setdesc(arg, vcpui, reg, desc));
+    return (vmcb_setdesc(vcpui, reg, desc));
 }

 #ifdef BHYVE_SNAPSHOT
 static int
-svm_snapshot_reg(void *arg, void *vcpui, int ident,
-    struct vm_snapshot_meta *meta)
+svm_snapshot_reg(void *vcpui, int ident, struct vm_snapshot_meta *meta)
 {
     int ret;
     uint64_t val;

     if (meta->op == VM_SNAPSHOT_SAVE) {
-        ret = svm_getreg(arg, vcpui, ident, &val);
+        ret = svm_getreg(vcpui, ident, &val);
         if (ret != 0)
             goto done;

@@ -2328,7 +2313,7 @@ svm_snapshot_reg(void *arg, void *vcpui, int ident,
     } else if (meta->op == VM_SNAPSHOT_RESTORE) {
         SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);

-        ret = svm_setreg(arg, vcpui, ident, val);
+        ret = svm_setreg(vcpui, ident, val);
         if (ret != 0)
             goto done;
     } else {
@@ -2342,23 +2327,22 @@ svm_snapshot_reg(void *arg, void *vcpui, int ident,
 #endif

 static int
-svm_setcap(void *arg, void *vcpui, int type, int val)
+svm_setcap(void *vcpui, int type, int val)
 {
-    struct svm_softc *sc;
     struct svm_vcpu *vcpu;
     struct vlapic *vlapic;
     int error;

-    sc = arg;
     vcpu = vcpui;
     error = 0;

     switch (type) {
     case VM_CAP_HALT_EXIT:
-        svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+        svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
             VMCB_INTCPT_HLT, val);
         break;
     case VM_CAP_PAUSE_EXIT:
-        svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+        svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
             VMCB_INTCPT_PAUSE, val);
         break;
     case VM_CAP_UNRESTRICTED_GUEST:
@@ -2367,7 +2351,7 @@ svm_setcap(void *arg, void *vcpui, int type, int val)
         error = EINVAL;
         break;
     case VM_CAP_IPI_EXIT:
-        vlapic = vm_lapic(sc->vm, vcpu->vcpuid);
+        vlapic = vm_lapic(vcpu->sc->vm, vcpu->vcpuid);
         vlapic->ipi_exit = val;
         break;
     default:
@@ -2378,31 +2362,29 @@ svm_setcap(void *arg, void *vcpui, int type, int val)
 }

 static int
-svm_getcap(void *arg, void *vcpui, int type, int *retval)
+svm_getcap(void *vcpui, int type, int *retval)
 {
-    struct svm_softc *sc;
     struct svm_vcpu *vcpu;
     struct vlapic *vlapic;
     int error;

-    sc = arg;
     vcpu = vcpui;
     error = 0;

     switch (type) {
     case VM_CAP_HALT_EXIT:
-        *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+        *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT,
             VMCB_INTCPT_HLT);
         break;
     case VM_CAP_PAUSE_EXIT:
-        *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
+        *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT,
             VMCB_INTCPT_PAUSE);
         break;
     case VM_CAP_UNRESTRICTED_GUEST:
         *retval = 1;    /* unrestricted guest is always enabled */
         break;
     case VM_CAP_IPI_EXIT:
-        vlapic = vm_lapic(sc->vm, vcpu->vcpuid);
+        vlapic = vm_lapic(vcpu->sc->vm, vcpu->vcpuid);
         *retval = vlapic->ipi_exit;
         break;
     default:
@@ -2425,16 +2407,14 @@ svm_vmspace_free(struct vmspace *vmspace)
 }

 static struct vlapic *
-svm_vlapic_init(void *arg, void *vcpui)
+svm_vlapic_init(void *vcpui)
 {
-    struct svm_softc *svm_sc;
     struct svm_vcpu *vcpu;
     struct vlapic *vlapic;

-    svm_sc = arg;
     vcpu = vcpui;
     vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
-    vlapic->vm = svm_sc->vm;
+    vlapic->vm = vcpu->sc->vm;
     vlapic->vcpuid = vcpu->vcpuid;
     vlapic->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_SVM_VLAPIC,
         M_WAITOK | M_ZERO);
@@ -2445,7 +2425,7 @@ svm_vlapic_init(void *arg, void *vcpui)
 }

 static void
-svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
+svm_vlapic_cleanup(struct vlapic *vlapic)
 {

     vlapic_cleanup(vlapic);
@@ -2455,7 +2435,7 @@ svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)

 #ifdef BHYVE_SNAPSHOT
 static int
-svm_snapshot(void *arg, struct vm_snapshot_meta *meta)
+svm_snapshot(void *vmi, struct vm_snapshot_meta *meta)
 {
     if (meta->op == VM_SNAPSHOT_RESTORE)
         flush_by_asid();
@@ -2464,163 +2444,159 @@ svm_snapshot(void *arg, struct vm_snapshot_meta *meta)
 }

 static int
-svm_vcpu_snapshot(void *arg, struct vm_snapshot_meta *meta, void *vcpui)
+svm_vcpu_snapshot(void *vcpui, struct vm_snapshot_meta *meta)
 {
-    struct svm_softc *sc;
     struct svm_vcpu *vcpu;
     int err, running, hostcpu;

-    sc = (struct svm_softc *)arg;
     vcpu = vcpui;
     err = 0;

-    KASSERT(arg != NULL, ("%s: arg was NULL", __func__));
-
-    running = vcpu_is_running(sc->vm, vcpu->vcpuid, &hostcpu);
+    running = vcpu_is_running(vcpu->sc->vm, vcpu->vcpuid, &hostcpu);
     if (running && hostcpu != curcpu) {
-        printf("%s: %s%d is running", __func__, vm_name(sc->vm),
+        printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm),
             vcpu->vcpuid);
         return (EINVAL);
     }

-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR0, meta);
-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR2, meta);
-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR3, meta);
-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR4, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR0, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR2, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR3, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR4, meta);

-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_DR6, meta);
-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_DR7, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR6, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR7, meta);

-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RAX, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RAX, meta);

-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RSP, meta);
-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RIP, meta);
-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RFLAGS, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RSP, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RIP, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RFLAGS, meta);

     /* Guest segments */
     /* ES */
-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_ES, meta);
-    err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_ES, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_ES, meta);
+    err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_ES, meta);

     /* CS */
-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CS, meta);
-    err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_CS, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CS, meta);
+    err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_CS, meta);

     /* SS */
-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_SS, meta);
-    err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_SS, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_SS, meta);
+    err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_SS, meta);

     /* DS */
-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_DS, meta);
-    err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_DS, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DS, meta);
+    err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_DS, meta);

     /* FS */
-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_FS, meta);
-    err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_FS, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_FS, meta);
+    err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_FS, meta);

     /* GS */
-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_GS, meta);
-    err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_GS, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_GS, meta);
+    err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GS, meta);

     /* TR */
-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_TR, meta);
-    err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_TR, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_TR, meta);
+    err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_TR, meta);

     /* LDTR */
-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_LDTR, meta);
-    err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_LDTR, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_LDTR, meta);
+    err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_LDTR, meta);

     /* EFER */
-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_EFER, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_EFER, meta);

     /* IDTR and GDTR */
-    err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_IDTR, meta);
-    err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_GDTR, meta);
+    err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_IDTR, meta);
+    err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GDTR, meta);

     /* Specific AMD registers */
-    err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_INTR_SHADOW, meta);
+    err += svm_snapshot_reg(vcpu, VM_REG_GUEST_INTR_SHADOW, meta);

-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_CR_INTERCEPT, 4), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_DR_INTERCEPT, 4), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_EXC_INTERCEPT, 4), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_INST1_INTERCEPT, 4), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_INST2_INTERCEPT, 4), meta);

-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_PAUSE_FILTHRESH, 2), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_PAUSE_FILCNT, 2), meta);

-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_ASID, 4), meta);

-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_TLB_CTRL, 4), meta);

-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_VIRQ, 8), meta);

-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_EXIT_REASON, 8), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_EXITINFO1, 8), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_EXITINFO2, 8), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_EXITINTINFO, 8), meta);

-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_NP_ENABLE, 1), meta);

-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_AVIC_BAR, 8), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_AVIC_PAGE, 8), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_AVIC_LT, 8), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_AVIC_PT, 8), meta);

-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_CPL, 1), meta);

-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_STAR, 8), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_LSTAR, 8), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_CSTAR, 8), meta);

-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_SFMASK, 8), meta);

-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_KERNELGBASE, 8), meta);

-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_SYSENTER_CS, 8), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_SYSENTER_ESP, 8), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_SYSENTER_EIP, 8), meta);

-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_GUEST_PAT, 8), meta);

-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_DBGCTL, 8), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_BR_FROM, 8), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_BR_TO, 8), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_INT_FROM, 8), meta);
-    err += vmcb_snapshot_any(sc, vcpu,
+    err += vmcb_snapshot_any(vcpu,
         VMCB_ACCESS(VMCB_OFF_INT_TO, 8), meta);
     if (err != 0)
         goto done;
@@ -2669,11 +2645,12 @@ svm_vcpu_snapshot(void *arg, struct vm_snapshot_meta *meta, void *vcpui)
 }

 static int
-svm_restore_tsc(void *arg, void *vcpui, uint64_t offset)
+svm_restore_tsc(void *vcpui, uint64_t offset)
 {
+    struct svm_vcpu *vcpu = vcpui;
     int err;

-    err = svm_set_tsc_offset(arg, vcpui, offset);
+    err = svm_set_tsc_offset(vcpu->sc, vcpu, offset);

     return (err);
 }
sys/amd64/vmm/amd/svm_softc.h
@@ -36,12 +36,15 @@
 #define SVM_IO_BITMAP_SIZE  (3 * PAGE_SIZE)
 #define SVM_MSR_BITMAP_SIZE (2 * PAGE_SIZE)

+struct svm_softc;
+
 struct asid {
     uint64_t    gen;    /* range is [1, ~0UL] */
     uint32_t    num;    /* range is [1, nasid - 1] */
 };

 struct svm_vcpu {
+    struct svm_softc *sc;
     struct vmcb     *vmcb;   /* hardware saved vcpu context */
     struct svm_regctx swctx; /* software saved vcpu context */
     uint64_t        vmcb_pa; /* VMCB physical address */
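Why the new forward declaration compiles: struct svm_vcpu only stores a
pointer to struct svm_softc (defined further down in this same header), and
C allows pointers to incomplete types, so a one-line declaration suffices
and no extra include or reordering is needed. Schematically:

    struct svm_softc;               /* incomplete type: fine for pointers */

    struct svm_vcpu {
        struct svm_softc *sc;       /* back-pointer to the per-vm softc */
        /* ... */
    };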
sys/amd64/vmm/amd/vmcb.c
@@ -116,8 +116,7 @@ vmcb_segptr(struct vmcb *vmcb, int type)
 }

 static int
-vmcb_access(struct svm_softc *softc, struct svm_vcpu *vcpu, int write,
-    int ident, uint64_t *val)
+vmcb_access(struct svm_vcpu *vcpu, int write, int ident, uint64_t *val)
 {
     struct vmcb *vmcb;
     int off, bytes;
@@ -146,7 +145,7 @@ vmcb_access(struct svm_softc *softc, struct svm_vcpu *vcpu, int write,
         memcpy(val, ptr + off, bytes);
         break;
     default:
-        VCPU_CTR1(softc->vm, vcpu->vcpuid,
+        VCPU_CTR1(vcpu->sc->vm, vcpu->vcpuid,
             "Invalid size %d for VMCB access: %d", bytes);
         return (EINVAL);
     }
@@ -162,8 +161,7 @@ vmcb_access(struct svm_softc *softc, struct svm_vcpu *vcpu, int write,
  * Read from segment selector, control and general purpose register of VMCB.
  */
 int
-vmcb_read(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
-    uint64_t *retval)
+vmcb_read(struct svm_vcpu *vcpu, int ident, uint64_t *retval)
 {
     struct vmcb *vmcb;
     struct vmcb_state *state;
@@ -175,7 +173,7 @@ vmcb_read(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
     err = 0;

     if (VMCB_ACCESS_OK(ident))
-        return (vmcb_access(sc, vcpu, 0, ident, retval));
+        return (vmcb_access(vcpu, 0, ident, retval));

     switch (ident) {
     case VM_REG_GUEST_CR0:
@@ -253,7 +251,7 @@ vmcb_read(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
  * Write to segment selector, control and general purpose register of VMCB.
  */
 int
-vmcb_write(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident, uint64_t val)
+vmcb_write(struct svm_vcpu *vcpu, int ident, uint64_t val)
 {
     struct vmcb *vmcb;
     struct vmcb_state *state;
@@ -266,7 +264,7 @@ vmcb_write(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident, uint64_t val)
     err = 0;

     if (VMCB_ACCESS_OK(ident))
-        return (vmcb_access(sc, vcpu, 1, ident, &val));
+        return (vmcb_access(vcpu, 1, ident, &val));

     switch (ident) {
     case VM_REG_GUEST_CR0:
@@ -366,8 +364,7 @@ vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg2)
 }

 int
-vmcb_setdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
-    struct seg_desc *desc)
+vmcb_setdesc(struct svm_vcpu *vcpu, int reg, struct seg_desc *desc)
 {
     struct vmcb *vmcb;
     struct vmcb_segment *seg;
@@ -395,8 +392,9 @@ vmcb_setdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
         seg->attrib = attrib;
     }

-    VCPU_CTR4(sc->vm, vcpu->vcpuid, "Setting desc %d: base (%#lx), limit (%#x), "
-        "attrib (%#x)", reg, seg->base, seg->limit, seg->attrib);
+    VCPU_CTR4(vcpu->sc->vm, vcpu->vcpuid, "Setting desc %d: base (%#lx), "
+        "limit (%#x), attrib (%#x)", reg, seg->base, seg->limit,
+        seg->attrib);

     switch (reg) {
     case VM_REG_GUEST_CS:
@@ -417,8 +415,7 @@ vmcb_setdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
 }

 int
-vmcb_getdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
-    struct seg_desc *desc)
+vmcb_getdesc(struct svm_vcpu *vcpu, int reg, struct seg_desc *desc)
 {
     struct vmcb *vmcb;
     struct vmcb_segment *seg;
@@ -458,8 +455,7 @@ vmcb_getdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,

 #ifdef BHYVE_SNAPSHOT
 int
-vmcb_getany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
-    uint64_t *val)
+vmcb_getany(struct svm_vcpu *vcpu, int ident, uint64_t *val)
 {
     int error = 0;

@@ -468,15 +464,14 @@ vmcb_getany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
         goto err;
     }

-    error = vmcb_read(sc, vcpu, ident, val);
+    error = vmcb_read(vcpu, ident, val);

 err:
     return (error);
 }

 int
-vmcb_setany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
-    uint64_t val)
+vmcb_setany(struct svm_vcpu *vcpu, int ident, uint64_t val)
 {
     int error = 0;

@@ -485,21 +480,21 @@ vmcb_setany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
         goto err;
     }

-    error = vmcb_write(sc, vcpu, ident, val);
+    error = vmcb_write(vcpu, ident, val);

 err:
     return (error);
 }

 int
-vmcb_snapshot_desc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
+vmcb_snapshot_desc(struct svm_vcpu *vcpu, int reg,
     struct vm_snapshot_meta *meta)
 {
     int ret;
     struct seg_desc desc;

     if (meta->op == VM_SNAPSHOT_SAVE) {
-        ret = vmcb_getdesc(sc, vcpu, reg, &desc);
+        ret = vmcb_getdesc(vcpu, reg, &desc);
         if (ret != 0)
             goto done;

@@ -511,7 +506,7 @@ vmcb_snapshot_desc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
         SNAPSHOT_VAR_OR_LEAVE(desc.limit, meta, ret, done);
         SNAPSHOT_VAR_OR_LEAVE(desc.access, meta, ret, done);

-        ret = vmcb_setdesc(sc, vcpu, reg, &desc);
+        ret = vmcb_setdesc(vcpu, reg, &desc);
         if (ret != 0)
             goto done;
     } else {
@@ -524,14 +519,14 @@ vmcb_snapshot_desc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
 }

 int
-vmcb_snapshot_any(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
-    struct vm_snapshot_meta *meta)
+vmcb_snapshot_any(struct svm_vcpu *vcpu, int ident,
+    struct vm_snapshot_meta *meta)
 {
     int ret;
     uint64_t val;

     if (meta->op == VM_SNAPSHOT_SAVE) {
-        ret = vmcb_getany(sc, vcpu, ident, &val);
+        ret = vmcb_getany(vcpu, ident, &val);
         if (ret != 0)
             goto done;

@@ -539,7 +534,7 @@ vmcb_snapshot_any(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
     } else if (meta->op == VM_SNAPSHOT_RESTORE) {
         SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);

-        ret = vmcb_setany(sc, vcpu, ident, val);
+        ret = vmcb_setany(vcpu, ident, val);
         if (ret != 0)
             goto done;
     } else {
sys/amd64/vmm/amd/vmcb.h
@@ -354,23 +354,17 @@ struct vmcb {
 CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
 CTASSERT(offsetof(struct vmcb, state) == 0x400);

-int vmcb_read(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
-    uint64_t *retval);
-int vmcb_write(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
-    uint64_t val);
-int vmcb_setdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
-    struct seg_desc *desc);
-int vmcb_getdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
-    struct seg_desc *desc);
+int vmcb_read(struct svm_vcpu *vcpu, int ident, uint64_t *retval);
+int vmcb_write(struct svm_vcpu *vcpu, int ident, uint64_t val);
+int vmcb_setdesc(struct svm_vcpu *vcpu, int ident, struct seg_desc *desc);
+int vmcb_getdesc(struct svm_vcpu *vcpu, int ident, struct seg_desc *desc);
 int vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg);
 #ifdef BHYVE_SNAPSHOT
-int vmcb_getany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
-    uint64_t *val);
-int vmcb_setany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
-    uint64_t val);
-int vmcb_snapshot_desc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
+int vmcb_getany(struct svm_vcpu *vcpu, int ident, uint64_t *val);
+int vmcb_setany(struct svm_vcpu *vcpu, int ident, uint64_t val);
+int vmcb_snapshot_desc(struct svm_vcpu *vcpu, int reg,
     struct vm_snapshot_meta *meta);
-int vmcb_snapshot_any(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+int vmcb_snapshot_any(struct svm_vcpu *vcpu, int ident,
     struct vm_snapshot_meta *meta);
 #endif

@ -310,12 +310,12 @@ SDT_PROBE_DEFINE4(vmm, vmx, exit, return,
|
||||
*/
|
||||
#define APIC_ACCESS_ADDRESS 0xFFFFF000
|
||||
|
||||
static int vmx_getdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc);
|
||||
static int vmx_getreg(void *arg, void *vcpui, int reg, uint64_t *retval);
|
||||
static int vmx_getdesc(void *vcpui, int reg, struct seg_desc *desc);
|
||||
static int vmx_getreg(void *vcpui, int reg, uint64_t *retval);
|
||||
static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
|
||||
static void vmx_inject_pir(struct vlapic *vlapic);
|
||||
#ifdef BHYVE_SNAPSHOT
|
||||
static int vmx_restore_tsc(void *arg, void *vcpui, uint64_t now);
|
||||
static int vmx_restore_tsc(void *vcpui, uint64_t now);
|
||||
#endif
|
||||
|
||||
static inline bool
|
||||
@ -1110,15 +1110,16 @@ vmx_init(struct vm *vm, pmap_t pmap)
|
||||
}
|
||||
|
||||
static void *
|
||||
vmx_vcpu_init(void *arg, int vcpuid)
|
||||
vmx_vcpu_init(void *vmi, int vcpuid)
|
||||
{
|
||||
struct vmx *vmx = arg;
|
||||
struct vmx *vmx = vmi;
|
||||
struct vmcs *vmcs;
|
||||
struct vmx_vcpu *vcpu;
|
||||
uint32_t exc_bitmap;
|
||||
int error;
|
||||
|
||||
vcpu = malloc(sizeof(*vcpu), M_VMX, M_WAITOK | M_ZERO);
|
||||
vcpu->vmx = vmx;
|
||||
vcpu->vcpuid = vcpuid;
|
||||
vcpu->vmcs = malloc_aligned(sizeof(*vmcs), PAGE_SIZE, M_VMX,
|
||||
M_WAITOK | M_ZERO);
|
||||
@ -1235,30 +1236,31 @@ vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
|
||||
}
|
||||
|
||||
static __inline void
|
||||
vmx_run_trace(struct vmx *vmx, struct vmx_vcpu *vcpu)
|
||||
vmx_run_trace(struct vmx_vcpu *vcpu)
|
||||
{
|
||||
#ifdef KTR
|
||||
VCPU_CTR1(vmx->vm, vcpu->vcpuid, "Resume execution at %#lx",
|
||||
VCPU_CTR1(vcpu->vmx->vm, vcpu->vcpuid, "Resume execution at %#lx",
|
||||
vmcs_guest_rip());
|
||||
#endif
|
||||
}
|
||||
|
||||
static __inline void
|
||||
vmx_exit_trace(struct vmx *vmx, struct vmx_vcpu *vcpu, uint64_t rip,
|
||||
uint32_t exit_reason, int handled)
|
||||
vmx_exit_trace(struct vmx_vcpu *vcpu, uint64_t rip, uint32_t exit_reason,
|
||||
int handled)
|
||||
{
|
||||
#ifdef KTR
|
||||
VCPU_CTR3(vmx->vm, vcpu->vcpuid, "%s %s vmexit at 0x%0lx",
|
||||
VCPU_CTR3(vcpu->vmx->vm, vcpu->vcpuid, "%s %s vmexit at 0x%0lx",
|
||||
handled ? "handled" : "unhandled",
|
||||
exit_reason_to_str(exit_reason), rip);
|
||||
#endif
|
||||
}
|
||||
|
||||
static __inline void
|
||||
vmx_astpending_trace(struct vmx *vmx, struct vmx_vcpu *vcpu, uint64_t rip)
|
||||
vmx_astpending_trace(struct vmx_vcpu *vcpu, uint64_t rip)
|
||||
{
|
||||
#ifdef KTR
|
||||
VCPU_CTR1(vmx->vm, vcpu->vcpuid, "astpending vmexit at 0x%0lx", rip);
|
||||
VCPU_CTR1(vcpu->vmx->vm, vcpu->vcpuid, "astpending vmexit at 0x%0lx",
|
||||
rip);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -1350,48 +1352,50 @@ vmx_set_pcpu_defaults(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap)
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);

static void __inline
vmx_set_int_window_exiting(struct vmx *vmx, struct vmx_vcpu *vcpu)
vmx_set_int_window_exiting(struct vmx_vcpu *vcpu)
{

        if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
                vcpu->cap.proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
                vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
                VCPU_CTR0(vmx->vm, vcpu->vcpuid,
                VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid,
                    "Enabling interrupt window exiting");
        }
}

static void __inline
vmx_clear_int_window_exiting(struct vmx *vmx, struct vmx_vcpu *vcpu)
vmx_clear_int_window_exiting(struct vmx_vcpu *vcpu)
{

        KASSERT((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
            ("intr_window_exiting not set: %#x", vcpu->cap.proc_ctls));
        vcpu->cap.proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
        vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
        VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Disabling interrupt window exiting");
        VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid,
            "Disabling interrupt window exiting");
}

static void __inline
vmx_set_nmi_window_exiting(struct vmx *vmx, struct vmx_vcpu *vcpu)
vmx_set_nmi_window_exiting(struct vmx_vcpu *vcpu)
{

        if ((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
                vcpu->cap.proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
                vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
                VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Enabling NMI window exiting");
                VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid,
                    "Enabling NMI window exiting");
        }
}

static void __inline
vmx_clear_nmi_window_exiting(struct vmx *vmx, struct vmx_vcpu *vcpu)
vmx_clear_nmi_window_exiting(struct vmx_vcpu *vcpu)
{

        KASSERT((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
            ("nmi_window_exiting not set %#x", vcpu->cap.proc_ctls));
        vcpu->cap.proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
        vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
        VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Disabling NMI window exiting");
        VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid, "Disabling NMI window exiting");
}

int
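A second idiom is visible in these helpers: vcpu->cap.proc_ctls is a software copy of the VMCS primary processor-based controls, so the relatively expensive VMCS write happens only when a bit actually changes state. A minimal sketch of that cached read-modify-write, with a plain variable and a counter standing in for the hardware field (the bit value is illustrative):

    #include <assert.h>
    #include <stdint.h>

    #define INT_WINDOW_EXITING      0x00000004      /* illustrative bit */

    static uint32_t hw_ctls;        /* stand-in for the VMCS field */
    static int hw_writes;           /* counts simulated VMCS accesses */

    static void
    vmcs_write_sim(uint32_t val)
    {
            hw_ctls = val;
            hw_writes++;
    }

    struct vcpu_cap {
            uint32_t proc_ctls;     /* cached copy of the control word */
    };

    static void
    set_int_window_exiting(struct vcpu_cap *cap)
    {
            /* Touch the hardware only on a 0 -> 1 transition. */
            if ((cap->proc_ctls & INT_WINDOW_EXITING) == 0) {
                    cap->proc_ctls |= INT_WINDOW_EXITING;
                    vmcs_write_sim(cap->proc_ctls);
            }
    }

    int
    main(void)
    {
            struct vcpu_cap cap = { .proc_ctls = 0 };

            set_int_window_exiting(&cap);
            set_int_window_exiting(&cap);   /* second call is a no-op */
            assert(hw_writes == 1 && hw_ctls == INT_WINDOW_EXITING);
            return (0);
    }
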
@@ -1518,7 +1522,7 @@ vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
                }

                if (need_nmi_exiting)
                        vmx_set_nmi_window_exiting(vmx, vcpu);
                        vmx_set_nmi_window_exiting(vcpu);
        }

        extint_pending = vm_extint_pending(vmx->vm, vcpu->vcpuid);
@@ -1618,7 +1622,7 @@ vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
                 * as soon as possible. This applies both for the software
                 * emulated vlapic and the hardware assisted virtual APIC.
                 */
                vmx_set_int_window_exiting(vmx, vcpu);
                vmx_set_int_window_exiting(vcpu);
        }

        VCPU_CTR1(vmx->vm, vcpu->vcpuid, "Injecting hwintr at vector %d",
@@ -1631,7 +1635,7 @@ vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
         * Set the Interrupt Window Exiting execution control so we can inject
         * the interrupt as soon as blocking condition goes away.
         */
        vmx_set_int_window_exiting(vmx, vcpu);
        vmx_set_int_window_exiting(vcpu);
}

/*
@@ -1644,29 +1648,29 @@ vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
 */
static void
vmx_restore_nmi_blocking(struct vmx *vmx, struct vmx_vcpu *vcpu)
vmx_restore_nmi_blocking(struct vmx_vcpu *vcpu)
{
        uint32_t gi;

        VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Restore Virtual-NMI blocking");
        VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid, "Restore Virtual-NMI blocking");
        gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
        gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
        vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_clear_nmi_blocking(struct vmx *vmx, struct vmx_vcpu *vcpu)
vmx_clear_nmi_blocking(struct vmx_vcpu *vcpu)
{
        uint32_t gi;

        VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Clear Virtual-NMI blocking");
        VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid, "Clear Virtual-NMI blocking");
        gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
        gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
        vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_assert_nmi_blocking(struct vmx *vmx, struct vmx_vcpu *vcpu)
vmx_assert_nmi_blocking(struct vmx_vcpu *vcpu)
{
        uint32_t gi __diagused;

@@ -1991,26 +1995,26 @@ vmx_paging_mode(void)
}

static uint64_t
inout_str_index(struct vmx *vmx, struct vmx_vcpu *vcpu, int in)
inout_str_index(struct vmx_vcpu *vcpu, int in)
{
        uint64_t val;
        int error __diagused;
        enum vm_reg_name reg;

        reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
        error = vmx_getreg(vmx, vcpu, reg, &val);
        error = vmx_getreg(vcpu, reg, &val);
        KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
        return (val);
}

static uint64_t
inout_str_count(struct vmx *vmx, struct vmx_vcpu *vcpu, int rep)
inout_str_count(struct vmx_vcpu *vcpu, int rep)
{
        uint64_t val;
        int error __diagused;

        if (rep) {
                error = vmx_getreg(vmx, vcpu, VM_REG_GUEST_RCX, &val);
                error = vmx_getreg(vcpu, VM_REG_GUEST_RCX, &val);
                KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
        } else {
                val = 1;
@@ -2037,8 +2041,8 @@ inout_str_addrsize(uint32_t inst_info)
}

static void
inout_str_seginfo(struct vmx *vmx, struct vmx_vcpu *vcpu, uint32_t inst_info,
    int in, struct vm_inout_str *vis)
inout_str_seginfo(struct vmx_vcpu *vcpu, uint32_t inst_info, int in,
    struct vm_inout_str *vis)
{
        int error __diagused, s;

@@ -2049,7 +2053,7 @@ inout_str_seginfo(struct vmx *vmx, struct vmx_vcpu *vcpu, uint32_t inst_info,
                vis->seg_name = vm_segment_name(s);
        }

        error = vmx_getdesc(vmx, vcpu, vis->seg_name, &vis->seg_desc);
        error = vmx_getdesc(vcpu, vis->seg_name, &vis->seg_desc);
        KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));
}

@@ -2435,9 +2439,9 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
                intr_type = idtvec_info & VMCS_INTR_T_MASK;
                if (intr_type == VMCS_INTR_T_NMI) {
                        if (reason != EXIT_REASON_TASK_SWITCH)
                                vmx_clear_nmi_blocking(vmx, vcpu);
                                vmx_clear_nmi_blocking(vcpu);
                        else
                                vmx_assert_nmi_blocking(vmx, vcpu);
                                vmx_assert_nmi_blocking(vcpu);
                }

                /*
@@ -2578,7 +2582,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
        case EXIT_REASON_INTR_WINDOW:
                vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_INTR_WINDOW, 1);
                SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpuid, vmexit);
                vmx_clear_int_window_exiting(vmx, vcpu);
                vmx_clear_int_window_exiting(vcpu);
                return (1);
        case EXIT_REASON_EXT_INTR:
                /*
@@ -2616,7 +2620,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
                /* Exit to allow the pending virtual NMI to be injected */
                if (vm_nmi_pending(vmx->vm, vcpuid))
                        vmx_inject_nmi(vmx, vcpu);
                vmx_clear_nmi_window_exiting(vmx, vcpu);
                vmx_clear_nmi_window_exiting(vcpu);
                vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_NMI_WINDOW, 1);
                return (1);
        case EXIT_REASON_INOUT:
@@ -2635,10 +2639,10 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
                        vmx_paging_info(&vis->paging);
                        vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS);
                        vis->cr0 = vmcs_read(VMCS_GUEST_CR0);
                        vis->index = inout_str_index(vmx, vcpu, in);
                        vis->count = inout_str_count(vmx, vcpu, vis->inout.rep);
                        vis->index = inout_str_index(vcpu, in);
                        vis->count = inout_str_count(vcpu, vis->inout.rep);
                        vis->addrsize = inout_str_addrsize(inst_info);
                        inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
                        inout_str_seginfo(vcpu, inst_info, in, vis);
                }
                SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpuid, vmexit);
                break;
@@ -2668,7 +2672,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
                if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
                    (intr_vec != IDT_DF) &&
                    (intr_info & EXIT_QUAL_NMIUDTI) != 0)
                        vmx_restore_nmi_blocking(vmx, vcpu);
                        vmx_restore_nmi_blocking(vcpu);

                /*
                 * The NMI has already been handled in vmx_exit_handle_nmi().
@@ -2761,7 +2765,7 @@ vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
                 */
                if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
                    (qual & EXIT_QUAL_NMIUDTI) != 0)
                        vmx_restore_nmi_blocking(vmx, vcpu);
                        vmx_restore_nmi_blocking(vcpu);
                break;
        case EXIT_REASON_VIRTUALIZED_EOI:
                vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
@@ -2898,8 +2902,7 @@ vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
 * clear NMI blocking.
 */
static __inline void
vmx_exit_handle_nmi(struct vmx *vmx, struct vmx_vcpu *vcpu,
    struct vm_exit *vmexit)
vmx_exit_handle_nmi(struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
{
        uint32_t intr_info;

@@ -2915,7 +2918,8 @@ vmx_exit_handle_nmi(struct vmx *vmx, struct vmx_vcpu *vcpu,
        if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
                KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
                    "to NMI has invalid vector: %#x", intr_info));
                VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Vectoring to NMI handler");
                VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid,
                    "Vectoring to NMI handler");
                __asm __volatile("int $2");
        }
}
@@ -3013,8 +3017,7 @@ vmx_pmap_deactivate(struct vmx *vmx, pmap_t pmap)
}

static int
vmx_run(void *arg, void *vcpui, register_t rip, pmap_t pmap,
    struct vm_eventinfo *evinfo)
vmx_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
{
        int rc, handled, launched, vcpuid;
        struct vmx *vmx;
@@ -3028,9 +3031,9 @@ vmx_run(void *arg, void *vcpui, register_t rip, pmap_t pmap,
        struct region_descriptor gdtr, idtr;
        uint16_t ldt_sel;

        vmx = arg;
        vm = vmx->vm;
        vcpu = vcpui;
        vmx = vcpu->vmx;
        vm = vmx->vm;
        vcpuid = vcpu->vcpuid;
        vmcs = vcpu->vmcs;
        vmxctx = &vcpu->ctx;
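With the per-vm cookie gone from the run hook's signature, vmx_run() above derives everything from the single opaque vCPU cookie: cast it, then follow the backpointer. A sketch of that entry convention under stand-in types (run_sketch and its members are hypothetical):

    #include <stddef.h>

    struct vm;
    struct vmx { struct vm *vm; };
    struct vmx_vcpu { struct vmx *vmx; int vcpuid; };

    /* The backend entry point receives only the opaque per-vCPU cookie... */
    static int
    run_sketch(void *vcpui)
    {
            struct vmx_vcpu *vcpu = vcpui;  /* ...recovers the typed vCPU... */
            struct vmx *vmx = vcpu->vmx;    /* ...then the per-VM state. */
            struct vm *vm = vmx->vm;

            (void)vm;
            return (vcpu->vcpuid);
    }

    int
    main(void)
    {
            struct vmx vmx = { .vm = NULL };
            struct vmx_vcpu vcpu = { .vmx = &vmx, .vcpuid = 0 };

            return (run_sketch(&vcpu));
    }
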
@@ -3109,7 +3112,7 @@ vmx_run(void *arg, void *vcpui, register_t rip, pmap_t pmap,
                if (vcpu_should_yield(vm, vcpuid)) {
                        enable_intr();
                        vm_exit_astpending(vmx->vm, vcpuid, rip);
                        vmx_astpending_trace(vmx, vcpu, rip);
                        vmx_astpending_trace(vcpu, rip);
                        handled = HANDLED;
                        break;
                }
@@ -3171,7 +3174,7 @@ vmx_run(void *arg, void *vcpui, register_t rip, pmap_t pmap,
                 */
                vmx_pmap_activate(vmx, pmap);

                vmx_run_trace(vmx, vcpu);
                vmx_run_trace(vcpu);
                rc = vmx_enter_guest(vmxctx, vmx, launched);

                vmx_pmap_deactivate(vmx, pmap);
@@ -3192,7 +3195,7 @@ vmx_run(void *arg, void *vcpui, register_t rip, pmap_t pmap,
                vcpu->state.nextrip = rip;

                if (rc == VMX_GUEST_VMEXIT) {
                        vmx_exit_handle_nmi(vmx, vcpu, vmexit);
                        vmx_exit_handle_nmi(vcpu, vmexit);
                        enable_intr();
                        handled = vmx_exit_process(vmx, vcpu, vmexit);
                } else {
@@ -3200,7 +3203,7 @@ vmx_run(void *arg, void *vcpui, register_t rip, pmap_t pmap,
                        vmx_exit_inst_error(vmxctx, rc, vmexit);
                }
                launched = 1;
                vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
                vmx_exit_trace(vcpu, rip, exit_reason, handled);
                rip = vmexit->rip;
        } while (handled);

@@ -3224,7 +3227,7 @@ vmx_run(void *arg, void *vcpui, register_t rip, pmap_t pmap,
}

static void
vmx_vcpu_cleanup(void *arg, void *vcpui)
vmx_vcpu_cleanup(void *vcpui)
{
        struct vmx_vcpu *vcpu = vcpui;

@@ -3236,9 +3239,9 @@ vmx_vcpu_cleanup(void *arg, void *vcpui)
}

static void
vmx_cleanup(void *arg)
vmx_cleanup(void *vmi)
{
        struct vmx *vmx = arg;
        struct vmx *vmx = vmi;

        if (virtual_interrupt_delivery)
                vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
@@ -3339,8 +3342,7 @@ vmx_get_intr_shadow(struct vmx_vcpu *vcpu, int running, uint64_t *retval)
}

static int
vmx_modify_intr_shadow(struct vmx *vmx, struct vmx_vcpu *vcpu, int running,
    uint64_t val)
vmx_modify_intr_shadow(struct vmx_vcpu *vcpu, int running, uint64_t val)
{
        struct vmcs *vmcs;
        uint64_t gi;
@@ -3362,8 +3364,8 @@ vmx_modify_intr_shadow(struct vmx *vmx, struct vmx_vcpu *vcpu, int running,
                error = vmcs_setreg(vmcs, running, ident, gi);
        }
done:
        VCPU_CTR2(vmx->vm, vcpu->vcpuid, "Setting intr_shadow to %#lx %s", val,
            error ? "failed" : "succeeded");
        VCPU_CTR2(vcpu->vmx->vm, vcpu->vcpuid, "Setting intr_shadow to %#lx %s",
            val, error ? "failed" : "succeeded");
        return (error);
}

@@ -3389,11 +3391,11 @@ vmx_shadow_reg(int reg)
}

static int
vmx_getreg(void *arg, void *vcpui, int reg, uint64_t *retval)
vmx_getreg(void *vcpui, int reg, uint64_t *retval)
{
        int running, hostcpu;
        struct vmx *vmx = arg;
        struct vmx_vcpu *vcpu = vcpui;
        struct vmx *vmx = vcpu->vmx;

        running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
        if (running && hostcpu != curcpu)
@@ -3410,13 +3412,13 @@ vmx_getreg(void *arg, void *vcpui, int reg, uint64_t *retval)
}

static int
vmx_setreg(void *arg, void *vcpui, int reg, uint64_t val)
vmx_setreg(void *vcpui, int reg, uint64_t val)
{
        int error, hostcpu, running, shadow;
        uint64_t ctls;
        pmap_t pmap;
        struct vmx *vmx = arg;
        struct vmx_vcpu *vcpu = vcpui;
        struct vmx *vmx = vcpu->vmx;

        running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
        if (running && hostcpu != curcpu)
@@ -3424,7 +3426,7 @@ vmx_setreg(void *arg, void *vcpui, int reg, uint64_t val)
                    vcpu->vcpuid);

        if (reg == VM_REG_GUEST_INTR_SHADOW)
                return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
                return (vmx_modify_intr_shadow(vcpu, running, val));

        if (vmxctx_setreg(&vcpu->ctx, reg, val) == 0)
                return (0);
@@ -3479,11 +3481,11 @@ vmx_setreg(void *arg, void *vcpui, int reg, uint64_t val)
}

static int
vmx_getdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc)
vmx_getdesc(void *vcpui, int reg, struct seg_desc *desc)
{
        int hostcpu, running;
        struct vmx *vmx = arg;
        struct vmx_vcpu *vcpu = vcpui;
        struct vmx *vmx = vcpu->vmx;

        running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
        if (running && hostcpu != curcpu)
@@ -3494,11 +3496,11 @@ vmx_getdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc)
}

static int
vmx_setdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc)
vmx_setdesc(void *vcpui, int reg, struct seg_desc *desc)
{
        int hostcpu, running;
        struct vmx *vmx = arg;
        struct vmx_vcpu *vcpu = vcpui;
        struct vmx *vmx = vcpu->vmx;

        running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
        if (running && hostcpu != curcpu)
@@ -3509,7 +3511,7 @@ vmx_setdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc)
}

static int
vmx_getcap(void *arg, void *vcpui, int type, int *retval)
vmx_getcap(void *vcpui, int type, int *retval)
{
        struct vmx_vcpu *vcpu = vcpui;
        int vcap;
@@ -3563,9 +3565,8 @@ vmx_getcap(void *arg, void *vcpui, int type, int *retval)
}

static int
vmx_setcap(void *arg, void *vcpui, int type, int val)
vmx_setcap(void *vcpui, int type, int val)
{
        struct vmx *vmx = arg;
        struct vmx_vcpu *vcpu = vcpui;
        struct vmcs *vmcs = vcpu->vmcs;
        struct vlapic *vlapic;
@@ -3650,7 +3651,7 @@ vmx_setcap(void *arg, void *vcpui, int type, int val)
        case VM_CAP_IPI_EXIT:
                retval = 0;

                vlapic = vm_lapic(vmx->vm, vcpu->vcpuid);
                vlapic = vm_lapic(vcpu->vmx->vm, vcpu->vcpuid);
                vlapic->ipi_exit = val;
                break;
        default:
@@ -3704,7 +3705,6 @@ vmx_vmspace_free(struct vmspace *vmspace)
struct vlapic_vtx {
        struct vlapic vlapic;
        struct pir_desc *pir_desc;
        struct vmx *vmx;
        struct vmx_vcpu *vcpu;
        u_int pending_prio;
};
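struct vlapic_vtx can drop its struct vmx member because vlapic_vtx->vcpu->vmx now reaches the same state (vmx_enable_x2apic_mode_vid below does exactly that). The (struct vlapic_vtx *)vlapic cast used there relies on struct vlapic being the first member of the container, which makes the two pointers interchangeable. A minimal sketch of that embedded-first-member idiom:

    #include <assert.h>

    struct vlapic {
            int apic_id;
    };

    struct vlapic_vtx {
            struct vlapic vlapic;   /* must stay the first member */
            unsigned int pending_prio;
    };

    int
    main(void)
    {
            struct vlapic_vtx vtx = { .vlapic = { .apic_id = 7 } };
            struct vlapic *base = &vtx.vlapic;

            /* Downcast from the embedded member back to the container. */
            struct vlapic_vtx *back = (struct vlapic_vtx *)base;
            assert(back == &vtx && back->vlapic.apic_id == 7);
            return (0);
    }
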
@@ -3934,8 +3934,8 @@ vmx_enable_x2apic_mode_vid(struct vlapic *vlapic)
        int error __diagused;

        vlapic_vtx = (struct vlapic_vtx *)vlapic;
        vmx = vlapic_vtx->vmx;
        vcpu = vlapic_vtx->vcpu;
        vmx = vcpu->vmx;
        vmcs = vcpu->vmcs;

        proc_ctls2 = vcpu->cap.proc_ctls2;
@@ -4071,15 +4071,15 @@ vmx_inject_pir(struct vlapic *vlapic)
}

static struct vlapic *
vmx_vlapic_init(void *arg, void *vcpui)
vmx_vlapic_init(void *vcpui)
{
        struct vmx *vmx;
        struct vmx_vcpu *vcpu;
        struct vlapic *vlapic;
        struct vlapic_vtx *vlapic_vtx;

        vmx = arg;
        vcpu = vcpui;
        vmx = vcpu->vmx;

        vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
        vlapic->vm = vmx->vm;
@@ -4088,7 +4088,6 @@ vmx_vlapic_init(void *arg, void *vcpui)

        vlapic_vtx = (struct vlapic_vtx *)vlapic;
        vlapic_vtx->pir_desc = vcpu->pir_desc;
        vlapic_vtx->vmx = vmx;
        vlapic_vtx->vcpu = vcpu;

        if (tpr_shadowing) {
@@ -4112,7 +4111,7 @@ vmx_vlapic_init(void *arg, void *vcpui)
}

static void
vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
vmx_vlapic_cleanup(struct vlapic *vlapic)
{

        vlapic_cleanup(vlapic);
@@ -4121,13 +4120,13 @@ vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)

#ifdef BHYVE_SNAPSHOT
static int
vmx_snapshot(void *arg, struct vm_snapshot_meta *meta)
vmx_snapshot(void *vmi, struct vm_snapshot_meta *meta)
{
        return (0);
}

static int
vmx_vcpu_snapshot(void *arg, struct vm_snapshot_meta *meta, void *vcpui)
vmx_vcpu_snapshot(void *vcpui, struct vm_snapshot_meta *meta)
{
        struct vmcs *vmcs;
        struct vmx *vmx;
@@ -4135,11 +4134,9 @@ vmx_vcpu_snapshot(void *arg, struct vm_snapshot_meta *meta, void *vcpui)
        struct vmxctx *vmxctx;
        int err, run, hostcpu;

        vmx = (struct vmx *)arg;
        vcpu = vcpui;
        err = 0;

        KASSERT(arg != NULL, ("%s: arg was NULL", __func__));
        vcpu = vcpui;
        vmx = vcpu->vmx;
        vmcs = vcpu->vmcs;

        run = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
@@ -4235,14 +4232,14 @@ vmx_vcpu_snapshot(void *arg, struct vm_snapshot_meta *meta, void *vcpui)
}

static int
vmx_restore_tsc(void *arg, void *vcpui, uint64_t offset)
vmx_restore_tsc(void *vcpui, uint64_t offset)
{
        struct vmcs *vmcs;
        struct vmx *vmx = (struct vmx *)arg;
        struct vmx_vcpu *vcpu = vcpui;
        struct vmcs *vmcs;
        struct vmx *vmx;
        int error, running, hostcpu;

        KASSERT(arg != NULL, ("%s: arg was NULL", __func__));
        vmx = vcpu->vmx;
        vmcs = vcpu->vmcs;

        running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);

@@ -38,6 +38,7 @@
#include "x86.h"

struct pmap;
struct vmx;

struct vmxctx {
        register_t guest_rdi;           /* Guest state */
@@ -126,6 +127,7 @@ enum {
};

struct vmx_vcpu {
        struct vmx *vmx;
        struct vmcs *vmcs;
        struct apic_page *apic_page;
        struct pir_desc *pir_desc;

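The struct vmx *vmx member added above is the field the whole commit pivots on. The vCPU constructor itself is not shown in this diff; per the commit description the backend stores the per-vm cookie there at vCPU creation, roughly along these lines (a hypothetical userspace sketch, not the kernel code):

    #include <stdlib.h>

    struct vmx { int dummy; };

    struct vmx_vcpu {
            struct vmx *vmx;        /* per-VM backpointer */
            int vcpuid;
    };

    /* Hypothetical shape of the vcpu_init hook; allocation details invented. */
    static void *
    vcpu_init_sketch(void *vmi, int vcpu_id)
    {
            struct vmx *vmx = vmi;
            struct vmx_vcpu *vcpu;

            vcpu = calloc(1, sizeof(*vcpu));
            if (vcpu == NULL)
                    return (NULL);
            vcpu->vmx = vmx;        /* every later hook relies on this */
            vcpu->vcpuid = vcpu_id;
            return (vcpu);
    }

    int
    main(void)
    {
            struct vmx vmx = { 0 };
            void *cookie = vcpu_init_sketch(&vmx, 0);

            free(cookie);
            return (0);
    }
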
@@ -209,32 +209,27 @@ DEFINE_VMMOPS_IFUNC(int, modinit, (int ipinum))
DEFINE_VMMOPS_IFUNC(int, modcleanup, (void))
DEFINE_VMMOPS_IFUNC(void, modresume, (void))
DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
DEFINE_VMMOPS_IFUNC(int, run, (void *vmi, void *vcpui, register_t rip,
    struct pmap *pmap, struct vm_eventinfo *info))
DEFINE_VMMOPS_IFUNC(int, run, (void *vcpui, register_t rip, struct pmap *pmap,
    struct vm_eventinfo *info))
DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi))
DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, int vcpu_id))
DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vmi, void *vcpui))
DEFINE_VMMOPS_IFUNC(int, getreg, (void *vmi, void *vcpui, int num,
    uint64_t *retval))
DEFINE_VMMOPS_IFUNC(int, setreg, (void *vmi, void *vcpui, int num,
    uint64_t val))
DEFINE_VMMOPS_IFUNC(int, getdesc, (void *vmi, void *vcpui, int num,
    struct seg_desc *desc))
DEFINE_VMMOPS_IFUNC(int, setdesc, (void *vmi, void *vcpui, int num,
    struct seg_desc *desc))
DEFINE_VMMOPS_IFUNC(int, getcap, (void *vmi, void *vcpui, int num, int *retval))
DEFINE_VMMOPS_IFUNC(int, setcap, (void *vmi, void *vcpui, int num, int val))
DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vcpui))
DEFINE_VMMOPS_IFUNC(int, getreg, (void *vcpui, int num, uint64_t *retval))
DEFINE_VMMOPS_IFUNC(int, setreg, (void *vcpui, int num, uint64_t val))
DEFINE_VMMOPS_IFUNC(int, getdesc, (void *vcpui, int num, struct seg_desc *desc))
DEFINE_VMMOPS_IFUNC(int, setdesc, (void *vcpui, int num, struct seg_desc *desc))
DEFINE_VMMOPS_IFUNC(int, getcap, (void *vcpui, int num, int *retval))
DEFINE_VMMOPS_IFUNC(int, setcap, (void *vcpui, int num, int val))
DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
    vm_offset_t max))
DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace))
DEFINE_VMMOPS_IFUNC(struct vlapic *, vlapic_init, (void *vmi, void *vcpui))
DEFINE_VMMOPS_IFUNC(void, vlapic_cleanup, (void *vmi, struct vlapic *vlapic))
DEFINE_VMMOPS_IFUNC(struct vlapic *, vlapic_init, (void *vcpui))
DEFINE_VMMOPS_IFUNC(void, vlapic_cleanup, (struct vlapic *vlapic))
#ifdef BHYVE_SNAPSHOT
DEFINE_VMMOPS_IFUNC(int, snapshot, (void *vmi, struct vm_snapshot_meta
    *meta))
DEFINE_VMMOPS_IFUNC(int, vcpu_snapshot, (void *vmi, struct vm_snapshot_meta
    *meta, void *vcpui))
DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vmi, void *vcpui, uint64_t now))
DEFINE_VMMOPS_IFUNC(int, snapshot, (void *vmi, struct vm_snapshot_meta *meta))
DEFINE_VMMOPS_IFUNC(int, vcpu_snapshot, (void *vcpui,
    struct vm_snapshot_meta *meta))
DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vcpui, uint64_t now))
#endif

#define fpu_start_emulating()   load_cr0(rcr0() | CR0_TS)
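Each DEFINE_VMMOPS_IFUNC line generates a vmmops_<name>() entry point that forwards to the active backend (VT-x or SVM); the real macro binds the backend via an ifunc at load time. A simplified function-pointer sketch of the dispatch shape these narrowed prototypes describe (toy_getreg and the table are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    struct vmm_ops {
            int (*getreg)(void *vcpui, int num, uint64_t *retval);
    };

    /* Toy backend: every register reads back as its own number. */
    static int
    toy_getreg(void *vcpui, int num, uint64_t *retval)
    {
            (void)vcpui;
            *retval = (uint64_t)num;
            return (0);
    }

    static const struct vmm_ops toy_ops = { .getreg = toy_getreg };
    static const struct vmm_ops *active_ops = &toy_ops;

    /*
     * What a DEFINE_VMMOPS_IFUNC(int, getreg, ...) line roughly provides:
     * a fixed vmmops_getreg() symbol that forwards to the chosen backend.
     */
    static int
    vmmops_getreg_sim(void *vcpui, int num, uint64_t *retval)
    {
            return (active_ops->getreg(vcpui, num, retval));
    }

    int
    main(void)
    {
            uint64_t val;

            vmmops_getreg_sim(NULL, 3, &val);
            printf("reg 3 = %llu\n", (unsigned long long)val);
            return (0);
    }
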
@@ -307,8 +302,8 @@ vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
        struct vcpu *vcpu = &vm->vcpu[i];

        vmmops_vlapic_cleanup(vm->cookie, vcpu->vlapic);
        vmmops_vcpu_cleanup(vm->cookie, vcpu->cookie);
        vmmops_vlapic_cleanup(vcpu->vlapic);
        vmmops_vcpu_cleanup(vcpu->cookie);
        vcpu->cookie = NULL;
        if (destroy) {
                vmm_stat_free(vcpu->stats);
@@ -338,7 +333,7 @@ vcpu_init(struct vm *vm, int vcpu_id, bool create)
        }

        vcpu->cookie = vmmops_vcpu_init(vm->cookie, vcpu_id);
        vcpu->vlapic = vmmops_vlapic_init(vm->cookie, vcpu->cookie);
        vcpu->vlapic = vmmops_vlapic_init(vcpu->cookie);
        vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
        vcpu->reqidle = 0;
        vcpu->exitintinfo = 0;
@@ -1082,8 +1077,7 @@ vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
        if (reg >= VM_REG_LAST)
                return (EINVAL);

        return (vmmops_getreg(vm->cookie, vcpu_cookie(vm, vcpu), reg,
            retval));
        return (vmmops_getreg(vcpu_cookie(vm, vcpu), reg, retval));
}

int
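The wrappers above now pass only the per-vCPU cookie, obtained through vcpu_cookie(). That helper is not part of this diff; presumably it just indexes the VM's vCPU array and returns the stored cookie, something like the following guess at its shape (VM_MAXCPU value and struct layouts are illustrative):

    #include <stdio.h>

    #define VM_MAXCPU       16

    struct vcpu {
            void *cookie;           /* backend-specific per-vCPU state */
    };

    struct vm {
            void *cookie;           /* backend-specific per-VM state */
            struct vcpu vcpu[VM_MAXCPU];
    };

    /* Plausible shape of the helper used by the wrappers above. */
    static void *
    vcpu_cookie(struct vm *vm, int vcpuid)
    {
            return (vm->vcpu[vcpuid].cookie);
    }

    int
    main(void)
    {
            struct vm vm = { 0 };
            int backend_state = 42;

            vm.vcpu[0].cookie = &backend_state;
            printf("%d\n", *(int *)vcpu_cookie(&vm, 0));
            return (0);
    }
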
@@ -1099,7 +1093,7 @@ vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
                return (EINVAL);

        vcpu = &vm->vcpu[vcpuid];
        error = vmmops_setreg(vm->cookie, vcpu->cookie, reg, val);
        error = vmmops_setreg(vcpu->cookie, reg, val);
        if (error || reg != VM_REG_GUEST_RIP)
                return (error);

@@ -1152,7 +1146,7 @@ vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
        if (!is_segment_register(reg) && !is_descriptor_table(reg))
                return (EINVAL);

        return (vmmops_getdesc(vm->cookie, vcpu_cookie(vm, vcpu), reg, desc));
        return (vmmops_getdesc(vcpu_cookie(vm, vcpu), reg, desc));
}

int
@@ -1165,7 +1159,7 @@ vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
        if (!is_segment_register(reg) && !is_descriptor_table(reg))
                return (EINVAL);

        return (vmmops_setdesc(vm->cookie, vcpu_cookie(vm, vcpu), reg, desc));
        return (vmmops_setdesc(vcpu_cookie(vm, vcpu), reg, desc));
}

static void
@@ -1785,8 +1779,7 @@ vm_run(struct vm *vm, struct vm_run *vmrun)
        restore_guest_fpustate(vcpu);

        vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
        error = vmmops_run(vm->cookie, vcpu->cookie, vcpu->nextrip, pmap,
            &evinfo);
        error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo);
        vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

        save_guest_fpustate(vcpu);
@@ -2292,7 +2285,7 @@ vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
        if (type < 0 || type >= VM_CAP_MAX)
                return (EINVAL);

        return (vmmops_getcap(vm->cookie, vcpu_cookie(vm, vcpu), type, retval));
        return (vmmops_getcap(vcpu_cookie(vm, vcpu), type, retval));
}

int
@@ -2304,7 +2297,7 @@ vm_set_capability(struct vm *vm, int vcpu, int type, int val)
        if (type < 0 || type >= VM_CAP_MAX)
                return (EINVAL);

        return (vmmops_setcap(vm->cookie, vcpu_cookie(vm, vcpu), type, val));
        return (vmmops_setcap(vcpu_cookie(vm, vcpu), type, val));
}

struct vlapic *
@@ -2877,7 +2870,7 @@ vm_snapshot_vcpu(struct vm *vm, struct vm_snapshot_meta *meta)
        for (i = 0; i < maxcpus; i++) {
                vcpu = &vm->vcpu[i];

                error = vmmops_vcpu_snapshot(vm->cookie, meta, vcpu->cookie);
                error = vmmops_vcpu_snapshot(vcpu->cookie, meta);
                if (error != 0) {
                        printf("%s: failed to snapshot vmcs/vmcb data for "
                            "vCPU: %d; error: %d\n", __func__, i, error);
@@ -2968,7 +2961,7 @@ vm_restore_time(struct vm *vm)
        for (i = 0; i < maxcpus; i++) {
                vcpu = &vm->vcpu[i];

                error = vmmops_restore_tsc(vm->cookie, vcpu->cookie,
                error = vmmops_restore_tsc(vcpu->cookie,
                    vcpu->tsc_offset - now);
                if (error)
                        return (error);

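Taken together, the call chain collapses to a single cookie: the generic layer stores the opaque per-vCPU pointer once, and each backend hook recovers whatever per-VM state it needs through the backpointer. A compact end-to-end sketch of the resulting flow (all names illustrative):

    #include <stdio.h>

    struct vmx { int vmid; };
    struct vmx_vcpu { struct vmx *vmx; int vcpuid; };

    /* Backend hook: per-vCPU cookie in, per-VM state via the backpointer. */
    static int
    backend_run(void *vcpui)
    {
            struct vmx_vcpu *vcpu = vcpui;

            printf("running vm %d vcpu %d\n", vcpu->vmx->vmid, vcpu->vcpuid);
            return (0);
    }

    /* Generic layer: stores only the opaque cookie. */
    struct vcpu { void *cookie; };

    int
    main(void)
    {
            struct vmx vmx = { .vmid = 1 };
            struct vmx_vcpu vmx_vcpu = { .vmx = &vmx, .vcpuid = 0 };
            struct vcpu vcpu = { .cookie = &vmx_vcpu };

            return (backend_run(vcpu.cookie));
    }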