Queue pending exceptions in the 'struct vcpu' instead of directly updating
the processor-specific VMCS or VMCB. The pending exception is delivered
right before entering the guest.
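
In outline, the change replaces "write the VMCS/VMCB now" with a small
queue-then-consume handshake. The sketch below is a condensed user-space
model of that pattern; the names are modeled on (but not identical to) the
kernel code in the vmm.c hunk further down:

#include <errno.h>
#include <stdint.h>

/* Mirrors the new 'struct vm_exception' in vmm_dev.h. */
struct vm_exception {
	int		cpuid;
	int		vector;
	uint32_t	error_code;
	int		error_code_valid;
};

/* Models the two fields this commit adds to the kernel's 'struct vcpu'. */
struct vcpu_model {
	struct vm_exception	exception;
	int			exception_pending;
};

/* Producer side, modeled on vm_inject_exception() in vmm.c below. */
static int
queue_exception(struct vcpu_model *vcpu, const struct vm_exception *vme)
{

	if (vme->vector < 0 || vme->vector >= 32)
		return (EINVAL);	/* only x86 hardware exceptions */
	if (vcpu->exception_pending)
		return (EBUSY);		/* at most one exception is queued */
	vcpu->exception = *vme;
	vcpu->exception_pending = 1;
	return (0);
}

/*
 * Consumer side, modeled on vm_exception_pending() in vmm.c below. The
 * processor backend calls this right before VM-entry and only then
 * programs the VMCS/VMCB.
 */
static int
take_pending_exception(struct vcpu_model *vcpu, struct vm_exception *vme)
{
	int pending;

	pending = vcpu->exception_pending;
	if (pending) {
		vcpu->exception_pending = 0;	/* cleared once consumed */
		*vme = vcpu->exception;
	}
	return (pending);
}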

The order of event injection into the guest is (sketched below):
- hardware exception
- NMI
- maskable interrupt
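
A minimal sketch of this precedence, continuing the model above; compare
with the new vmx_inject_interrupts() hunk in vmx.c below.
entry_slot_write_exception() is a hypothetical stand-in for the real
VMCS/VMCB entry-interruption helpers:

/* Hypothetical stand-in for writing the VM-entry interruption slot. */
static void
entry_slot_write_exception(int vector, int errcode_valid, uint32_t errcode)
{

	(void)vector; (void)errcode_valid; (void)errcode;
}

static void
inject_pending_events(struct vcpu_model *vcpu)
{
	struct vm_exception exc;

	/* 1. A queued hardware exception always goes first. */
	if (take_pending_exception(vcpu, &exc))
		entry_slot_write_exception(exc.vector,
		    exc.error_code_valid, exc.error_code);

	/*
	 * 2. NMI: injected only if the entry slot is still free;
	 *    otherwise "NMI window exiting" is armed and the NMI is
	 *    injected from a later VM-exit.
	 * 3. Maskable interrupt: same idea, via interrupt-window exiting.
	 */
}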

In the Intel VT-x case, a pending NMI or interrupt enables NMI-window or
interrupt-window exiting, so the event is injected as soon as possible
after the hardware exception. Also, since interrupts are inherently
asynchronous, injecting them after the hardware exception does not affect
correctness from the guest's perspective.
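
The fragment below is condensed from the shape of vmx_inject_interrupts()
in the vmx.c hunks further down (the identifiers are real vmx.c names; the
surrounding function, rflags/STI checks, and declarations are omitted). If
the VM-entry interruption slot is already valid -- for example, occupied
by the queued exception -- the interrupt is not lost:

	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	if ((info & VMCS_INTR_VALID) != 0) {
		/*
		 * Slot taken: request a VM-exit as soon as the guest can
		 * accept an interrupt and retry the injection from there.
		 */
		vmx_set_int_window_exiting(vmx, vcpu);
	} else {
		info = vector | VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
		vmcs_write(VMCS_ENTRY_INTR_INFO, info);
	}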

Rename the unused ioctl VM_INJECT_EVENT to VM_INJECT_EXCEPTION and restrict
it to delivering only x86 hardware exceptions. The new ioctl is used to
inject a general protection fault when the guest accesses an unimplemented
MSR; a user-space usage sketch follows.
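
From user space the renamed ioctl is reached through the libvmmapi
wrappers. A minimal sketch, mirroring the bhyverun.c hunk at the bottom of
this diff (vm_open() and vcpu setup are omitted, and IDT_GP is assumed to
come from <machine/segments.h>):

#include <assert.h>

#include <machine/segments.h>	/* IDT_GP */

#include <vmmapi.h>

/*
 * Queue #GP(0) on the vcpu; the kernel forces inst_length to 0 so the
 * guest %rip stays at the faulting instruction when it is restarted.
 */
static void
inject_gp0(struct vmctx *ctx, int vcpu)
{
	int error;

	error = vm_inject_exception2(ctx, vcpu, IDT_GP, 0 /* error code */);
	assert(error == 0);
}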

Discussed with:	grehan, jhb
Reviewed by:	jhb
Author:	Neel Natu
Date:	2014-02-26 00:52:05 +0000
commit	dc50650607 (parent 23659c9d9d)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=262506
10 changed files with 166 additions and 198 deletions

--- a/lib/libvmmapi/vmmapi.c
+++ b/lib/libvmmapi/vmmapi.c
@@ -343,35 +343,32 @@ vm_run(struct vmctx *ctx, int vcpu, uint64_t rip, struct vm_exit *vmexit)
 }
 
 static int
-vm_inject_event_real(struct vmctx *ctx, int vcpu, enum vm_event_type type,
-    int vector, int error_code, int error_code_valid)
+vm_inject_exception_real(struct vmctx *ctx, int vcpu, int vector,
+    int error_code, int error_code_valid)
 {
-	struct vm_event ev;
+	struct vm_exception exc;
 
-	bzero(&ev, sizeof(ev));
-	ev.cpuid = vcpu;
-	ev.type = type;
-	ev.vector = vector;
-	ev.error_code = error_code;
-	ev.error_code_valid = error_code_valid;
+	bzero(&exc, sizeof(exc));
+	exc.cpuid = vcpu;
+	exc.vector = vector;
+	exc.error_code = error_code;
+	exc.error_code_valid = error_code_valid;
 
-	return (ioctl(ctx->fd, VM_INJECT_EVENT, &ev));
+	return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
 }
 
 int
-vm_inject_event(struct vmctx *ctx, int vcpu, enum vm_event_type type,
-    int vector)
+vm_inject_exception(struct vmctx *ctx, int vcpu, int vector)
 {
 
-	return (vm_inject_event_real(ctx, vcpu, type, vector, 0, 0));
+	return (vm_inject_exception_real(ctx, vcpu, vector, 0, 0));
 }
 
 int
-vm_inject_event2(struct vmctx *ctx, int vcpu, enum vm_event_type type,
-    int vector, int error_code)
+vm_inject_exception2(struct vmctx *ctx, int vcpu, int vector, int errcode)
 {
 
-	return (vm_inject_event_real(ctx, vcpu, type, vector, error_code, 1));
+	return (vm_inject_exception_real(ctx, vcpu, vector, errcode, 1));
 }
 
 int

--- a/lib/libvmmapi/vmmapi.h
+++ b/lib/libvmmapi/vmmapi.h
@@ -62,10 +62,8 @@ int	vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *retval);
 int	vm_run(struct vmctx *ctx, int vcpu, uint64_t rip,
 	    struct vm_exit *ret_vmexit);
 int	vm_apicid2vcpu(struct vmctx *ctx, int apicid);
-int	vm_inject_event(struct vmctx *ctx, int vcpu, enum vm_event_type type,
-	    int vector);
-int	vm_inject_event2(struct vmctx *ctx, int vcpu, enum vm_event_type type,
-	    int vector, int error_code);
+int	vm_inject_exception(struct vmctx *ctx, int vcpu, int vec);
+int	vm_inject_exception2(struct vmctx *ctx, int vcpu, int vec, int errcode);
 int	vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector);
 int	vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector);
 int	vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg);

--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -34,6 +34,7 @@
 #define	VM_MAX_NAMELEN	32
 
 struct vm;
+struct vm_exception;
 struct vm_memory_segment;
 struct seg_desc;
 struct vm_exit;
@@ -62,9 +63,6 @@ typedef int	(*vmi_get_desc_t)(void *vmi, int vcpu, int num,
 	    struct seg_desc *desc);
 typedef int	(*vmi_set_desc_t)(void *vmi, int vcpu, int num,
 	    struct seg_desc *desc);
-typedef int	(*vmi_inject_event_t)(void *vmi, int vcpu,
-	    int type, int vector,
-	    uint32_t code, int code_valid);
 typedef int	(*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
 typedef int	(*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
 typedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
@@ -84,7 +82,6 @@ struct vmm_ops {
 	vmi_set_register_t	vmsetreg;
 	vmi_get_desc_t		vmgetdesc;
 	vmi_set_desc_t		vmsetdesc;
-	vmi_inject_event_t	vminject;
 	vmi_get_cap_t		vmgetcap;
 	vmi_set_cap_t		vmsetcap;
 	vmi_vmspace_alloc	vmspace_alloc;
@@ -117,8 +114,6 @@ int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
 int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
 		    struct seg_desc *desc);
 int vm_run(struct vm *vm, struct vm_run *vmrun);
-int vm_inject_event(struct vm *vm, int vcpu, int type,
-		    int vector, uint32_t error_code, int error_code_valid);
 int vm_inject_nmi(struct vm *vm, int vcpu);
 int vm_nmi_pending(struct vm *vm, int vcpuid);
 void vm_nmi_clear(struct vm *vm, int vcpuid);
@@ -192,26 +187,39 @@ void vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr);
 struct vmspace *vm_get_vmspace(struct vm *vm);
 int vm_assign_pptdev(struct vm *vm, int bus, int slot, int func);
 int vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func);
+
+/*
+ * Inject exception 'vme' into the guest vcpu. This function returns 0 on
+ * success and non-zero on failure.
+ *
+ * Wrapper functions like 'vm_inject_gp()' should be preferred to calling
+ * this function directly because they enforce the trap-like or fault-like
+ * behavior of an exception.
+ *
+ * This function should only be called in the context of the thread that is
+ * executing this vcpu.
+ */
+int vm_inject_exception(struct vm *vm, int vcpuid, struct vm_exception *vme);
+
+/*
+ * Returns 0 if there is no exception pending for this vcpu. Returns 1 if an
+ * exception is pending and also updates 'vme'. The pending exception is
+ * cleared when this function returns.
+ *
+ * This function should only be called in the context of the thread that is
+ * executing this vcpu.
+ */
+int vm_exception_pending(struct vm *vm, int vcpuid, struct vm_exception *vme);
+
+void vm_inject_gp(struct vm *vm, int vcpuid); /* general protection fault */
+void vm_inject_ud(struct vm *vm, int vcpuid); /* undefined instruction fault */
+
 #endif	/* KERNEL */
 
 #include <machine/vmm_instruction_emul.h>
 
 #define	VM_MAXCPU	16			/* maximum virtual cpus */
 
-/*
- * Identifiers for events that can be injected into the VM
- */
-enum vm_event_type {
-	VM_EVENT_NONE,
-	VM_HW_INTR,
-	VM_NMI,
-	VM_HW_EXCEPTION,
-	VM_SW_INTR,
-	VM_PRIV_SW_EXCEPTION,
-	VM_SW_EXCEPTION,
-	VM_EVENT_MAX
-};
-
 /*
  * Identifiers for architecturally defined registers.
  */
--- a/sys/amd64/include/vmm_dev.h
+++ b/sys/amd64/include/vmm_dev.h
@@ -58,9 +58,8 @@ struct vm_run {
 	struct vm_exit	vm_exit;
 };
 
-struct vm_event {
+struct vm_exception {
 	int		cpuid;
-	enum vm_event_type type;
 	int		vector;
 	uint32_t	error_code;
 	int		error_code_valid;
@@ -174,7 +173,7 @@ enum {
 	IOCNUM_GET_SEGMENT_DESCRIPTOR = 23,
 
 	/* interrupt injection */
-	IOCNUM_INJECT_EVENT = 30,
+	IOCNUM_INJECT_EXCEPTION = 30,
 	IOCNUM_LAPIC_IRQ = 31,
 	IOCNUM_INJECT_NMI = 32,
 	IOCNUM_IOAPIC_ASSERT_IRQ = 33,
@@ -215,8 +214,8 @@ enum {
 	_IOW('v', IOCNUM_SET_SEGMENT_DESCRIPTOR, struct vm_seg_desc)
#define	VM_GET_SEGMENT_DESCRIPTOR \
 	_IOWR('v', IOCNUM_GET_SEGMENT_DESCRIPTOR, struct vm_seg_desc)
-#define	VM_INJECT_EVENT \
-	_IOW('v', IOCNUM_INJECT_EVENT, struct vm_event)
+#define	VM_INJECT_EXCEPTION \
+	_IOW('v', IOCNUM_INJECT_EXCEPTION, struct vm_exception)
 #define	VM_LAPIC_IRQ \
 	_IOW('v', IOCNUM_LAPIC_IRQ, struct vm_lapic_irq)
 #define	VM_LAPIC_LOCAL_IRQ \

--- a/sys/amd64/vmm/amd/amdv.c
+++ b/sys/amd64/vmm/amd/amdv.c
@@ -114,15 +114,6 @@ amdv_setdesc(void *vmi, int vcpu, int num, struct seg_desc *desc)
 	return (EINVAL);
 }
 
-static int
-amdv_inject_event(void *vmi, int vcpu, int type, int vector,
-    uint32_t error_code, int error_code_valid)
-{
-
-	printf("amdv_inject_event: not implemented\n");
-	return (EINVAL);
-}
-
 static int
 amdv_getcap(void *arg, int vcpu, int type, int *retval)
 {
@@ -180,7 +171,6 @@ struct vmm_ops vmm_ops_amd = {
 	amdv_setreg,
 	amdv_getdesc,
 	amdv_setdesc,
-	amdv_inject_event,
 	amdv_getcap,
 	amdv_setcap,
 	amdv_vmspace_alloc,

--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/vmparam.h>
 
 #include <machine/vmm.h>
+#include <machine/vmm_dev.h>
 #include "vmm_host.h"
 #include "vmm_ipi.h"
 #include "vmm_msr.h"
@@ -935,7 +936,6 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
 		vmx->state[i].lastcpu = -1;
 		vmx->state[i].vpid = vpid[i];
-		vmx->state[i].user_event.intr_info = 0;
 
 		msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count);
@@ -1113,66 +1113,6 @@ vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
 #define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |		\
 			 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
 
-static void
-vmx_inject_user_event(struct vmx *vmx, int vcpu)
-{
-	struct vmxevent *user_event;
-	uint32_t info;
-
-	user_event = &vmx->state[vcpu].user_event;
-
-	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
-	KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_user_event: invalid "
-	    "VM-entry interruption information %#x", info));
-
-	vmcs_write(VMCS_ENTRY_INTR_INFO, user_event->intr_info);
-	if (user_event->intr_info & VMCS_INTR_DEL_ERRCODE)
-		vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, user_event->error_code);
-	user_event->intr_info = 0;
-}
-
-static void
-vmx_inject_exception(struct vmx *vmx, int vcpu, struct vm_exit *vmexit,
-    int fault, int errvalid, int errcode)
-{
-	uint32_t info;
-
-	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
-	KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_exception: invalid "
-	    "VM-entry interruption information %#x", info));
-
-	/*
-	 * Although INTR_T_HWEXCEPTION does not advance %rip, vmx_run()
-	 * always advances it, so we clear the instruction length to zero
-	 * explicitly.
-	 */
-	vmexit->inst_length = 0;
-	info = fault | VMCS_INTR_T_HWEXCEPTION | VMCS_INTR_VALID;
-	if (errvalid) {
-		info |= VMCS_INTR_DEL_ERRCODE;
-		vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, errcode);
-	}
-	vmcs_write(VMCS_ENTRY_INTR_INFO, info);
-
-	VCPU_CTR2(vmx->vm, vcpu, "Injecting fault %d (errcode %d)", fault,
-	    errcode);
-}
-
-/* All GP# faults VMM injects use an error code of 0. */
-static void
-vmx_inject_gp(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
-{
-
-	vmx_inject_exception(vmx, vcpu, vmexit, IDT_GP, 1, 0);
-}
-
-static void
-vmx_inject_ud(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
-{
-
-	vmx_inject_exception(vmx, vcpu, vmexit, IDT_UD, 0, 0);
-}
-
 static void
 vmx_inject_nmi(struct vmx *vmx, int vcpu)
 {
@@ -1202,10 +1142,27 @@ vmx_inject_nmi(struct vmx *vmx, int vcpu)
 static void
 vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
 {
+	struct vm_exception exc;
 	int vector, need_nmi_exiting;
 	uint64_t rflags;
 	uint32_t gi, info;
 
+	if (vm_exception_pending(vmx->vm, vcpu, &exc)) {
+		KASSERT(exc.vector >= 0 && exc.vector < 32,
+		    ("%s: invalid exception vector %d", __func__, exc.vector));
+
+		info = vmcs_read(VMCS_ENTRY_INTR_INFO);
+		KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject "
+		     "pending exception %d: %#x", __func__, exc.vector, info));
+
+		info = exc.vector | VMCS_INTR_T_HWEXCEPTION | VMCS_INTR_VALID;
+		if (exc.error_code_valid) {
+			info |= VMCS_INTR_DEL_ERRCODE;
+			vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, exc.error_code);
+		}
+		vmcs_write(VMCS_ENTRY_INTR_INFO, info);
+	}
+
 	if (vm_nmi_pending(vmx->vm, vcpu)) {
 		/*
 		 * If there are no conditions blocking NMI injection then
@@ -1238,24 +1195,6 @@ vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
 			vmx_set_nmi_window_exiting(vmx, vcpu);
 	}
 
-	/*
-	 * If there is a user injection event pending and there isn't
-	 * an interrupt queued already, inject the user event.
-	 */
-	if (vmx->state[vcpu].user_event.intr_info & VMCS_INTR_VALID) {
-		info = vmcs_read(VMCS_ENTRY_INTR_INFO);
-		if ((info & VMCS_INTR_VALID) == 0) {
-			vmx_inject_user_event(vmx, vcpu);
-		} else {
-			/*
-			 * XXX: Do we need to force an exit so this can
-			 * be injected?
-			 */
-			VCPU_CTR1(vmx->vm, vcpu, "Cannot inject user event "
-			    "due to VM-entry intr info %#x", info);
-		}
-	}
-
 	if (virtual_interrupt_delivery) {
 		vmx_inject_pir(vlapic);
 		return;
@@ -1299,6 +1238,7 @@ vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
 	 * This is expected and could happen for multiple reasons:
 	 * - A vectoring VM-entry was aborted due to astpending
 	 * - A VM-exit happened during event injection.
+	 * - An exception was injected above.
 	 * - An NMI was injected above or after "NMI window exiting"
 	 */
 	VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
@@ -1375,30 +1315,30 @@ vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
 	/* Only xcr0 is supported. */
 	if (vmxctx->guest_rcx != 0) {
-		vmx_inject_gp(vmx, vcpu, vmexit);
+		vm_inject_gp(vmx->vm, vcpu);
 		return (HANDLED);
 	}
 
 	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
 	if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
-		vmx_inject_ud(vmx, vcpu, vmexit);
+		vm_inject_ud(vmx->vm, vcpu);
 		return (HANDLED);
 	}
 
 	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
 	if ((xcrval & ~limits->xcr0_allowed) != 0) {
-		vmx_inject_gp(vmx, vcpu, vmexit);
+		vm_inject_gp(vmx->vm, vcpu);
 		return (HANDLED);
 	}
 
 	if (!(xcrval & XFEATURE_ENABLED_X87)) {
-		vmx_inject_gp(vmx, vcpu, vmexit);
+		vm_inject_gp(vmx->vm, vcpu);
 		return (HANDLED);
 	}
 
 	if ((xcrval & (XFEATURE_ENABLED_AVX | XFEATURE_ENABLED_SSE)) ==
 	    XFEATURE_ENABLED_AVX) {
-		vmx_inject_gp(vmx, vcpu, vmexit);
+		vm_inject_gp(vmx->vm, vcpu);
 		return (HANDLED);
 	}
@@ -2422,38 +2362,6 @@ vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
 	return (vmcs_setdesc(&vmx->vmcs[vcpu], reg, desc));
 }
 
-static int
-vmx_inject(void *arg, int vcpu, int type, int vector, uint32_t code,
-    int code_valid)
-{
-	struct vmx *vmx = arg;
-	struct vmxevent *user_event = &vmx->state[vcpu].user_event;
-
-	static uint32_t type_map[VM_EVENT_MAX] = {
-		0x1,		/* VM_EVENT_NONE */
-		0x0,		/* VM_HW_INTR */
-		0x2,		/* VM_NMI */
-		0x3,		/* VM_HW_EXCEPTION */
-		0x4,		/* VM_SW_INTR */
-		0x5,		/* VM_PRIV_SW_EXCEPTION */
-		0x6,		/* VM_SW_EXCEPTION */
-	};
-
-	/*
-	 * If there is already an exception pending to be delivered to the
-	 * vcpu then just return.
-	 */
-	if (user_event->intr_info & VMCS_INTR_VALID)
-		return (EAGAIN);
-
-	user_event->intr_info = vector | (type_map[type] << 8) | VMCS_INTR_VALID;
-	if (code_valid) {
-		user_event->intr_info |= VMCS_INTR_DEL_ERRCODE;
-		user_event->error_code = code;
-	}
-
-	return (0);
-}
-
 static int
 vmx_getcap(void *arg, int vcpu, int type, int *retval)
 {
@@ -2900,7 +2808,6 @@ struct vmm_ops vmm_ops_intel = {
 	vmx_setreg,
 	vmx_getdesc,
 	vmx_setdesc,
-	vmx_inject,
 	vmx_getcap,
 	vmx_setcap,
 	ept_vmspace_alloc,

--- a/sys/amd64/vmm/intel/vmx.h
+++ b/sys/amd64/vmm/intel/vmx.h
@@ -80,15 +80,9 @@ struct vmxcap {
 	uint32_t	proc_ctls2;
 };
 
-struct vmxevent {
-	uint32_t	intr_info;
-	uint32_t	error_code;
-};
-
 struct vmxstate {
 	int	lastcpu;	/* host cpu that this 'vcpu' last ran on */
 	uint16_t vpid;
-	struct vmxevent user_event;
 };
 
 struct apic_page {

--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -94,6 +94,8 @@ struct vcpu {
 	struct vm_exit	exitinfo;
 	enum x2apic_state x2apic_state;
 	int		nmi_pending;
+	struct vm_exception exception;
+	int		exception_pending;
 };
 
 #define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
@@ -157,8 +159,6 @@ static struct vmm_ops *ops;
 	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
 #define	VMSETDESC(vmi, vcpu, num, desc)		\
 	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
-#define	VMINJECT(vmi, vcpu, type, vec, ec, ecv)	\
-	(ops != NULL ? (*ops->vminject)(vmi, vcpu, type, vec, ec, ecv) : ENXIO)
 #define	VMGETCAP(vmi, vcpu, num, retval) \
 	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
 #define	VMSETCAP(vmi, vcpu, num, val)		\
@@ -1202,19 +1202,91 @@ vm_run(struct vm *vm, struct vm_run *vmrun)
 }
 
 int
-vm_inject_event(struct vm *vm, int vcpuid, int type,
-		int vector, uint32_t code, int code_valid)
+vm_inject_exception(struct vm *vm, int vcpuid, struct vm_exception *exception)
 {
+	struct vcpu *vcpu;
+
 	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
 		return (EINVAL);
 
-	if ((type > VM_EVENT_NONE && type < VM_EVENT_MAX) == 0)
+	if (exception->vector < 0 || exception->vector >= 32)
 		return (EINVAL);
 
-	if (vector < 0 || vector > 255)
-		return (EINVAL);
+	vcpu = &vm->vcpu[vcpuid];
 
-	return (VMINJECT(vm->cookie, vcpuid, type, vector, code, code_valid));
+	if (vcpu->exception_pending) {
+		VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
+		    "pending exception %d", exception->vector,
+		    vcpu->exception.vector);
+		return (EBUSY);
+	}
+
+	vcpu->exception_pending = 1;
+	vcpu->exception = *exception;
+	VCPU_CTR1(vm, vcpuid, "Exception %d pending", exception->vector);
+	return (0);
+}
+
+int
+vm_exception_pending(struct vm *vm, int vcpuid, struct vm_exception *exception)
+{
+	struct vcpu *vcpu;
+	int pending;
+
+	KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid));
+
+	vcpu = &vm->vcpu[vcpuid];
+	pending = vcpu->exception_pending;
+	if (pending) {
+		vcpu->exception_pending = 0;
+		*exception = vcpu->exception;
+		VCPU_CTR1(vm, vcpuid, "Exception %d delivered",
+		    exception->vector);
+	}
+	return (pending);
+}
+
+static void
+vm_inject_fault(struct vm *vm, int vcpuid, struct vm_exception *exception)
+{
+	struct vm_exit *vmexit;
+	int error;
+
+	error = vm_inject_exception(vm, vcpuid, exception);
+	KASSERT(error == 0, ("vm_inject_exception error %d", error));
+
+	/*
+	 * A fault-like exception allows the instruction to be restarted
+	 * after the exception handler returns.
+	 *
+	 * By setting the inst_length to 0 we ensure that the instruction
+	 * pointer remains at the faulting instruction.
+	 */
+	vmexit = vm_exitinfo(vm, vcpuid);
+	vmexit->inst_length = 0;
+}
+
+void
+vm_inject_gp(struct vm *vm, int vcpuid)
+{
+	struct vm_exception gpf = {
+		.vector = IDT_GP,
+		.error_code_valid = 1,
+		.error_code = 0
+	};
+
+	vm_inject_fault(vm, vcpuid, &gpf);
+}
+
+void
+vm_inject_ud(struct vm *vm, int vcpuid)
+{
+	struct vm_exception udf = {
+		.vector = IDT_UD,
+		.error_code_valid = 0
+	};
+
+	vm_inject_fault(vm, vcpuid, &udf);
 }
 
 static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");

--- a/sys/amd64/vmm/vmm_dev.c
+++ b/sys/amd64/vmm/vmm_dev.c
@@ -150,7 +150,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
 	struct vm_register *vmreg;
 	struct vm_seg_desc *vmsegdesc;
 	struct vm_run *vmrun;
-	struct vm_event *vmevent;
+	struct vm_exception *vmexc;
 	struct vm_lapic_irq *vmirq;
 	struct vm_lapic_msi *vmmsi;
 	struct vm_ioapic_irq *ioapic_irq;
@@ -181,7 +181,7 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
 	case VM_SET_REGISTER:
 	case VM_GET_SEGMENT_DESCRIPTOR:
 	case VM_SET_SEGMENT_DESCRIPTOR:
-	case VM_INJECT_EVENT:
+	case VM_INJECT_EXCEPTION:
 	case VM_GET_CAPABILITY:
 	case VM_SET_CAPABILITY:
 	case VM_PPTDEV_MSI:
@@ -282,12 +282,9 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
 		error = vm_unassign_pptdev(sc->vm, pptdev->bus, pptdev->slot,
 					   pptdev->func);
 		break;
-	case VM_INJECT_EVENT:
-		vmevent = (struct vm_event *)data;
-		error = vm_inject_event(sc->vm, vmevent->cpuid, vmevent->type,
-					vmevent->vector,
-					vmevent->error_code,
-					vmevent->error_code_valid);
+	case VM_INJECT_EXCEPTION:
+		vmexc = (struct vm_exception *)data;
+		error = vm_inject_exception(sc->vm, vmexc->cpuid, vmexc);
 		break;
 	case VM_INJECT_NMI:
 		vmnmi = (struct vm_nmi *)data;

--- a/usr.sbin/bhyve/bhyverun.c
+++ b/usr.sbin/bhyve/bhyverun.c
@@ -321,8 +321,11 @@ vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
 	if (error != 0) {
 		fprintf(stderr, "rdmsr to register %#x on vcpu %d\n",
 		    vme->u.msr.code, *pvcpu);
-		if (strictmsr)
-			return (VMEXIT_ABORT);
+		if (strictmsr) {
+			error = vm_inject_exception2(ctx, *pvcpu, IDT_GP, 0);
+			assert(error == 0);
+			return (VMEXIT_RESTART);
+		}
 	}
 
 	eax = val;
@@ -345,8 +348,11 @@ vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
 	if (error != 0) {
 		fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n",
 		    vme->u.msr.code, vme->u.msr.wval, *pvcpu);
-		if (strictmsr)
-			return (VMEXIT_ABORT);
+		if (strictmsr) {
+			error = vm_inject_exception2(ctx, *pvcpu, IDT_GP, 0);
+			assert(error == 0);
+			return (VMEXIT_RESTART);
+		}
 	}
 
 	return (VMEXIT_CONTINUE);
 }