Fix fault injection in bhyve.
The faulting instruction needs to be restarted after the exception handler is done handling the fault. bhyve now does this correctly by setting 'vmexit[vcpu].inst_length' to zero, so that %rip is not advanced past the faulting instruction.

A minor complication is that the fault injection APIs are used by instruction emulation code that is shared by vmm.ko and bhyve. Thus the argument that refers to a 'struct vm *' in the kernel, or a 'struct vmctx *' in userspace, needs to be loosely typed as a 'void *'.
parent 31a01497f2
commit d37f2adb38
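To illustrate the loose typing, here is a minimal sketch of a shared-emulation-style caller (the helper emulate_seg_check() and its parameters are invented for this example; only vm_inject_gp() comes from the new API added below):

/*
 * Sketch only: the same source can be built into vmm.ko, where 'vm' is
 * really a 'struct vm *', or into bhyve, where it is a 'struct vmctx *'.
 */
static int
emulate_seg_check(void *vm, int vcpuid, uint64_t gla, uint64_t seg_limit)
{
	if (gla > seg_limit) {
		/* Inject #GP; the faulting instruction will be restarted. */
		vm_inject_gp(vm, vcpuid);
		return (1);
	}
	return (0);
}

In the kernel this call resolves to vmm.ko's vm_inject_fault(), while in bhyve it resolves to the userspace vm_inject_fault() that also clears 'vmexit[vcpu].inst_length'.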
@@ -1146,30 +1146,3 @@ vm_set_intinfo(struct vmctx *ctx, int vcpu, uint64_t info1)
 	error = ioctl(ctx->fd, VM_SET_INTINFO, &vmii);
 	return (error);
 }
-
-void
-vm_inject_ss(struct vmctx *ctx, int vcpu, int errcode)
-{
-	int error;
-
-	error = vm_inject_exception2(ctx, vcpu, IDT_SS, errcode);
-	assert(error == 0);
-}
-
-void
-vm_inject_ac(struct vmctx *ctx, int vcpu, int errcode)
-{
-	int error;
-
-	error = vm_inject_exception2(ctx, vcpu, IDT_AC, errcode);
-	assert(error == 0);
-}
-
-void
-vm_inject_gp(struct vmctx *ctx, int vcpu, int errcode)
-{
-	int error;
-
-	error = vm_inject_exception2(ctx, vcpu, IDT_GP, errcode);
-	assert(error == 0);
-}
@@ -133,11 +133,6 @@ void vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *guest_iov,
 void vm_copyout(struct vmctx *ctx, int vcpu, const void *host_src,
     struct iovec *guest_iov, size_t len);
 
-/* Helper functions to inject exceptions */
-void vm_inject_ss(struct vmctx *ctx, int vcpu, int errcode);
-void vm_inject_ac(struct vmctx *ctx, int vcpu, int errcode);
-void vm_inject_gp(struct vmctx *ctx, int vcpu, int errcode);
-
 /* Reset vcpu register state */
 int vcpu_reset(struct vmctx *ctx, int vcpu);
 
@@ -29,6 +29,8 @@
 #ifndef _VMM_H_
 #define _VMM_H_
 
+#include <x86/segments.h>
+
 enum vm_suspend_how {
 	VM_SUSPEND_NONE,
 	VM_SUSPEND_RESET,
@@ -316,12 +318,6 @@ int vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *info);
 
 int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2);
 
-void vm_inject_gp(struct vm *vm, int vcpuid); /* general protection fault */
-void vm_inject_ud(struct vm *vm, int vcpuid); /* undefined instruction fault */
-void vm_inject_ac(struct vm *vm, int vcpuid, int errcode); /* #AC */
-void vm_inject_ss(struct vm *vm, int vcpuid, int errcode); /* #SS */
-void vm_inject_pf(struct vm *vm, int vcpuid, int error_code, uint64_t cr2);
-
 enum vm_reg_name vm_segment_name(int seg_encoding);
 
 struct vm_copyinfo {
@@ -579,4 +575,34 @@ struct vm_exit {
 	} u;
 };
 
+/* APIs to inject faults into the guest */
+void vm_inject_fault(void *vm, int vcpuid, int vector, int errcode_valid,
+    int errcode);
+
+static void __inline
+vm_inject_ud(void *vm, int vcpuid)
+{
+	vm_inject_fault(vm, vcpuid, IDT_UD, 0, 0);
+}
+
+static void __inline
+vm_inject_gp(void *vm, int vcpuid)
+{
+	vm_inject_fault(vm, vcpuid, IDT_GP, 1, 0);
+}
+
+static void __inline
+vm_inject_ac(void *vm, int vcpuid, int errcode)
+{
+	vm_inject_fault(vm, vcpuid, IDT_AC, 1, errcode);
+}
+
+static void __inline
+vm_inject_ss(void *vm, int vcpuid, int errcode)
+{
+	vm_inject_fault(vm, vcpuid, IDT_SS, 1, errcode);
+}
+
+void vm_inject_pf(void *vm, int vcpuid, int error_code, uint64_t cr2);
+
 #endif /* _VMM_H_ */
@@ -1689,13 +1689,21 @@ vm_inject_exception(struct vm *vm, int vcpuid, struct vm_exception *exception)
 	return (0);
 }
 
-static void
-vm_inject_fault(struct vm *vm, int vcpuid, struct vm_exception *exception)
+void
+vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid,
+    int errcode)
 {
+	struct vm_exception exception;
 	struct vm_exit *vmexit;
+	struct vm *vm;
 	int error;
 
-	error = vm_inject_exception(vm, vcpuid, exception);
+	vm = vmarg;
+
+	exception.vector = vector;
+	exception.error_code = errcode;
+	exception.error_code_valid = errcode_valid;
+	error = vm_inject_exception(vm, vcpuid, &exception);
 	KASSERT(error == 0, ("vm_inject_exception error %d", error));
 
 	/*
@@ -1710,69 +1718,19 @@ vm_inject_fault(struct vm *vm, int vcpuid, struct vm_exception *exception)
 }
 
 void
-vm_inject_pf(struct vm *vm, int vcpuid, int error_code, uint64_t cr2)
+vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2)
 {
-	struct vm_exception pf = {
-		.vector = IDT_PF,
-		.error_code_valid = 1,
-		.error_code = error_code
-	};
+	struct vm *vm;
 	int error;
 
+	vm = vmarg;
 	VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
 	    error_code, cr2);
 
 	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
 	KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));
 
-	vm_inject_fault(vm, vcpuid, &pf);
-}
-
-void
-vm_inject_gp(struct vm *vm, int vcpuid)
-{
-	struct vm_exception gpf = {
-		.vector = IDT_GP,
-		.error_code_valid = 1,
-		.error_code = 0
-	};
-
-	vm_inject_fault(vm, vcpuid, &gpf);
-}
-
-void
-vm_inject_ud(struct vm *vm, int vcpuid)
-{
-	struct vm_exception udf = {
-		.vector = IDT_UD,
-		.error_code_valid = 0
-	};
-
-	vm_inject_fault(vm, vcpuid, &udf);
-}
-
-void
-vm_inject_ac(struct vm *vm, int vcpuid, int error_code)
-{
-	struct vm_exception acf = {
-		.vector = IDT_AC,
-		.error_code_valid = 1,
-		.error_code = error_code
-	};
-
-	vm_inject_fault(vm, vcpuid, &acf);
-}
-
-void
-vm_inject_ss(struct vm *vm, int vcpuid, int error_code)
-{
-	struct vm_exception ssf = {
-		.vector = IDT_SS,
-		.error_code_valid = 1,
-		.error_code = error_code
-	};
-
-	vm_inject_fault(vm, vcpuid, &ssf);
+	vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
 }
 
 static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
@@ -96,7 +96,7 @@ static cpuset_t cpumask;
 
 static void vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip);
 
-struct vm_exit vmexit[VM_MAXCPU];
+static struct vm_exit vmexit[VM_MAXCPU];
 
 struct bhyvestats {
 	uint64_t vmexit_bogus;
@@ -182,6 +182,27 @@ pincpu_parse(const char *opt)
 	return (0);
 }
 
+void
+vm_inject_fault(void *arg, int vcpu, int vector, int errcode_valid,
+    int errcode)
+{
+	struct vmctx *ctx;
+	int error;
+
+	ctx = arg;
+	if (errcode_valid)
+		error = vm_inject_exception2(ctx, vcpu, vector, errcode);
+	else
+		error = vm_inject_exception(ctx, vcpu, vector);
+	assert(error == 0);
+
+	/*
+	 * Set the instruction length to 0 to ensure that the instruction is
+	 * restarted when the fault handler returns.
+	 */
+	vmexit[vcpu].inst_length = 0;
+}
+
 void *
 paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
 {
@@ -347,7 +368,7 @@ vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
 		fprintf(stderr, "rdmsr to register %#x on vcpu %d\n",
 		    vme->u.msr.code, *pvcpu);
 		if (strictmsr) {
-			vm_inject_gp(ctx, *pvcpu, 0);
+			vm_inject_gp(ctx, *pvcpu);
 			return (VMEXIT_RESTART);
 		}
 	}
@@ -373,7 +394,7 @@ vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
 		fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n",
 		    vme->u.msr.code, vme->u.msr.wval, *pvcpu);
 		if (strictmsr) {
-			vm_inject_gp(ctx, *pvcpu, 0);
+			vm_inject_gp(ctx, *pvcpu);
 			return (VMEXIT_RESTART);
 		}
 	}
@@ -157,7 +157,7 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
 		if (vie_calculate_gla(vis->paging.cpu_mode,
 		    vis->seg_name, &vis->seg_desc, index, bytes,
 		    addrsize, prot, &gla)) {
-			vm_inject_gp(ctx, vcpu, 0);
+			vm_inject_gp(ctx, vcpu);
 			retval = INOUT_RESTART;
 			break;
 		}
@@ -160,8 +160,6 @@ usd_to_seg_desc(struct user_segment_descriptor *usd)
 static void
 sel_exception(struct vmctx *ctx, int vcpu, int vector, uint16_t sel, int ext)
 {
-	int error;
-
 	/*
 	 * Bit 2 from the selector is retained as-is in the error code.
 	 *
@@ -174,8 +172,7 @@ sel_exception(struct vmctx *ctx, int vcpu, int vector, uint16_t sel, int ext)
 	sel &= ~0x3;
 	if (ext)
 		sel |= 0x1;
-	error = vm_inject_exception2(ctx, vcpu, vector, sel);
-	assert(error == 0);
+	vm_inject_fault(ctx, vcpu, vector, 1, sel);
 }
 
 static int
@@ -508,7 +505,7 @@ tss32_restore(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
 			 */
 			reserved = ~maxphyaddr | 0x1E6;
 			if (pdpte[i] & reserved) {
-				vm_inject_gp(ctx, vcpu, 0);
+				vm_inject_gp(ctx, vcpu);
 				return (VMEXIT_RESTART);
 			}
 		}