Add RIP-relative addressing to the instruction decoder.

Rework the guest register fetch code to allow the RIP to
be extracted from the VMCS while the kernel decoder is
functioning.

Hit by the OpenBSD local-apic code.

Submitted by:	neel
Reviewed by:	grehan
Obtained from:	NetApp
This commit is contained in:
Peter Grehan 2013-04-25 04:56:43 +00:00
parent 3d8e9c1283
commit d3c11f40a5
6 changed files with 72 additions and 79 deletions

View File

@@ -135,12 +135,12 @@ enum vcpu_state {
};
int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state);
enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu);
enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu, int *hostcpu);
static int __inline
vcpu_is_running(struct vm *vm, int vcpu)
vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
{
return (vcpu_get_state(vm, vcpu) == VCPU_RUNNING);
return (vcpu_get_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
}
void *vcpu_stats(struct vm *vm, int vcpu);

View File

@@ -174,7 +174,7 @@ vmcs_seg_desc_encoding(int seg, uint32_t *base, uint32_t *lim, uint32_t *acc)
}
int
vmcs_getreg(struct vmcs *vmcs, int ident, uint64_t *retval)
vmcs_getreg(struct vmcs *vmcs, int running, int ident, uint64_t *retval)
{
int error;
uint32_t encoding;
@@ -194,14 +194,19 @@ vmcs_getreg(struct vmcs *vmcs, int ident, uint64_t *retval)
if (encoding == (uint32_t)-1)
return (EINVAL);
VMPTRLD(vmcs);
if (!running)
VMPTRLD(vmcs);
error = vmread(encoding, retval);
VMCLEAR(vmcs);
if (!running)
VMCLEAR(vmcs);
return (error);
}
int
vmcs_setreg(struct vmcs *vmcs, int ident, uint64_t val)
vmcs_setreg(struct vmcs *vmcs, int running, int ident, uint64_t val)
{
int error;
uint32_t encoding;
@@ -216,9 +221,14 @@ vmcs_setreg(struct vmcs *vmcs, int ident, uint64_t val)
val = vmcs_fix_regval(encoding, val);
VMPTRLD(vmcs);
if (!running)
VMPTRLD(vmcs);
error = vmwrite(encoding, val);
VMCLEAR(vmcs);
if (!running)
VMCLEAR(vmcs);
return (error);
}

View File

@@ -52,8 +52,8 @@ int vmcs_set_defaults(struct vmcs *vmcs, u_long host_rip, u_long host_rsp,
uint32_t procbased_ctls2, uint32_t exit_ctls,
uint32_t entry_ctls, u_long msr_bitmap,
uint16_t vpid);
int vmcs_getreg(struct vmcs *vmcs, int ident, uint64_t *retval);
int vmcs_setreg(struct vmcs *vmcs, int ident, uint64_t val);
int vmcs_getreg(struct vmcs *vmcs, int running, int ident, uint64_t *rv);
int vmcs_setreg(struct vmcs *vmcs, int running, int ident, uint64_t val);
int vmcs_getdesc(struct vmcs *vmcs, int ident,
struct seg_desc *desc);
int vmcs_setdesc(struct vmcs *vmcs, int ident,

View File

@@ -667,11 +667,11 @@ vmx_setup_cr_shadow(int which, struct vmcs *vmcs)
shadow_value = cr4_ones_mask;
}
error = vmcs_setreg(vmcs, VMCS_IDENT(mask_ident), mask_value);
error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
if (error)
return (error);
error = vmcs_setreg(vmcs, VMCS_IDENT(shadow_ident), shadow_value);
error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), shadow_value);
if (error)
return (error);
@@ -1617,49 +1617,34 @@ vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
static int
vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
{
int running, hostcpu;
struct vmx *vmx = arg;
running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
if (running && hostcpu != curcpu)
panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
return (0);
/*
* If the vcpu is running then don't mess with the VMCS.
*
* vmcs_getreg will VMCLEAR the vmcs when it is done which will cause
* the subsequent vmlaunch/vmresume to fail.
*/
if (vcpu_is_running(vmx->vm, vcpu))
panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
return (vmcs_getreg(&vmx->vmcs[vcpu], reg, retval));
return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
}
static int
vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
{
int error;
int error, hostcpu, running;
uint64_t ctls;
struct vmx *vmx = arg;
/*
* XXX Allow caller to set contents of the guest registers saved in
* the 'vmxctx' even though the vcpu might be running. We need this
* specifically to support the rdmsr emulation that will set the
* %eax and %edx registers during vm exit processing.
*/
running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
if (running && hostcpu != curcpu)
panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
return (0);
/*
* If the vcpu is running then don't mess with the VMCS.
*
* vmcs_setreg will VMCLEAR the vmcs when it is done which will cause
* the subsequent vmlaunch/vmresume to fail.
*/
if (vcpu_is_running(vmx->vm, vcpu))
panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
error = vmcs_setreg(&vmx->vmcs[vcpu], reg, val);
error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
if (error == 0) {
/*
@@ -1669,13 +1654,13 @@ vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
*/
if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
(reg == VM_REG_GUEST_EFER)) {
vmcs_getreg(&vmx->vmcs[vcpu],
vmcs_getreg(&vmx->vmcs[vcpu], running,
VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
if (val & EFER_LMA)
ctls |= VM_ENTRY_GUEST_LMA;
else
ctls &= ~VM_ENTRY_GUEST_LMA;
vmcs_setreg(&vmx->vmcs[vcpu],
vmcs_setreg(&vmx->vmcs[vcpu], running,
VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
}
}
@@ -1722,7 +1707,7 @@ vmx_inject(void *arg, int vcpu, int type, int vector, uint32_t code,
* If there is already an exception pending to be delivered to the
* vcpu then just return.
*/
error = vmcs_getreg(vmcs, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), &info);
error = vmcs_getreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), &info);
if (error)
return (error);
@@ -1731,12 +1716,12 @@ vmx_inject(void *arg, int vcpu, int type, int vector, uint32_t code,
info = vector | (type_map[type] << 8) | (code_valid ? 1 << 11 : 0);
info |= VMCS_INTERRUPTION_INFO_VALID;
error = vmcs_setreg(vmcs, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), info);
error = vmcs_setreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), info);
if (error != 0)
return (error);
if (code_valid) {
error = vmcs_setreg(vmcs,
error = vmcs_setreg(vmcs, 0,
VMCS_IDENT(VMCS_ENTRY_EXCEPTION_ERROR),
code);
}

View File

@@ -894,7 +894,7 @@ vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state state)
}
enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid)
vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
{
struct vcpu *vcpu;
enum vcpu_state state;
@@ -906,6 +906,8 @@ vcpu_get_state(struct vm *vm, int vcpuid)
vcpu_lock(vcpu);
state = vcpu->state;
if (hostcpu != NULL)
*hostcpu = vcpu->hostcpu;
vcpu_unlock(vcpu);
return (state);

View File

@@ -50,7 +50,10 @@ __FBSDID("$FreeBSD$");
#include <vmmapi.h>
#endif /* _KERNEL */
enum cpu_mode {
CPU_MODE_COMPATIBILITY, /* IA-32E mode (CS.L = 0) */
CPU_MODE_64BIT, /* IA-32E mode (CS.L = 1) */
};
/* struct vie_op.op_type */
enum {
@@ -132,33 +135,11 @@ static uint64_t size2mask[] = {
[8] = 0xffffffffffffffff,
};
static int
vie_valid_register(enum vm_reg_name reg)
{
#ifdef _KERNEL
/*
* XXX
* The operand register in which we store the result of the
* read must be a GPR that we can modify even if the vcpu
* is "running". All the GPRs qualify except for %rsp.
*
* This is a limitation of the vm_set_register() API
* and can be fixed if necessary.
*/
if (reg == VM_REG_GUEST_RSP)
return (0);
#endif
return (1);
}
static int
vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)
{
int error;
if (!vie_valid_register(reg))
return (EINVAL);
error = vm_get_register(vm, vcpuid, reg, rval);
return (error);
@@ -196,9 +177,6 @@ vie_read_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t *rval)
}
}
if (!vie_valid_register(reg))
return (EINVAL);
error = vm_get_register(vm, vcpuid, reg, &val);
*rval = val >> rshift;
return (error);
@@ -211,9 +189,6 @@ vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
int error;
uint64_t origval;
if (!vie_valid_register(reg))
return (EINVAL);
switch (size) {
case 1:
case 2:
@@ -583,13 +558,16 @@ decode_opcode(struct vie *vie)
return (0);
}
/*
* XXX assuming 32-bit or 64-bit guest
*/
static int
decode_modrm(struct vie *vie)
{
uint8_t x;
enum cpu_mode cpu_mode;
/*
* XXX assuming that guest is in IA-32E 64-bit mode
*/
cpu_mode = CPU_MODE_64BIT;
if (vie_peek(vie, &x))
return (-1);
@@ -642,7 +620,18 @@ decode_modrm(struct vie *vie)
case VIE_MOD_INDIRECT:
if (vie->rm == VIE_RM_DISP32) {
vie->disp_bytes = 4;
vie->base_register = VM_REG_LAST; /* no base */
/*
* Table 2-7. RIP-Relative Addressing
*
* In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
* whereas in compatibility mode it just implies disp32.
*/
if (cpu_mode == CPU_MODE_64BIT)
vie->base_register = VM_REG_GUEST_RIP;
else
vie->base_register = VM_REG_LAST;
}
break;
}
@@ -812,6 +801,13 @@ verify_gla(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)
error, vie->base_register);
return (-1);
}
/*
* RIP-relative addressing starts from the following
* instruction
*/
if (vie->base_register == VM_REG_GUEST_RIP)
base += vie->num_valid;
}
idx = 0;