Do the linear address calculation for the ins/outs emulation using a new
API function 'vie_calculate_gla()'.

While the current implementation is simplistic, it forms the basis of doing
segmentation checks if the guest is in 32-bit protected mode.
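
As a rough sketch of how a caller is expected to use the new API (the
descriptor contents and offset below are invented for illustration and are
not taken from this change):

	/*
	 * Hypothetical example: linear address for %es:(%edi) in a guest
	 * running in 32-bit compatibility mode.  The descriptor base,
	 * limit and access bits are made-up values.
	 */
	struct seg_desc es_desc = { .base = 0x10000, .limit = 0xffff, .access = 0x93 };
	uint64_t gla;

	if (vie_calculate_gla(CPU_MODE_COMPATIBILITY, 4, VM_REG_GUEST_ES,
	    &es_desc, 0x1234, &gla) == 0) {
		/* gla == (0x10000 + 0x1234) & 0xffffffff == 0x11234 */
	}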
Neel Natu 2014-05-25 00:57:24 +00:00
parent 497dcf4c36
commit 5382c19d81
6 changed files with 53 additions and 68 deletions

sys/amd64/include/vmm.h

@@ -426,7 +426,6 @@ struct vm_inout_str {
 	int		addrsize;
 	enum vm_reg_name seg_name;
 	struct seg_desc seg_desc;
-	uint64_t	gla;	/* may be set to VIE_INVALID_GLA */
 };
 
 struct vm_exit {

sys/amd64/include/vmm_instruction_emul.h

@@ -67,6 +67,9 @@ int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla);
 
 uint64_t vie_size2mask(int size);
 
+int vie_calculate_gla(enum vm_cpu_mode cpu_mode, int addrsize,
+    enum vm_reg_name seg, struct seg_desc *desc, uint64_t off, uint64_t *gla);
+
 #ifdef _KERNEL
 /*
  * APIs to fetch and decode the instruction from nested page fault handler.
@@ -89,9 +92,6 @@ int vmm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
 
 void vie_init(struct vie *vie);
 
-uint64_t vie_segbase(enum vm_reg_name segment, enum vm_cpu_mode cpu_mode,
-    const struct seg_desc *desc);
-
 /*
  * Decode the instruction fetched into 'vie' so it can be emulated.
  *

sys/amd64/vmm/intel/vmx.c

@@ -2012,7 +2012,6 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
 			vis->count = inout_str_count(vmx, vcpu, vis->inout.rep);
 			vis->addrsize = inout_str_addrsize(inst_info);
 			inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
-			vis->gla = vmcs_gla();
 		}
 		break;
 	case EXIT_REASON_CPUID:

sys/amd64/vmm/vmm_instruction_emul.c

@@ -607,6 +607,38 @@ vie_size2mask(int size)
 	return (size2mask[size]);
 }
 
+int
+vie_calculate_gla(enum vm_cpu_mode cpu_mode, int addrsize, enum vm_reg_name seg,
+    struct seg_desc *desc, uint64_t offset, uint64_t *gla)
+{
+	uint64_t segbase;
+	int	glasize;
+
+	KASSERT(seg >= VM_REG_GUEST_ES && seg <= VM_REG_GUEST_GS,
+	    ("%s: invalid segment %d", __func__, seg));
+
+	glasize = (cpu_mode == CPU_MODE_64BIT) ? 8 : 4;
+
+	/*
+	 * In 64-bit mode all segments except %fs and %gs have a segment
+	 * base address of 0.
+	 */
+	if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
+	    seg != VM_REG_GUEST_GS) {
+		segbase = 0;
+	} else {
+		segbase = desc->base;
+	}
+
+	/*
+	 * Truncate 'offset' to the effective address size before adding
+	 * it to the segment base.
+	 */
+	offset &= vie_size2mask(addrsize);
+	*gla = (segbase + offset) & vie_size2mask(glasize);
+
+	return (0);
+}
+
 #ifdef _KERNEL
 void
 vie_init(struct vie *vie)
@@ -1271,42 +1303,4 @@ vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
 
 	return (0);
 }
-
-uint64_t
-vie_segbase(enum vm_reg_name seg, enum vm_cpu_mode cpu_mode,
-    const struct seg_desc *desc)
-{
-	int	basesize;
-
-	basesize = 4;	/* default segment width in bytes */
-
-	switch (seg) {
-	case VM_REG_GUEST_ES:
-	case VM_REG_GUEST_CS:
-	case VM_REG_GUEST_SS:
-	case VM_REG_GUEST_DS:
-		if (cpu_mode == CPU_MODE_64BIT) {
-			/*
-			 * Segments having an implicit base address of 0
-			 * in 64-bit mode.
-			 */
-			return (0);
-		}
-		break;
-	case VM_REG_GUEST_FS:
-	case VM_REG_GUEST_GS:
-		if (cpu_mode == CPU_MODE_64BIT) {
-			/*
-			 * In 64-bit mode the FS and GS base address is 8 bytes
-			 * wide.
-			 */
-			basesize = 8;
-		}
-		break;
-	default:
-		panic("%s: invalid segment register %d", __func__, seg);
-	}
-
-	return (desc->base & size2mask[basesize]);
-}
 #endif /* _KERNEL */
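
One detail of the new function worth calling out is the pair of masking
steps: the offset is truncated to the effective address size before the add,
while the sum is truncated to the linear-address width (4 bytes, or 8 in
64-bit mode). A worked example with invented numbers:

	/*
	 * addrsize = 2 (16-bit), segment base = 0xf000, offset = 0x1fffe:
	 *
	 *   offset &= vie_size2mask(2);             offset == 0xfffe
	 *   gla = (0xf000 + 0xfffe) & 0xffffffff;   gla == 0x1effe
	 *
	 * The carry out of bit 15 survives because the sum is masked to
	 * the 32-bit linear-address width, not back down to the 16-bit
	 * effective address size.
	 */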

sys/amd64/vmm/vmm_ioport.c

@@ -144,7 +144,6 @@ static int
 emulate_inout_str(struct vm *vm, int vcpuid, struct vm_exit *vmexit, bool *retu)
 {
 	struct vm_inout_str *vis;
-	uint64_t gla, index, segbase;
 	int in;
 
 	vis = &vmexit->u.inout_str;
@@ -182,21 +181,6 @@ emulate_inout_str(struct vm *vm, int vcpuid, struct vm_exit *vmexit, bool *retu)
 		return (EINVAL);
 	}
 
-	segbase = vie_segbase(vis->seg_name, vis->paging.cpu_mode,
-	    &vis->seg_desc);
-	index = vis->index & vie_size2mask(vis->addrsize);
-	gla = segbase + index;
-
-	/*
-	 * Verify that the computed linear address matches with the one
-	 * provided by hardware.
-	 */
-	if (vis->gla != VIE_INVALID_GLA) {
-		KASSERT(gla == vis->gla, ("%s: gla mismatch "
-		    "%#lx/%#lx", __func__, gla, vis->gla));
-	}
-	vis->gla = gla;
-
 	*retu = true;
 	return (0);	/* Return to userspace to finish emulation */
 }

usr.sbin/bhyve/inout.c

@@ -147,15 +147,25 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
 	/* Count register */
 	count = vis->count & vie_size2mask(addrsize);
 
-	if (vie_alignment_check(vis->paging.cpl, bytes, vis->cr0,
-	    vis->rflags, vis->gla)) {
-		error = vm_inject_exception2(ctx, vcpu, IDT_AC, 0);
-		assert(error == 0);
-		return (INOUT_RESTART);
-	}
-
-	gla = vis->gla;
 	while (count) {
+		if (vie_calculate_gla(vis->paging.cpu_mode,
+		    vis->addrsize, vis->seg_name, &vis->seg_desc,
+		    index, &gla)) {
+			error = vm_inject_exception2(ctx, vcpu,
+			    IDT_GP, 0);
+			assert(error == 0);
+			return (INOUT_RESTART);
+		}
+
+		if (vie_alignment_check(vis->paging.cpl, bytes,
+		    vis->cr0, vis->rflags, gla)) {
+			error = vm_inject_exception2(ctx, vcpu,
+			    IDT_AC, 0);
+			assert(error == 0);
+			return (INOUT_RESTART);
+		}
+
 		val = 0;
 		if (!in) {
 			error = vm_copyin(ctx, vcpu, &vis->paging,
@@ -190,7 +200,6 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
 
 		index += bytes;
 		count--;
-		gla += bytes;
 	}
 
 	/* Update index register */