Add segment protection and limits violation checks in vie_calculate_gla()
for 32-bit x86 guests. Tested using ins/outs executed in a FreeBSD/i386 guest.
This commit is contained in:
parent
857aabdb8d
commit
65ffa035a7
@ -321,6 +321,11 @@ struct seg_desc {
|
||||
uint32_t limit;
|
||||
uint32_t access;
|
||||
};
|
||||
#define SEG_DESC_TYPE(desc) ((desc)->access & 0x001f)
|
||||
#define SEG_DESC_PRESENT(desc) ((desc)->access & 0x0080)
|
||||
#define SEG_DESC_DEF32(desc) ((desc)->access & 0x4000)
|
||||
#define SEG_DESC_GRANULARITY(desc) ((desc)->access & 0x8000)
|
||||
#define SEG_DESC_UNUSABLE(desc) ((desc)->access & 0x10000)
|
||||
|
||||
enum vm_cpu_mode {
|
||||
CPU_MODE_COMPATIBILITY, /* IA-32E mode (CS.L = 0) */
|
||||
|
@ -29,6 +29,8 @@
|
||||
#ifndef _VMM_INSTRUCTION_EMUL_H_
|
||||
#define _VMM_INSTRUCTION_EMUL_H_
|
||||
|
||||
#include <sys/mman.h>
|
||||
|
||||
/*
|
||||
* Callback functions to read and write memory regions.
|
||||
*/
|
||||
@ -67,8 +69,9 @@ int vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla);
|
||||
|
||||
uint64_t vie_size2mask(int size);
|
||||
|
||||
int vie_calculate_gla(enum vm_cpu_mode cpu_mode, int addrsize,
|
||||
enum vm_reg_name seg, struct seg_desc *desc, uint64_t off, uint64_t *gla);
|
||||
int vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
|
||||
struct seg_desc *desc, uint64_t off, int length, int addrsize, int prot,
|
||||
uint64_t *gla);
|
||||
|
||||
#ifdef _KERNEL
|
||||
/*
|
||||
|
@ -608,16 +608,92 @@ vie_size2mask(int size)
|
||||
}
|
||||
|
||||
int
|
||||
vie_calculate_gla(enum vm_cpu_mode cpu_mode, int addrsize, enum vm_reg_name seg,
|
||||
struct seg_desc *desc, uint64_t offset, uint64_t *gla)
|
||||
vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
|
||||
struct seg_desc *desc, uint64_t offset, int length, int addrsize,
|
||||
int prot, uint64_t *gla)
|
||||
{
|
||||
uint64_t segbase;
|
||||
int glasize;
|
||||
uint64_t low_limit, high_limit, segbase;
|
||||
int glasize, type;
|
||||
|
||||
KASSERT(seg >= VM_REG_GUEST_ES && seg <= VM_REG_GUEST_GS,
|
||||
("%s: invalid segment %d", __func__, seg));
|
||||
KASSERT(length == 1 || length == 2 || length == 4 || length == 8,
|
||||
("%s: invalid operand size %d", __func__, length));
|
||||
KASSERT((prot & ~(PROT_READ | PROT_WRITE)) == 0,
|
||||
("%s: invalid prot %#x", __func__, prot));
|
||||
|
||||
glasize = (cpu_mode == CPU_MODE_64BIT) ? 8 : 4;
|
||||
if (cpu_mode == CPU_MODE_64BIT) {
|
||||
KASSERT(addrsize == 4 || addrsize == 8, ("%s: invalid address "
|
||||
"size %d for cpu_mode %d", __func__, addrsize, cpu_mode));
|
||||
glasize = 8;
|
||||
} else {
|
||||
KASSERT(addrsize == 2 || addrsize == 4, ("%s: invalid address "
|
||||
"size %d for cpu mode %d", __func__, addrsize, cpu_mode));
|
||||
glasize = 4;
|
||||
/*
|
||||
* If the segment selector is loaded with a NULL selector
|
||||
* then the descriptor is unusable and attempting to use
|
||||
* it results in a #GP(0).
|
||||
*/
|
||||
if (SEG_DESC_UNUSABLE(desc))
|
||||
return (-1);
|
||||
|
||||
/*
|
||||
* The processor generates a #NP exception when a segment
|
||||
* register is loaded with a selector that points to a
|
||||
* descriptor that is not present. If this was the case then
|
||||
* it would have been checked before the VM-exit.
|
||||
*/
|
||||
KASSERT(SEG_DESC_PRESENT(desc), ("segment %d not present: %#x",
|
||||
seg, desc->access));
|
||||
|
||||
/*
|
||||
* The descriptor type must indicate a code/data segment.
|
||||
*/
|
||||
type = SEG_DESC_TYPE(desc);
|
||||
KASSERT(type >= 16 && type <= 31, ("segment %d has invalid "
|
||||
"descriptor type %#x", seg, type));
|
||||
|
||||
if (prot & PROT_READ) {
|
||||
/* #GP on a read access to an exec-only code segment */
|
||||
if ((type & 0xA) == 0x8)
|
||||
return (-1);
|
||||
}
|
||||
|
||||
if (prot & PROT_WRITE) {
|
||||
/*
|
||||
* #GP on a write access to a code segment or a
|
||||
* read-only data segment.
|
||||
*/
|
||||
if (type & 0x8) /* code segment */
|
||||
return (-1);
|
||||
|
||||
if ((type & 0xA) == 0) /* read-only data seg */
|
||||
return (-1);
|
||||
}
|
||||
|
||||
/*
|
||||
* 'desc->limit' is fully expanded taking granularity into
|
||||
* account.
|
||||
*/
|
||||
if ((type & 0xC) == 0x4) {
|
||||
/* expand-down data segment */
|
||||
low_limit = desc->limit + 1;
|
||||
high_limit = SEG_DESC_DEF32(desc) ? 0xffffffff : 0xffff;
|
||||
} else {
|
||||
/* code segment or expand-up data segment */
|
||||
low_limit = 0;
|
||||
high_limit = desc->limit;
|
||||
}
|
||||
|
||||
while (length > 0) {
|
||||
offset &= vie_size2mask(addrsize);
|
||||
if (offset < low_limit || offset > high_limit)
|
||||
return (-1);
|
||||
offset++;
|
||||
length--;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* In 64-bit mode all segments except %fs and %gs have a segment
|
||||
|
@ -143,36 +143,6 @@ done:
|
||||
static int
|
||||
emulate_inout_str(struct vm *vm, int vcpuid, struct vm_exit *vmexit, bool *retu)
|
||||
{
|
||||
struct vm_inout_str *vis;
|
||||
int in;
|
||||
|
||||
vis = &vmexit->u.inout_str;
|
||||
in = vis->inout.in;
|
||||
|
||||
/*
|
||||
* ins/outs VM exit takes precedence over the following error
|
||||
* conditions that would ordinarily be checked by the processor:
|
||||
*
|
||||
* - #GP(0) due to segment being unusable.
|
||||
* - #GP(0) due to memory operand effective address outside the limit
|
||||
* of the segment.
|
||||
* - #AC(0) if alignment checking is enabled and an unaligned memory
|
||||
* reference is made at CPL=3
|
||||
*/
|
||||
|
||||
/*
|
||||
* XXX
|
||||
* inout string emulation only supported in 64-bit mode.
|
||||
*
|
||||
* The #GP(0) fault conditions described above don't apply in
|
||||
* 64-bit mode.
|
||||
*/
|
||||
if (vis->paging.cpu_mode != CPU_MODE_64BIT) {
|
||||
VCPU_CTR1(vm, vcpuid, "ins/outs not emulated in cpu mode %d",
|
||||
vis->paging.cpu_mode);
|
||||
return (EINVAL);
|
||||
}
|
||||
|
||||
*retu = true;
|
||||
return (0); /* Return to userspace to finish emulation */
|
||||
}
|
||||
|
@ -103,7 +103,7 @@ register_default_iohandler(int start, int size)
|
||||
int
|
||||
emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
|
||||
{
|
||||
int addrsize, bytes, flags, in, port, rep;
|
||||
int addrsize, bytes, flags, in, port, prot, rep;
|
||||
uint32_t val;
|
||||
inout_func_t handler;
|
||||
void *arg;
|
||||
@ -141,6 +141,7 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
|
||||
vis = &vmexit->u.inout_str;
|
||||
rep = vis->inout.rep;
|
||||
addrsize = vis->addrsize;
|
||||
prot = in ? PROT_WRITE : PROT_READ;
|
||||
assert(addrsize == 2 || addrsize == 4 || addrsize == 8);
|
||||
|
||||
/* Index register */
|
||||
@ -152,8 +153,8 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
|
||||
|
||||
while (count) {
|
||||
if (vie_calculate_gla(vis->paging.cpu_mode,
|
||||
vis->addrsize, vis->seg_name, &vis->seg_desc,
|
||||
index, &gla)) {
|
||||
vis->seg_name, &vis->seg_desc, index, bytes,
|
||||
addrsize, prot, &gla)) {
|
||||
error = vm_inject_exception2(ctx, vcpu,
|
||||
IDT_GP, 0);
|
||||
assert(error == 0);
|
||||
@ -161,7 +162,7 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
|
||||
}
|
||||
|
||||
error = vm_gla2gpa(ctx, vcpu, &vis->paging, gla, bytes,
|
||||
in ? PROT_WRITE : PROT_READ, iov, nitems(iov));
|
||||
prot, iov, nitems(iov));
|
||||
assert(error == 0 || error == 1 || error == -1);
|
||||
if (error) {
|
||||
retval = (error == 1) ? INOUT_RESTART :
|
||||
|
Loading…
x
Reference in New Issue
Block a user