Add PG_U (user/supervisor) checks when translating a guest linear address
to a guest physical address.

The PG_PS (page size) field is valid only in a PDE or a PDPTE, so it is now
checked only in non-terminal paging entries.

Ignore the upper 32 bits of CR3 for PAE paging.
commit b0752c3683 (parent 3be9640f2c)
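For context on the first change: on x86, a user-mode (CPL 3) access is permitted only if the U/S bit is set in every paging-structure entry visited during the walk, while supervisor-mode accesses are not subject to that bit (SMAP/SMEP aside). The standalone sketch below illustrates just that rule. It is not the bhyve code; the PG_V/PG_U constants and the walk_ok() helper are defined locally for the example.

#include <stdint.h>
#include <stdio.h>

#define PG_V    0x001   /* entry is valid/present */
#define PG_U    0x004   /* entry allows user-mode access */

/*
 * Return 0 if a walk over 'n' paging-structure entries permits the access,
 * -1 otherwise.  'usermode' mirrors (cpl == 3) in the patch.
 */
static int
walk_ok(const uint64_t *entries, int n, int usermode)
{
    int i;

    for (i = 0; i < n; i++) {
        if ((entries[i] & PG_V) == 0)
            return (-1);
        if (usermode && (entries[i] & PG_U) == 0)
            return (-1);        /* supervisor-only mapping */
    }
    return (0);
}

int
main(void)
{
    uint64_t kernel_only[2] = { PG_V, PG_V };
    uint64_t user_ok[2] = { PG_V | PG_U, PG_V | PG_U };

    printf("CPL3 -> kernel-only mapping: %d\n", walk_ok(kernel_only, 2, 1));
    printf("CPL3 -> user mapping:        %d\n", walk_ok(user_ok, 2, 1));
    return (0);
}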
@@ -361,6 +361,7 @@ struct vm_exit {
         uint64_t    cr3;
         enum vie_cpu_mode cpu_mode;
         enum vie_paging_mode paging_mode;
+        int         cpl;
         struct vie  vie;
     } inst_emul;
     /*
@@ -119,7 +119,8 @@ int vmm_emulate_instruction(void *vm, int cpuid, uint64_t gpa, struct vie *vie,
  */
 int vmm_fetch_instruction(struct vm *vm, int cpuid,
               uint64_t rip, int inst_length, uint64_t cr3,
-              enum vie_paging_mode paging_mode, struct vie *vie);
+              enum vie_paging_mode paging_mode, int cpl,
+              struct vie *vie);
 
 void vie_init(struct vie *vie);
 
@@ -1492,6 +1492,18 @@ vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
     return (HANDLED);
 }
 
+/*
+ * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
+ */
+static int
+vmx_cpl(void)
+{
+    uint32_t ssar;
+
+    ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
+    return ((ssar >> 5) & 0x3);
+}
+
 static enum vie_cpu_mode
 vmx_cpu_mode(void)
 {
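The VMCS guest-SS access-rights field mirrors the access byte of a segment descriptor (Type in bits 3:0, S in bit 4, DPL in bits 6:5, P in bit 7), which is why vmx_cpl() can recover the CPL with a shift and mask. A small hosted sketch of the same extraction, using made-up access-rights values rather than a real vmcs_read():

#include <stdint.h>
#include <stdio.h>

/*
 * Same extraction as vmx_cpl(): DPL lives in bits 6:5 of the guest-SS
 * access-rights field, which mirrors the segment descriptor access byte.
 */
static int
cpl_from_ss_access_rights(uint32_t ssar)
{
    return ((ssar >> 5) & 0x3);
}

int
main(void)
{
    /* Illustrative values only, not read from a real VMCS. */
    printf("ring 0 SS (0x93): CPL %d\n", cpl_from_ss_access_rights(0x93));
    printf("ring 3 SS (0xf3): CPL %d\n", cpl_from_ss_access_rights(0xf3));
    return (0);
}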
@@ -1516,6 +1528,18 @@ vmx_paging_mode(void)
     return (PAGING_MODE_PAE);
 }
 
+static void
+vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla)
+{
+    vmexit->exitcode = VM_EXITCODE_INST_EMUL;
+    vmexit->u.inst_emul.gpa = gpa;
+    vmexit->u.inst_emul.gla = gla;
+    vmexit->u.inst_emul.cr3 = vmcs_guest_cr3();
+    vmexit->u.inst_emul.cpu_mode = vmx_cpu_mode();
+    vmexit->u.inst_emul.paging_mode = vmx_paging_mode();
+    vmexit->u.inst_emul.cpl = vmx_cpl();
+}
+
 static int
 ept_fault_type(uint64_t ept_qual)
 {
@@ -1707,12 +1731,8 @@ vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
     }
 
     if (allowed) {
-        vmexit->exitcode = VM_EXITCODE_INST_EMUL;
-        vmexit->u.inst_emul.gpa = DEFAULT_APIC_BASE + offset;
-        vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
-        vmexit->u.inst_emul.cr3 = vmcs_guest_cr3();
-        vmexit->u.inst_emul.cpu_mode = vmx_cpu_mode();
-        vmexit->u.inst_emul.paging_mode = vmx_paging_mode();
+        vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset,
+            VIE_INVALID_GLA);
     }
 
     /*
@@ -1943,12 +1963,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
         vmexit->u.paging.fault_type = ept_fault_type(qual);
         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
     } else if (ept_emulation_fault(qual)) {
-        vmexit->exitcode = VM_EXITCODE_INST_EMUL;
-        vmexit->u.inst_emul.gpa = gpa;
-        vmexit->u.inst_emul.gla = vmcs_gla();
-        vmexit->u.inst_emul.cr3 = vmcs_guest_cr3();
-        vmexit->u.inst_emul.cpu_mode = vmx_cpu_mode();
-        vmexit->u.inst_emul.paging_mode = vmx_paging_mode();
+        vmexit_inst_emul(vmexit, gpa, vmcs_gla());
         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
     }
     /*
@@ -1131,7 +1131,7 @@ vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
     struct vie *vie;
     struct vcpu *vcpu;
     struct vm_exit *vme;
-    int error, inst_length;
+    int cpl, error, inst_length;
     uint64_t rip, gla, gpa, cr3;
     enum vie_cpu_mode cpu_mode;
     enum vie_paging_mode paging_mode;
@@ -1147,6 +1147,7 @@ vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
     gla = vme->u.inst_emul.gla;
     gpa = vme->u.inst_emul.gpa;
     cr3 = vme->u.inst_emul.cr3;
+    cpl = vme->u.inst_emul.cpl;
     cpu_mode = vme->u.inst_emul.cpu_mode;
     paging_mode = vme->u.inst_emul.paging_mode;
     vie = &vme->u.inst_emul.vie;
@@ -1155,7 +1156,7 @@ vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
 
     /* Fetch, decode and emulate the faulting instruction */
     if (vmm_fetch_instruction(vm, vcpuid, rip, inst_length, cr3,
-        paging_mode, vie) != 0)
+        paging_mode, cpl, vie) != 0)
         return (EFAULT);
 
     if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, vie) != 0)
@@ -572,14 +572,16 @@ vie_init(struct vie *vie)
 }
 
 static int
-gla2gpa(struct vm *vm, uint64_t gla, uint64_t ptpphys,
-    uint64_t *gpa, enum vie_paging_mode paging_mode)
+gla2gpa(struct vm *vm, uint64_t gla, uint64_t ptpphys, uint64_t *gpa,
+    enum vie_paging_mode paging_mode, int cpl)
 {
-    int nlevels, ptpshift, ptpindex;
+    int nlevels, ptpshift, ptpindex, usermode;
     uint64_t *ptpbase, pte, pgsize;
     uint32_t *ptpbase32, pte32;
     void *cookie;
 
+    usermode = (cpl == 3 ? 1 : 0);
+
     if (paging_mode == PAGING_MODE_FLAT) {
         *gpa = gla;
         return (0);
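Note that gla2gpa() folds the CPL down to a boolean: only CPL 3 counts as a user-mode access, while rings 0 through 2 are all treated as supervisor and skip the PG_U test, matching how the hardware applies the U/S bit. A trivial sketch of that mapping (is_usermode() is only an illustration, not part of the patch):

#include <stdio.h>

/* Mirrors the patch: only CPL 3 is treated as a user-mode access. */
static int
is_usermode(int cpl)
{
    return (cpl == 3 ? 1 : 0);
}

int
main(void)
{
    int cpl;

    for (cpl = 0; cpl <= 3; cpl++)
        printf("CPL %d -> %s\n", cpl,
            is_usermode(cpl) ? "user (PG_U enforced)" :
            "supervisor (PG_U not checked)");
    return (0);
}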
@@ -593,7 +595,7 @@ gla2gpa(struct vm *vm, uint64_t gla, uint64_t ptpphys,
 
             ptpbase32 = vm_gpa_hold(vm, ptpphys, PAGE_SIZE,
                         VM_PROT_READ, &cookie);
 
             if (ptpbase32 == NULL)
                 goto error;
 
@@ -608,7 +610,11 @@ gla2gpa(struct vm *vm, uint64_t gla, uint64_t ptpphys,
             if ((pte32 & PG_V) == 0)
                 goto error;
 
-            if (pte32 & PG_PS)
+            if (usermode && (pte32 & PG_U) == 0)
+                goto error;
+
+            /* XXX must be ignored if CR4.PSE=0 */
+            if (nlevels > 0 && (pte32 & PG_PS) != 0)
                 break;
 
             ptpphys = pte32;
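In the 32-bit walk, nlevels starts at 2 and is pre-decremented, so nlevels == 1 is the PDE and nlevels == 0 is the PTE; the new "nlevels > 0" guard therefore tests PG_PS only in the PDE, the one non-terminal entry where the bit can mean a 4MB page (and, per the XXX, only when CR4.PSE is set). The sketch below reproduces the index and shift arithmetic for an arbitrary sample address to show which level maps what; the address and PAGE_SHIFT definition are assumptions for the example:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12

int
main(void)
{
    /* Sample guest linear address, chosen only for illustration. */
    uint32_t gla = 0x00401234;
    int nlevels, ptpshift, ptpindex;

    /* Same loop structure as the 32-bit walk in gla2gpa(). */
    nlevels = 2;
    while (--nlevels >= 0) {
        ptpshift = PAGE_SHIFT + nlevels * 10;
        ptpindex = (gla >> ptpshift) & 0x3FF;
        printf("%s (nlevels=%d): index %d, maps %u KB per entry\n",
            nlevels > 0 ? "PDE" : "PTE", nlevels, ptpindex,
            (1u << ptpshift) / 1024);
    }
    return (0);
}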
@@ -621,8 +627,8 @@ gla2gpa(struct vm *vm, uint64_t gla, uint64_t ptpphys,
     }
 
     if (paging_mode == PAGING_MODE_PAE) {
-        /* Zero out the lower 5 bits and the upper 12 bits */
-        ptpphys >>= 5; ptpphys <<= 17; ptpphys >>= 12;
+        /* Zero out the lower 5 bits and the upper 32 bits */
+        ptpphys &= 0xffffffe0UL;
 
         ptpbase = vm_gpa_hold(vm, ptpphys, sizeof(*ptpbase) * 4,
             VM_PROT_READ, &cookie);
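The old PAE masking (ptpphys >>= 5; ptpphys <<= 17; ptpphys >>= 12;) clears bits 4:0 and bits 63:52 but leaves bits 51:32 intact, whereas CR3 is only 32 bits wide under PAE; the new mask keeps just bits 31:5, which is what the commit message means by ignoring the upper 32 bits. A small hosted program contrasting the two on a hypothetical CR3 value with junk above bit 31:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    /* Hypothetical CR3 with junk above bit 31 and in the low 5 bits. */
    uint64_t cr3 = 0x000000ff8765431fULL;
    uint64_t old_way = cr3, new_way = cr3;

    /* Previous code: clears bits 4:0 and 63:52 but keeps bits 51:32. */
    old_way >>= 5; old_way <<= 17; old_way >>= 12;

    /* New code: keep only bits 31:5 of CR3, as PAE requires. */
    new_way &= 0xffffffe0UL;

    printf("old mask: 0x%016jx\n", (uintmax_t)old_way);
    printf("new mask: 0x%016jx\n", (uintmax_t)new_way);
    return (0);
}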
@@ -663,7 +669,10 @@ gla2gpa(struct vm *vm, uint64_t gla, uint64_t ptpphys,
         if ((pte & PG_V) == 0)
             goto error;
 
-        if (pte & PG_PS) {
+        if (usermode && (pte & PG_U) == 0)
+            goto error;
+
+        if (nlevels > 0 && (pte & PG_PS) != 0) {
             if (pgsize > 1 * GB)
                 goto error;
             else
@@ -684,7 +693,7 @@ error:
 
 int
 vmm_fetch_instruction(struct vm *vm, int cpuid, uint64_t rip, int inst_length,
-              uint64_t cr3, enum vie_paging_mode paging_mode,
+              uint64_t cr3, enum vie_paging_mode paging_mode, int cpl,
               struct vie *vie)
 {
     int n, err, prot;
@@ -701,7 +710,7 @@ vmm_fetch_instruction(struct vm *vm, int cpuid, uint64_t rip, int inst_length,
 
     /* Copy the instruction into 'vie' */
     while (vie->num_valid < inst_length) {
-        err = gla2gpa(vm, rip, cr3, &gpa, paging_mode);
+        err = gla2gpa(vm, rip, cr3, &gpa, paging_mode, cpl);
         if (err)
             break;
 