Fix "MOVS" instruction memory-to-MMIO emulation. Currently, updates to
%rdi, %rsi, etc. are inadvertently bypassed, along with the check to see whether the instruction needs to be repeated per the 'rep' prefix. Also add "MOVS" instruction support for the 'MMIO to MMIO' case.

Reviewed by: neel
This commit is contained in:
parent
73d4ced07c
commit
ef7c2a82ed
@ -979,6 +979,18 @@ gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
|
||||
return (error);
|
||||
}
|
||||
|
||||
int
|
||||
vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
|
||||
uint64_t gla, int prot, uint64_t *gpa)
|
||||
{
|
||||
int error, fault;
|
||||
|
||||
error = gla2gpa(ctx, vcpu, paging, gla, prot, &fault, gpa);
|
||||
if (fault)
|
||||
error = fault;
|
||||
return (error);
|
||||
}
|
||||
|
||||
#ifndef min
|
||||
#define min(a,b) (((a) < (b)) ? (a) : (b))
|
||||
#endif
|
||||
|
@ -63,6 +63,8 @@ int vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa, size_t *ret_len,
|
||||
int vm_setup_memory(struct vmctx *ctx, size_t len, enum vm_mmap_style s);
|
||||
void *vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len);
|
||||
int vm_get_gpa_pmap(struct vmctx *, uint64_t gpa, uint64_t *pte, int *num);
|
||||
int vm_gla2gpa(struct vmctx *, int vcpuid, struct vm_guest_paging *paging,
|
||||
uint64_t gla, int prot, uint64_t *gpa);
|
||||
uint32_t vm_get_lowmem_limit(struct vmctx *ctx);
|
||||
void vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit);
|
||||
void vm_set_memflags(struct vmctx *ctx, int flags);
|
||||
|
@ -90,7 +90,7 @@ int vmm_fetch_instruction(struct vm *vm, int cpuid,
|
||||
* Returns 1 if an exception was injected into the guest.
|
||||
* Returns -1 otherwise.
|
||||
*/
|
||||
int vmm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
|
||||
int vm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
|
||||
uint64_t gla, int prot, uint64_t *gpa);
|
||||
|
||||
void vie_init(struct vie *vie, const char *inst_bytes, int inst_length);
|
||||
|
@ -2332,7 +2332,7 @@ vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
|
||||
remaining = len;
|
||||
while (remaining > 0) {
|
||||
KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
|
||||
error = vmm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa);
|
||||
error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa);
|
||||
if (error)
|
||||
return (error);
|
||||
off = gpa & PAGE_MASK;
|
||||
|
@ -440,10 +440,10 @@ vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
|
||||
CTASSERT(PROT_WRITE == VM_PROT_WRITE);
|
||||
CTASSERT(PROT_EXEC == VM_PROT_EXECUTE);
|
||||
gg = (struct vm_gla2gpa *)data;
|
||||
error = vmm_gla2gpa(sc->vm, gg->vcpuid, &gg->paging, gg->gla,
|
||||
error = vm_gla2gpa(sc->vm, gg->vcpuid, &gg->paging, gg->gla,
|
||||
gg->prot, &gg->gpa);
|
||||
KASSERT(error == 0 || error == 1 || error == -1,
|
||||
("%s: vmm_gla2gpa unknown error %d", __func__, error));
|
||||
("%s: vm_gla2gpa unknown error %d", __func__, error));
|
||||
if (error >= 0) {
|
||||
/*
|
||||
* error = 0: the translation was successful
|
||||
|
@ -634,7 +634,7 @@ emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
|
||||
#else
|
||||
struct iovec copyinfo[2];
|
||||
#endif
|
||||
uint64_t dstaddr, srcaddr, val;
|
||||
uint64_t dstaddr, srcaddr, dstgpa, srcgpa, val;
|
||||
uint64_t rcx, rdi, rsi, rflags;
|
||||
int error, opsize, seg, repeat;
|
||||
|
||||
@ -669,7 +669,7 @@ emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
|
||||
* (1) memory memory n/a
|
||||
* (2) memory mmio emulated
|
||||
* (3) mmio memory emulated
|
||||
* (4) mmio mmio not emulated
|
||||
* (4) mmio mmio emulated
|
||||
*
|
||||
* At this point we don't have sufficient information to distinguish
|
||||
* between (2), (3) and (4). We use 'vm_copy_setup()' to tease this
|
||||
@ -694,7 +694,8 @@ emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
|
||||
vm_copyin(vm, vcpuid, copyinfo, &val, opsize);
|
||||
vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
|
||||
error = memwrite(vm, vcpuid, gpa, val, opsize, arg);
|
||||
goto done;
|
||||
if (error)
|
||||
goto done;
|
||||
} else if (error > 0) {
|
||||
/*
|
||||
* Resume guest execution to handle fault.
|
||||
@ -705,37 +706,55 @@ emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
|
||||
* 'vm_copy_setup()' is expected to fail for cases (3) and (4)
|
||||
* if 'srcaddr' is in the mmio space.
|
||||
*/
|
||||
}
|
||||
|
||||
error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
|
||||
PROT_WRITE, VM_REG_GUEST_ES, VM_REG_GUEST_RDI, &dstaddr);
|
||||
if (error)
|
||||
goto done;
|
||||
|
||||
error = vm_copy_setup(vm, vcpuid, paging, dstaddr, opsize,
|
||||
PROT_WRITE, copyinfo, nitems(copyinfo));
|
||||
if (error == 0) {
|
||||
/*
|
||||
* case (3): read from MMIO and write to system memory.
|
||||
*
|
||||
* A MMIO read can have side-effects so we commit to it
|
||||
* only after vm_copy_setup() is successful. If a page-fault
|
||||
* needs to be injected into the guest then it will happen
|
||||
* before the MMIO read is attempted.
|
||||
*/
|
||||
error = memread(vm, vcpuid, gpa, &val, opsize, arg);
|
||||
error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
|
||||
PROT_WRITE, VM_REG_GUEST_ES, VM_REG_GUEST_RDI, &dstaddr);
|
||||
if (error)
|
||||
goto done;
|
||||
|
||||
vm_copyout(vm, vcpuid, &val, copyinfo, opsize);
|
||||
vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
|
||||
} else if (error > 0) {
|
||||
/*
|
||||
* Resume guest execution to handle fault.
|
||||
*/
|
||||
goto done;
|
||||
} else {
|
||||
goto done;
|
||||
error = vm_copy_setup(vm, vcpuid, paging, dstaddr, opsize,
|
||||
PROT_WRITE, copyinfo, nitems(copyinfo));
|
||||
if (error == 0) {
|
||||
/*
|
||||
* case (3): read from MMIO and write to system memory.
|
||||
*
|
||||
* A MMIO read can have side-effects so we
|
||||
* commit to it only after vm_copy_setup() is
|
||||
* successful. If a page-fault needs to be
|
||||
* injected into the guest then it will happen
|
||||
* before the MMIO read is attempted.
|
||||
*/
|
||||
error = memread(vm, vcpuid, gpa, &val, opsize, arg);
|
||||
if (error)
|
||||
goto done;
|
||||
|
||||
vm_copyout(vm, vcpuid, &val, copyinfo, opsize);
|
||||
vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
|
||||
} else if (error > 0) {
|
||||
/*
|
||||
* Resume guest execution to handle fault.
|
||||
*/
|
||||
goto done;
|
||||
} else {
|
||||
/*
|
||||
* Case (4): read from and write to mmio.
|
||||
*/
|
||||
error = vm_gla2gpa(vm, vcpuid, paging, srcaddr,
|
||||
PROT_READ, &srcgpa);
|
||||
if (error)
|
||||
goto done;
|
||||
error = memread(vm, vcpuid, srcgpa, &val, opsize, arg);
|
||||
if (error)
|
||||
goto done;
|
||||
|
||||
error = vm_gla2gpa(vm, vcpuid, paging, dstaddr,
|
||||
PROT_WRITE, &dstgpa);
|
||||
if (error)
|
||||
goto done;
|
||||
error = memwrite(vm, vcpuid, dstgpa, val, opsize, arg);
|
||||
if (error)
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSI, &rsi);
|
||||
@ -1465,7 +1484,7 @@ ptp_hold(struct vm *vm, vm_paddr_t ptpphys, size_t len, void **cookie)
|
||||
}
|
||||
|
||||
int
|
||||
vmm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
|
||||
vm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
|
||||
uint64_t gla, int prot, uint64_t *gpa)
|
||||
{
|
||||
int nlevels, pfcode, ptpshift, ptpindex, retval, usermode, writable;
|
||||
|
Loading…
Reference in New Issue
Block a user