Fix an issue with restarting an "insb/insw/insl" instruction after a page fault on the destination buffer.

Prior to this change the page fault was only detected in vm_copyout(), i.e. after the I/O port access had already been performed. If the port access had side-effects (e.g. reading the uart FIFO), restarting the instruction resulted in incorrect behavior.

Fix this by validating the guest linear address before doing the I/O port emulation. If the validation causes a page fault exception to be injected into the guest, the instruction can now be restarted without any side-effects.
commit 6303b65d35
parent 4eb12144c0
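The substance of the change is the ordering inside bhyve's string I/O emulation: the guest linear address is translated into guest-physical iovec segments (and any page fault delivered to the guest) before the port handler runs, so a restarted instruction never repeats a device access. The sketch below condenses that flow for illustration only; the function name, its argument list, and the header list are invented for the example, 'gla' is assumed to be the already-computed guest linear address, and the authoritative code is in the inout.c hunks further down.

#include <sys/param.h>		/* nitems() */
#include <sys/_iovec.h>
#include <sys/mman.h>		/* PROT_READ, PROT_WRITE */

#include <machine/vmm.h>	/* struct vm_inout_str */

#include <vmmapi.h>

#include "inout.h"		/* inout_func_t, INOUT_RESTART, INOUT_ERROR */

/* Illustrative only: one "ins"/"outs" iteration with translate-before-I/O. */
static int
emulate_string_io_once(struct vmctx *ctx, int vcpu, struct vm_inout_str *vis,
    uint64_t gla, int in, int port, int bytes, inout_func_t handler, void *arg)
{
	struct iovec iov[2];	/* a 1/2/4 byte access spans at most two pages */
	uint32_t val;
	int error, retval;

	/* Translate first: a bad GLA injects #PF and the instruction restarts. */
	error = vm_gla2gpa(ctx, vcpu, &vis->paging, gla, bytes,
	    in ? PROT_WRITE : PROT_READ, iov, nitems(iov));
	if (error == 1)
		return (INOUT_RESTART);		/* no device side-effects yet */
	else if (error == -1)
		return (INOUT_ERROR);

	val = 0;
	if (!in)
		vm_copyin(ctx, vcpu, iov, &val, bytes);	/* "outs": fetch source */

	/* Only now touch the device; the guest buffer is known to be mapped. */
	retval = handler(ctx, vcpu, in, port, bytes, &val, arg);
	if (retval != 0)
		return (retval);

	if (in)
		vm_copyout(ctx, vcpu, &val, iov, bytes);	/* "ins": store result */

	return (0);
}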
lib/libvmmapi/vmmapi.c

@@ -33,6 +33,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/sysctl.h>
 #include <sys/ioctl.h>
 #include <sys/mman.h>
+#include <sys/_iovec.h>

 #include <machine/specialreg.h>
 #include <machine/param.h>
@@ -940,7 +941,7 @@ vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
 }

 static int
-vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
     uint64_t gla, int prot, int *fault, uint64_t *gpa)
 {
	struct vm_gla2gpa gg;
@@ -965,61 +966,80 @@ vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
 #endif

 int
-vm_copyin(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
-    uint64_t gla, void *vp, size_t len)
+vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt)
 {
-	char *dst;
-	const char *src;
 	uint64_t gpa;
-	int error, fault, n, off;
+	int error, fault, i, n, off;
+
+	for (i = 0; i < iovcnt; i++) {
+		iov[i].iov_base = 0;
+		iov[i].iov_len = 0;
+	}

-	dst = vp;
 	while (len) {
-		error = vm_gla2gpa(ctx, vcpu, paging, gla, PROT_READ,
-		    &fault, &gpa);
+		assert(iovcnt > 0);
+		error = gla2gpa(ctx, vcpu, paging, gla, prot, &fault, &gpa);
 		if (error)
 			return (-1);
 		if (fault)
 			return (1);

 		off = gpa & PAGE_MASK;
 		n = min(len, PAGE_SIZE - off);
-		src = vm_map_gpa(ctx, gpa, n);
-		bcopy(src, dst, n);
+
+		iov->iov_base = (void *)gpa;
+		iov->iov_len = n;
+		iov++;
+		iovcnt--;

 		gla += n;
-		dst += n;
 		len -= n;
 	}
 	return (0);
 }

-int
-vm_copyout(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
-    const void *vp, uint64_t gla, size_t len)
+void
+vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *iov, void *vp, size_t len)
 {
+	const char *src;
+	char *dst;
 	uint64_t gpa;
-	char *dst;
-	const char *src;
-	int error, fault, n, off;
+	size_t n;

-	src = vp;
+	dst = vp;
 	while (len) {
-		error = vm_gla2gpa(ctx, vcpu, paging, gla, PROT_WRITE,
-		    &fault, &gpa);
-		if (error)
-			return (-1);
-		if (fault)
-			return (1);
-
-		off = gpa & PAGE_MASK;
-		n = min(len, PAGE_SIZE - off);
-		dst = vm_map_gpa(ctx, gpa, n);
+		assert(iov->iov_len);
+		gpa = (uint64_t)iov->iov_base;
+		n = min(len, iov->iov_len);
+		src = vm_map_gpa(ctx, gpa, n);
 		bcopy(src, dst, n);

-		gla += n;
-		src += n;
+		iov++;
+		dst += n;
 		len -= n;
 	}
-	return (0);
+}
+
+void
+vm_copyout(struct vmctx *ctx, int vcpu, const void *vp, struct iovec *iov,
+    size_t len)
+{
+	const char *src;
+	char *dst;
+	uint64_t gpa;
+	size_t n;
+
+	src = vp;
+	while (len) {
+		assert(iov->iov_len);
+		gpa = (uint64_t)iov->iov_base;
+		n = min(len, iov->iov_len);
+		dst = vm_map_gpa(ctx, gpa, n);
+		bcopy(src, dst, n);
+
+		iov++;
+		src += n;
+		len -= n;
+	}
 }
lib/libvmmapi/vmmapi.h

@@ -29,6 +29,7 @@
 #ifndef _VMMAPI_H_
 #define	_VMMAPI_H_

+struct iovec;
 struct vmctx;
 enum x2apic_state;

@@ -109,10 +110,17 @@ int	vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state s);

 int	vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities);

-int	vm_copyin(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
-    uint64_t gla_src, void *dst, size_t len);
-int	vm_copyout(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
-    const void *src, uint64_t gla_dst, size_t len);
+/*
+ * Translate the GLA range [gla,gla+len) into GPA segments in 'iov'.
+ * The 'iovcnt' should be big enough to accommodate all GPA segments.
+ * Returns 0 on success, 1 on a guest fault condition and -1 otherwise.
+ */
+int	vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt);
+void	vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *guest_iov,
+    void *host_dst, size_t len);
+void	vm_copyout(struct vmctx *ctx, int vcpu, const void *host_src,
+    struct iovec *guest_iov, size_t len);

 /* Reset vcpu register state */
 int	vcpu_reset(struct vmctx *ctx, int vcpu);
usr.sbin/bhyve/inout.c

@@ -31,6 +31,8 @@ __FBSDID("$FreeBSD$");

 #include <sys/param.h>
 #include <sys/linker_set.h>
+#include <sys/_iovec.h>
+#include <sys/mman.h>

 #include <x86/psl.h>
 #include <x86/segments.h>
@@ -109,6 +111,7 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
	enum vm_reg_name idxreg;
	uint64_t gla, index, count;
	struct vm_inout_str *vis;
+	struct iovec iov[2];

	bytes = vmexit->u.inout.bytes;
	in = vmexit->u.inout.in;
@@ -157,6 +160,15 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
			return (INOUT_RESTART);
		}

+		error = vm_gla2gpa(ctx, vcpu, &vis->paging, gla, bytes,
+		    in ? PROT_WRITE : PROT_READ, iov, nitems(iov));
+		assert(error == 0 || error == 1 || error == -1);
+		if (error) {
+			retval = (error == 1) ? INOUT_RESTART :
+			    INOUT_ERROR;
+			break;
+		}
+
		if (vie_alignment_check(vis->paging.cpl, bytes,
		    vis->cr0, vis->rflags, gla)) {
			error = vm_inject_exception2(ctx, vcpu,
@@ -165,33 +177,16 @@ emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit, int strict)
			return (INOUT_RESTART);
		}

-
		val = 0;
-		if (!in) {
-			error = vm_copyin(ctx, vcpu, &vis->paging,
-			    gla, &val, bytes);
-			assert(error == 0 || error == 1 || error == -1);
-			if (error) {
-				retval = (error == 1) ? INOUT_RESTART :
-				    INOUT_ERROR;
-				break;
-			}
-		}
+		if (!in)
+			vm_copyin(ctx, vcpu, iov, &val, bytes);

		retval = handler(ctx, vcpu, in, port, bytes, &val, arg);
		if (retval != 0)
			break;

-		if (in) {
-			error = vm_copyout(ctx, vcpu, &vis->paging,
-			    &val, gla, bytes);
-			assert(error == 0 || error == 1 || error == -1);
-			if (error) {
-				retval = (error == 1) ? INOUT_RESTART :
-				    INOUT_ERROR;
-				break;
-			}
-		}
+		if (in)
+			vm_copyout(ctx, vcpu, &val, iov, bytes);

		/* Update index */
		if (vis->rflags & PSL_D)
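A note on sizing that the diff relies on implicitly: an "ins"/"outs" access moves at most 4 bytes, so the range [gla, gla+bytes) can cross at most one 4 KiB page boundary, which is why the two-entry iov[] in emulate_inout() is always enough for vm_gla2gpa(). A small self-contained check of that arithmetic (illustrative only, not part of the commit):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define	GUEST_PAGE_SHIFT	12	/* 4 KiB guest pages assumed */

/* Number of page-sized segments needed to cover [gla, gla + len). */
static int
segments_needed(uint64_t gla, size_t len)
{
	uint64_t first = gla >> GUEST_PAGE_SHIFT;
	uint64_t last = (gla + len - 1) >> GUEST_PAGE_SHIFT;

	return ((int)(last - first) + 1);
}

int
main(void)
{
	/* Worst case for "insl": a 4-byte access straddling a page boundary. */
	assert(segments_needed(0x1ffd, 4) == 2);
	/* An access contained in one page needs a single segment. */
	assert(segments_needed(0x1000, 4) == 1);
	return (0);
}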