Add the ability to have a 'fallback' search for memory ranges.

This set of ranges is consulted only if a standard memory range
isn't found, and fallback ranges are never installed in the
per-vCPU cache. Use this to implement the memory behaviour of the
PCI hole on x86 systems, where writes are ignored and reads always
return -1.

This allows breakpoints to be set when issuing a 'boot -d':
setting the breakpoint changes the PTE protection on kernel code,
which has the side effect of accessing the PCI hole since the pmap
layer hasn't been initialized yet (a bug, but one present in
existing FreeBSD releases, so it has to be handled).

Reviewed by:	neel
Obtained from:	NetApp
Author:		Peter Grehan
Date:		2013-02-22 00:46:32 +00:00
Parent:		5e0fb0052b
Commit:		0ab13648f5
Notes:		svn2git 2020-12-20 02:59:44 +00:00
		svn path=/head/; revision=247144

3 changed files with 65 additions and 17 deletions
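
In outline, emulate_mem() below now consults the standard range tree
first and installs hits in the per-vCPU cache; only on a miss does it
search the fallback tree, and fallback hits are left uncached. A
minimal self-contained sketch of that lookup order, using linear
tables in place of the red-black trees (primary, fallback, find_range
and cache_hint are illustrative names, not the identifiers from the
diff):

#include <stddef.h>
#include <stdint.h>

struct mem_range { uint64_t base, size; };

/* Stand-ins for the mmio_rb_root and mmio_rb_fallback trees. */
static struct mem_range primary[8], fallback[8];
static struct mem_range *cache_hint[4];	/* per-vCPU hint */

static struct mem_range *
find_range(struct mem_range *tbl, int n, uint64_t paddr)
{
	int i;

	for (i = 0; i < n; i++)
		if (tbl[i].size != 0 && paddr >= tbl[i].base &&
		    paddr - tbl[i].base < tbl[i].size)
			return (&tbl[i]);
	return (NULL);
}

static struct mem_range *
lookup(int vcpu, uint64_t paddr)
{
	struct mem_range *mr;

	/* A standard range is cached in the per-vCPU hint... */
	if ((mr = find_range(primary, 8, paddr)) != NULL) {
		cache_hint[vcpu] = mr;
		return (mr);
	}
	/* ...a fallback range is returned but never cached. */
	return (find_range(fallback, 8, paddr));
}

A NULL return here corresponds to the ESRCH case in emulate_mem().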

usr.sbin/bhyve/mem.c

@@ -62,7 +62,7 @@ struct mmio_rb_range {
 struct mmio_rb_tree;
 RB_PROTOTYPE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);
 
-RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rbroot;
+RB_HEAD(mmio_rb_tree, mmio_rb_range) mmio_rb_root, mmio_rb_fallback;
 
 /*
  * Per-vCPU cache. Since most accesses from a vCPU will be to
@@ -82,13 +82,14 @@ mmio_rb_range_compare(struct mmio_rb_range *a, struct mmio_rb_range *b)
 }
 
 static int
-mmio_rb_lookup(uint64_t addr, struct mmio_rb_range **entry)
+mmio_rb_lookup(struct mmio_rb_tree *rbt, uint64_t addr,
+    struct mmio_rb_range **entry)
 {
 	struct mmio_rb_range find, *res;
 
 	find.mr_base = find.mr_end = addr;
 
-	res = RB_FIND(mmio_rb_tree, &mmio_rbroot, &find);
+	res = RB_FIND(mmio_rb_tree, rbt, &find);
 
 	if (res != NULL) {
 		*entry = res;
@@ -99,11 +100,11 @@ mmio_rb_lookup(uint64_t addr, struct mmio_rb_range **entry)
 }
 
 static int
-mmio_rb_add(struct mmio_rb_range *new)
+mmio_rb_add(struct mmio_rb_tree *rbt, struct mmio_rb_range *new)
 {
 	struct mmio_rb_range *overlap;
 
-	overlap = RB_INSERT(mmio_rb_tree, &mmio_rbroot, new);
+	overlap = RB_INSERT(mmio_rb_tree, rbt, new);
 
 	if (overlap != NULL) {
 #ifdef RB_DEBUG
@@ -120,11 +121,11 @@ mmio_rb_add(struct mmio_rb_range *new)
 
 #if 0
 static void
-mmio_rb_dump(void)
+mmio_rb_dump(struct mmio_rb_tree *rbt)
 {
 	struct mmio_rb_range *np;
 
-	RB_FOREACH(np, mmio_rb_tree, &mmio_rbroot) {
+	RB_FOREACH(np, mmio_rb_tree, rbt) {
 		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
 		       np->mr_param.name);
 	}
@@ -172,22 +173,22 @@ emulate_mem(struct vmctx *ctx, int vcpu, uint64_t paddr, struct vie *vie)
 		entry = NULL;
 
 	if (entry == NULL) {
-		if (mmio_rb_lookup(paddr, &entry))
+		if (!mmio_rb_lookup(&mmio_rb_root, paddr, &entry)) {
+			/* Update the per-vCPU cache */
+			mmio_hint[vcpu] = entry;
+		} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
 			return (ESRCH);
-
-		/* Update the per-vCPU cache */
-		mmio_hint[vcpu] = entry;
+		}
 	}
 
-	assert(entry != NULL && entry == mmio_hint[vcpu]);
+	assert(entry != NULL);
 
 	err = vmm_emulate_instruction(ctx, vcpu, paddr, vie,
 				      mem_read, mem_write, &entry->mr_param);
 
 	return (err);
 }
 
-int
-register_mem(struct mem_range *memp)
+static int
+register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp)
 {
 	struct mmio_rb_range *mrp;
 	int err;
@@ -201,7 +202,7 @@ register_mem(struct mem_range *memp)
 		mrp->mr_base = memp->base;
 		mrp->mr_end = memp->base + memp->size - 1;
 
-		err = mmio_rb_add(mrp);
+		err = mmio_rb_add(rbt, mrp);
 		if (err)
 			free(mrp);
 	} else
@@ -210,9 +211,24 @@ register_mem(struct mem_range *memp)
 	return (err);
 }
 
+int
+register_mem(struct mem_range *memp)
+{
+
+	return (register_mem_int(&mmio_rb_root, memp));
+}
+
+int
+register_mem_fallback(struct mem_range *memp)
+{
+
+	return (register_mem_int(&mmio_rb_fallback, memp));
+}
+
 void
 init_mem(void)
 {
-	RB_INIT(&mmio_rbroot);
+	RB_INIT(&mmio_rb_root);
+	RB_INIT(&mmio_rb_fallback);
 }

usr.sbin/bhyve/mem.h

@@ -53,5 +53,6 @@ void init_mem(void);
 int emulate_mem(struct vmctx *, int vcpu, uint64_t paddr, struct vie *vie);
 
 int register_mem(struct mem_range *memp);
+int register_mem_fallback(struct mem_range *memp);
 
 #endif /* _MEM_H_ */
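
As a usage sketch of the new entry point (the range, handler, and
function names here are hypothetical; init_pci() in pci_emul.c below
does this for real), a caller fills in a mem_range exactly as for
register_mem():

/*
 * Hypothetical example; assumes the declarations from mem.h are in
 * scope, plus <string.h> and <assert.h>.
 */
static int
ignore_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
	       int size, uint64_t *val, void *arg1, long arg2)
{
	if (dir == MEM_F_READ)
		*val = 0xffffffffffffffff;	/* reads see all 1's */
	return (0);				/* writes are dropped */
}

static void
register_example_hole(void)
{
	struct mem_range mr;
	int error;

	memset(&mr, 0, sizeof(mr));
	mr.name = "example hole";
	mr.flags = MEM_F_RW;
	mr.base = 0xc0000000;
	mr.size = 0x10000000;
	mr.handler = ignore_handler;

	error = register_mem_fallback(&mr);
	assert(error == 0);
}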

usr.sbin/bhyve/pci_emul.c

@@ -846,12 +846,29 @@ pci_emul_iscap(struct pci_devinst *pi, int offset)
 	return (found);
 }
 
+static int
+pci_emul_fallback_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
+			  int size, uint64_t *val, void *arg1, long arg2)
+{
+	/*
+	 * Ignore writes; return 0xff's for reads. The mem read code
+	 * will take care of truncating to the correct size.
+	 */
+	if (dir == MEM_F_READ) {
+		*val = 0xffffffffffffffff;
+	}
+
+	return (0);
+}
+
 void
 init_pci(struct vmctx *ctx)
 {
+	struct mem_range memp;
 	struct pci_devemu *pde;
 	struct slotinfo *si;
 	int slot, func;
+	int error;
 
 	pci_emul_iobase = PCI_EMUL_IOBASE;
 	pci_emul_membase32 = PCI_EMUL_MEMBASE32;
@@ -879,6 +896,20 @@ init_pci(struct vmctx *ctx)
 	lirq[11].li_generic = 1;
 	lirq[12].li_generic = 1;
 	lirq[15].li_generic = 1;
+
+	/*
+	 * Setup the PCI hole to return 0xff's when accessed in a region
+	 * with no devices
+	 */
+	memset(&memp, 0, sizeof(struct mem_range));
+	memp.name = "PCI hole";
+	memp.flags = MEM_F_RW;
+	memp.base = lomem_sz;
+	memp.size = (4ULL * 1024 * 1024 * 1024) - lomem_sz;
+	memp.handler = pci_emul_fallback_handler;
+
+	error = register_mem_fallback(&memp);
+	assert(error == 0);
 }
 
 int
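
For concreteness, with an assumed guest that has 2 GB of memory below
4 GB, lomem_sz is 0x80000000, so the fallback range gets base
0x80000000 and size 4 GiB - 0x80000000 = 0x80000000 bytes, covering
guest-physical addresses 0x80000000 through 0xffffffff. Any access in
that window that no emulated device claims lands in
pci_emul_fallback_handler: reads return all 1's, truncated to the
access size by the mem read code, and writes are silently dropped.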