/*-
 * Copyright (c) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>

#include <machine/md_var.h>
#include <machine/platform.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>

uintptr_t moea64_get_unique_vsid(void);
void moea64_release_vsid(uint64_t vsid);
static void slb_zone_init(void *);

uma_zone_t slbt_zone;
uma_zone_t slb_cache_zone;

SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);

struct slbtnode {
	uint16_t	ua_alloc;
	uint8_t		ua_level;
	/* Only 36 bits needed for full 64-bit address space. */
	uint64_t	ua_base;
	union {
		struct slbtnode	*ua_child[16];
		struct slb	slb_entries[16];
	} u;
};

/*
 * For a full 64-bit address space, there are 36 bits in play in an
 * esid, so 8 levels, with the leaf being at level 0.
 *
 * |3333|3322|2222|2222|1111|1111|11  |    |    | esid
 * |5432|1098|7654|3210|9876|5432|1098|7654|3210| bits
 * +----+----+----+----+----+----+----+----+----+--------
 * | 8  | 7  | 6  | 5  | 4  | 3  | 2  | 1  | 0  | level
 */
#define UAD_ROOT_LEVEL  8
#define UAD_LEAF_LEVEL  0
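
/*
 * For example, for esid 0x123456789 esid2idx() selects one 4-bit
 * nibble per level, starting from the least significant end: 9 at
 * level 0, 8 at level 1, 7 at level 2, ..., 1 at level 8.
 * esid2base() clears everything below a node's level, so
 * esid2base(0x123456789, 1) == 0x123456700.
 */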

static inline int
esid2idx(uint64_t esid, int level)
{
	int shift;

	shift = level * 4;
	return ((esid >> shift) & 0xF);
}

/*
 * The ua_base field must have its low-order 4*(level+1) bits clear;
 * i.e. only the bits above them, which identify the node's position
 * in the tree, may be set.  uad_baseok() checks this invariant.
 */
#define uad_baseok(ua)                          \
	(esid2base(ua->ua_base, ua->ua_level) == ua->ua_base)

static inline uint64_t
esid2base(uint64_t esid, int level)
{
	uint64_t mask;
	int shift;

	shift = (level + 1) * 4;
	mask = ~((1ULL << shift) - 1);
	return (esid & mask);
}

/*
 * Allocate a new leaf node for the specified esid/vmhandle from the
 * parent node.
 */
static struct slb *
make_new_leaf(uint64_t esid, uint64_t slbv, struct slbtnode *parent)
{
	struct slbtnode *child;
	struct slb *retval;
	int idx;

	idx = esid2idx(esid, parent->ua_level);
	KASSERT(parent->u.ua_child[idx] == NULL, ("Child already exists!"));

	/* unlock and M_WAITOK and loop? */
	child = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(child != NULL, ("unhandled NULL case"));

	child->ua_level = UAD_LEAF_LEVEL;
	child->ua_base = esid2base(esid, child->ua_level);
	idx = esid2idx(esid, child->ua_level);
	child->u.slb_entries[idx].slbv = slbv;
	child->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
	setbit(&child->ua_alloc, idx);

	retval = &child->u.slb_entries[idx];

	/*
	 * The above stores must be visible before the next one, so
	 * that a lockless searcher always sees a valid path through
	 * the tree.
	 */
	powerpc_sync();

	idx = esid2idx(esid, parent->ua_level);
	parent->u.ua_child[idx] = child;
	setbit(&parent->ua_alloc, idx);

	return (retval);
}

/*
 * Allocate a new intermediate node to fit between the parent and
 * esid.
 */
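/*
 * For example, if an existing leaf covers ua_base 0x000000000 and a
 * mapping for esid 0x000000100 is added, the two esids first share a
 * base at level 2, so a level-2 intermediate node is created; the old
 * leaf hangs off index 0 and the new leaf for 0x100 off index 1.
 */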
static struct slbtnode*
make_intermediate(uint64_t esid, struct slbtnode *parent)
{
	struct slbtnode *child, *inter;
	int idx, level;

	idx = esid2idx(esid, parent->ua_level);
	child = parent->u.ua_child[idx];
	KASSERT(esid2base(esid, child->ua_level) != child->ua_base,
	    ("No need for an intermediate node?"));

	/*
	 * Find the level where the existing child and our new esid
	 * meet.  It must be lower than parent->ua_level or we would
	 * have chosen a different index in parent.
	 */
	level = child->ua_level + 1;
	while (esid2base(esid, level) !=
	    esid2base(child->ua_base, level))
		level++;
	KASSERT(level < parent->ua_level,
	    ("Found splitting level %d for %09jx and %09jx, "
	    "but it's the same as %p's",
	    level, esid, child->ua_base, parent));

	/* unlock and M_WAITOK and loop? */
	inter = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(inter != NULL, ("unhandled NULL case"));

	/* Set up intermediate node to point to child ... */
	inter->ua_level = level;
	inter->ua_base = esid2base(esid, inter->ua_level);
	idx = esid2idx(child->ua_base, inter->ua_level);
	inter->u.ua_child[idx] = child;
	setbit(&inter->ua_alloc, idx);
	powerpc_sync();

	/* Set up parent to point to intermediate node ... */
	idx = esid2idx(inter->ua_base, parent->ua_level);
	parent->u.ua_child[idx] = inter;
	setbit(&parent->ua_alloc, idx);

	return (inter);
}
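
/*
 * Compute the SLBV word (VSID and flags) for a kernel virtual address.
 */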
uint64_t
kernel_va_to_slbv(vm_offset_t va)
{
	uint64_t slbv;

	/* Set kernel VSID to deterministic value */
	slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT;

	/* Figure out if this is a large-page mapping */
	if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) {
		/*
		 * XXX: If we have set up a direct map, assumes
		 * all physical memory is mapped with large pages.
		 */
		if (mem_valid(va, 0) == 0)
			slbv |= SLBV_L;
	}

	return (slbv);
}
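
/*
 * Find the SLB entry for a user virtual address by walking the pmap's
 * SLB tree.  Returns NULL if no valid entry covers this segment.  The
 * tree may be searched without the pmap lock; writers order their
 * stores (see make_new_leaf()) so a lockless searcher always sees a
 * valid path.
 */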
struct slb *
user_va_to_slb_entry(pmap_t pm, vm_offset_t va)
{
	uint64_t esid = va >> ADDR_SR_SHFT;
	struct slbtnode *ua;
	int idx;

	ua = pm->pm_slb_tree_root;

	for (;;) {
		KASSERT(uad_baseok(ua), ("uad base %016jx level %d bad!",
		    ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		/*
		 * This code is specific to ppc64 where a load is
		 * atomic, so no need for atomic_load macro.
		 */
		if (ua->ua_level == UAD_LEAF_LEVEL)
			return ((ua->u.slb_entries[idx].slbe & SLBE_VALID) ?
			    &ua->u.slb_entries[idx] : NULL);

		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base)
			return (NULL);
	}

	return (NULL);
}
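
/*
 * Return the VSID for a virtual address, allocating a new user VSID
 * for the segment if the pmap does not yet have one.
 */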
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	struct slb *entry;

	/* Shortcut kernel case */
	if (pm == kernel_pmap)
		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));

	/*
	 * If there is no vsid for this VA, we need to add a new entry
	 * to the PMAP's segment table.
	 */

	entry = user_va_to_slb_entry(pm, va);

	if (entry == NULL)
		return (allocate_user_vsid(pm,
		    (uintptr_t)va >> ADDR_SR_SHFT, 0));

	return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
}
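
/*
 * Allocate a fresh VSID for the given user segment, record it in the
 * pmap's SLB tree (growing the tree as needed), and pre-spill the new
 * entry into the pmap's SLB cache.  The pmap lock must be held.
 */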
uint64_t
allocate_user_vsid(pmap_t pm, uint64_t esid, int large)
{
	uint64_t vsid, slbv;
	struct slbtnode *ua, *next, *inter;
	struct slb *slb;
	int idx;

	KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID"));

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	vsid = moea64_get_unique_vsid();

	slbv = vsid << SLBV_VSID_SHIFT;
	if (large)
		slbv |= SLBV_L;

	ua = pm->pm_slb_tree_root;

	/* Descend to the correct leaf or NULL pointer. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		    ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = slbv;
			eieio();
			ua->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT)
			    | SLBE_VALID;
			setbit(&ua->ua_alloc, idx);
			slb = &ua->u.slb_entries[idx];
			break;
		}

		next = ua->u.ua_child[idx];
		if (next == NULL) {
			slb = make_new_leaf(esid, slbv, ua);
			break;
		}

		/*
		 * Check if the next item down has an okay ua_base.
		 * If not, we need to allocate an intermediate node.
		 */
		if (esid2base(esid, next->ua_level) != next->ua_base) {
			inter = make_intermediate(esid, ua);
			slb = make_new_leaf(esid, slbv, inter);
			break;
		}

		ua = next;
	}

	/*
	 * Someone probably wants this soon, and it may be a wired
	 * SLB mapping, so pre-spill this entry.
	 */
	eieio();
	slb_insert_user(pm, slb);

	return (vsid);
}
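
/*
 * Invalidate the SLB tree entry for the given user segment.  The pmap
 * lock must be held.
 */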
void
free_vsid(pmap_t pm, uint64_t esid, int large)
{
	struct slbtnode *ua;
	int idx;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	ua = pm->pm_slb_tree_root;
	/* Descend to the correct leaf. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		    ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));

		idx = esid2idx(esid, ua->ua_level);
		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = 0;
			eieio();
			ua->u.slb_entries[idx].slbe = 0;
			clrbit(&ua->ua_alloc, idx);
			return;
		}

		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base) {
			/* Perhaps just return instead of assert? */
			KASSERT(0,
			    ("Asked to remove an entry that was never inserted!"));
			return;
		}
	}
}
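
/*
 * Recursively free a subtree of SLB tree nodes, releasing the VSID of
 * every valid leaf entry along the way.
 */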
static void
free_slb_tree_node(struct slbtnode *ua)
{
	int idx;

	for (idx = 0; idx < 16; idx++) {
		if (ua->ua_level != UAD_LEAF_LEVEL) {
			if (ua->u.ua_child[idx] != NULL)
				free_slb_tree_node(ua->u.ua_child[idx]);
		} else {
			if (ua->u.slb_entries[idx].slbv != 0)
				moea64_release_vsid(ua->u.slb_entries[idx].slbv
				    >> SLBV_VSID_SHIFT);
		}
	}

	uma_zfree(slbt_zone, ua);
}

void
slb_free_tree(pmap_t pm)
{

	free_slb_tree_node(pm->pm_slb_tree_root);
}

struct slbtnode *
slb_alloc_tree(void)
{
	struct slbtnode *root;

	root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	root->ua_level = UAD_ROOT_LEVEL;

	return (root);
}

/* Lock entries mapping kernel text and stacks */

#define SLB_SPILLABLE(slbe) \
	(((slbe & SLBE_ESID_MASK) < VM_MIN_KERNEL_ADDRESS && \
	    (slbe & SLBE_ESID_MASK) > 16*SEGMENT_LENGTH) || \
	    (slbe & SLBE_ESID_MASK) > VM_MAX_KERNEL_ADDRESS)
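/*
 * That is, an entry may be evicted only if the segment it maps lies
 * outside both the first 16 segments (with 256MB segments, the low
 * 4GB holding kernel text and stacks) and the kernel virtual address
 * range; everything else stays resident in the kernel SLB cache.
 */

/*
 * Insert an entry into the per-CPU kernel SLB cache, reusing an
 * invalid slot if one exists and otherwise evicting a pseudo-randomly
 * chosen spillable slot.  Once the pmap layer is bootstrapped the
 * entry is also loaded into this CPU's hardware SLB.
 */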
void
slb_insert_kernel(uint64_t slbe, uint64_t slbv)
{
	struct slb *slbcache;
	int i, j;

	/* We don't want to be preempted while modifying the kernel map */
	critical_enter();

	slbcache = PCPU_GET(slb);

	/* Check for an unused slot, abusing the user slot as a full flag */
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < USER_SLB_SLOT; i++) {
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == USER_SLB_SLOT)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

	for (i = mftb() % 64, j = 0; j < 64; j++, i = (i+1) % 64) {
		if (i == USER_SLB_SLOT)
			continue;

		if (SLB_SPILLABLE(slbcache[i].slbe))
			break;
	}

	KASSERT(j < 64, ("All kernel SLB slots locked!"));

fillkernslb:
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* If it is for this CPU, put it in the SLB right away */
	if (pmap_bootstrapped) {
		/* slbie not required */
		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slbcache[i].slbv), "r"(slbcache[i].slbe));
	}

	critical_exit();
}
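
/*
 * Insert an entry into a user pmap's SLB cache, filling unused slots
 * first and replacing a pseudo-randomly chosen slot once the cache is
 * full.
 */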
void
slb_insert_user(pmap_t pm, struct slb *slb)
{
	int i;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	if (pm->pm_slb_len < 64) {
		i = pm->pm_slb_len;
		pm->pm_slb_len++;
	} else {
		i = mftb() % 64;
	}

	/* Note that this replacement is atomic with respect to trap_subr */
	pm->pm_slb[i] = slb;
}
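
/*
 * UMA back-end allocator that returns pages below the platform's
 * maximum real-mode address, presumably so that the SLB structures
 * remain accessible even with address translation disabled; it is
 * only used when that limit differs from VM_MAX_ADDRESS (see
 * slb_zone_init()).
 */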
static void *
slb_uma_real_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	static vm_offset_t realmax = 0;
	void *va;
	vm_page_t m;

	if (realmax == 0)
		realmax = platform_real_maxaddr();

	*flags = UMA_SLAB_PRIV;

	for (;;) {
		m = vm_phys_alloc_contig(1, 0, realmax, PAGE_SIZE,
		    PAGE_SIZE);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			VM_WAIT;
		} else
			break;
	}

	va = (void *) VM_PAGE_TO_PHYS(m);

	if (!hw_direct_map)
		pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));

	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero(va, PAGE_SIZE);

	/* vm_phys_alloc_contig does not track wiring */
	atomic_add_int(&cnt.v_wire_count, 1);
	m->wire_count = 1;

	return (va);
}

static void
slb_zone_init(void *dummy)
{

	slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
	slb_cache_zone = uma_zcreate("SLB cache", 64*sizeof(struct slb *),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);

	if (platform_real_maxaddr() != VM_MAX_ADDRESS) {
		uma_zone_set_allocf(slb_cache_zone, slb_uma_real_alloc);
		uma_zone_set_allocf(slbt_zone, slb_uma_real_alloc);
	}
}

struct slb **
slb_alloc_user_cache(void)
{
	return (uma_zalloc(slb_cache_zone, M_ZERO));
}

void
slb_free_user_cache(struct slb **slb)
{
	uma_zfree(slb_cache_zone, slb);
}