Reduce KVA pressure on OEA64 systems running in bridge mode by mapping
UMA segments at their physical addresses instead of into KVA. This emulates
the direct mapping behavior of OEA32 in an ad-hoc way. To make this work
properly required sharing the entire kernel PMAP with Open Firmware, so
ofw_pmap is transformed into a stub on 64-bit CPUs.

Also implement some more tweaks to get more mileage out of our limited
amount of KVA, principally by extending KVA into segment 16 until the
beginning of the first OFW mapping.

Reported by:	linimon
This commit is contained in:
Nathan Whitehorn 2010-02-20 16:23:29 +00:00
parent cfe174daab
commit ab73970649
6 changed files with 46 additions and 71 deletions

View File

@@ -198,6 +198,11 @@ cpu_startup(void *dummy)
ptoa(physmem) / 1048576);
realmem = physmem;
if (bootverbose)
printf("available KVA = %zd (%zd MB)\n",
virtual_end - virtual_avail,
(virtual_end - virtual_avail) / 1048576);
/*
* Display any holes after the first chunk of extended memory.
*/

View File

@@ -909,7 +909,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
* Set the start and end of kva.
*/
virtual_avail = VM_MIN_KERNEL_ADDRESS;
virtual_end = VM_MAX_KERNEL_ADDRESS;
virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
/*
* Allocate a kernel stack with a guard page for thread0 and map it
@@ -2413,7 +2413,7 @@ moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
* If this is outside kernel virtual space, then it's a
* battable entry and doesn't require unmapping
*/
if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) {
base = trunc_page(va);
offset = va & PAGE_MASK;
size = roundup(offset + size, PAGE_SIZE);

View File

@@ -297,9 +297,6 @@ struct pvo_head moea64_pvo_unmanaged =
uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */
vm_offset_t pvo_allocator_start;
vm_offset_t pvo_allocator_end;
#define BPVO_POOL_SIZE 327680
static struct pvo_entry *moea64_bpvo_pool;
static int moea64_bpvo_pool_index = 0;
@@ -699,6 +696,7 @@ moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
struct ofw_map translations[sz/sizeof(struct ofw_map)];
register_t msr;
vm_offset_t off;
vm_paddr_t pa_base;
int i, ofw_mappings;
bzero(translations, sz);
@@ -720,33 +718,18 @@ moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
if (translations[i].om_pa_hi)
panic("OFW translations above 32-bit boundary!");
pa_base = translations[i].om_pa_lo;
/* Now enter the pages for this mapping */
/*
* Lock the ofw pmap. pmap_kenter(), which we use for the
* pages the kernel also needs, does its own locking.
*/
PMAP_LOCK(&ofw_pmap);
DISABLE_TRANS(msr);
for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
struct vm_page m;
/* Map low memory mappings into the kernel pmap, too.
* These are typically mappings made by the loader,
* so we need them if we want to keep executing. */
if (translations[i].om_va + off < SEGMENT_LENGTH)
moea64_kenter(mmup, translations[i].om_va + off,
translations[i].om_va + off);
m.phys_addr = translations[i].om_pa_lo + off;
moea64_enter_locked(&ofw_pmap,
translations[i].om_va + off, &m, VM_PROT_ALL, 1);
moea64_kenter(mmup, translations[i].om_va + off,
pa_base + off);
ofw_mappings++;
}
ENABLE_TRANS(msr);
PMAP_UNLOCK(&ofw_pmap);
}
}
@@ -926,8 +909,8 @@ moea64_bridge_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernele
*/
moea64_pinit(mmup, &ofw_pmap);
ofw_pmap.pm_sr[KERNEL_SR] = kernel_pmap->pm_sr[KERNEL_SR];
ofw_pmap.pm_sr[KERNEL2_SR] = kernel_pmap->pm_sr[KERNEL2_SR];
for (i = 0; i < 16; i++)
ofw_pmap.pm_sr[i] = kernel_pmap->pm_sr[i];
if ((chosen = OF_finddevice("/chosen")) == -1)
panic("moea64_bootstrap: can't find /chosen");
@@ -965,15 +948,20 @@ moea64_bridge_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernele
* Set the start and end of kva.
*/
virtual_avail = VM_MIN_KERNEL_ADDRESS;
virtual_end = VM_MAX_KERNEL_ADDRESS;
virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
/*
* Allocate some stupid buffer regions.
* Figure out how far we can extend virtual_end into segment 16
* without running into existing mappings. Segment 16 is guaranteed
* to contain neither RAM nor devices (at least on Apple hardware),
* but will generally contain some OFW mappings we should not
* step on.
*/
pvo_allocator_start = virtual_avail;
virtual_avail += SEGMENT_LENGTH/4;
pvo_allocator_end = virtual_avail;
PMAP_LOCK(kernel_pmap);
while (moea64_pvo_find_va(kernel_pmap, virtual_end+1, NULL) == NULL)
virtual_end += PAGE_SIZE;
PMAP_UNLOCK(kernel_pmap);
/*
* Allocate some things for page zeroing
@@ -1014,26 +1002,20 @@ moea64_bridge_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernele
* Allocate virtual address space for the message buffer.
*/
pa = msgbuf_phys = moea64_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE);
msgbufp = (struct msgbuf *)virtual_avail;
va = virtual_avail;
virtual_avail += round_page(MSGBUF_SIZE);
while (va < virtual_avail) {
moea64_kenter(mmup, va, pa);
msgbufp = (struct msgbuf *)msgbuf_phys;
while (pa - msgbuf_phys < MSGBUF_SIZE) {
moea64_kenter(mmup, pa, pa);
pa += PAGE_SIZE;
va += PAGE_SIZE;
}
/*
* Allocate virtual address space for the dynamic percpu area.
*/
pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
dpcpu = (void *)virtual_avail;
va = virtual_avail;
virtual_avail += DPCPU_SIZE;
while (va < virtual_avail) {
moea64_kenter(mmup, va, pa);
dpcpu = (void *)pa;
while (pa - (vm_offset_t)dpcpu < DPCPU_SIZE) {
moea64_kenter(mmup, pa, pa);
pa += PAGE_SIZE;
va += PAGE_SIZE;
}
dpcpu_init(dpcpu, 0);
}
@@ -1412,14 +1394,10 @@ moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
break;
}
va = pvo_allocator_start;
pvo_allocator_start += PAGE_SIZE;
if (pvo_allocator_start >= pvo_allocator_end)
panic("Ran out of PVO allocator buffer space!");
va = VM_PAGE_TO_PHYS(m);
moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
&moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M,
&moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M,
PVO_WIRED | PVO_BOOTSTRAP);
if (needed_lock)
@@ -1557,10 +1535,12 @@ moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
uint64_t pte_lo;
int error;
#if 0
if (!pmap_bootstrapped) {
if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS)
if (va >= VM_MIN_KERNEL_ADDRESS && va < virtual_end)
panic("Trying to enter an address in KVA -- %#x!\n",pa);
}
#endif
pte_lo = moea64_calc_wimg(pa);

View File

@@ -56,13 +56,6 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
vm_page_t m;
int pflags;
if (!hw_direct_map) {
*flags = UMA_SLAB_KMEM;
va = (void *)kmem_malloc(kmem_map, bytes, wait);
return va;
}
*flags = UMA_SLAB_PRIV;
if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
@@ -82,6 +75,10 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
}
va = (void *) VM_PAGE_TO_PHYS(m);
if (!hw_direct_map)
pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));
if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
bzero(va, PAGE_SIZE);
atomic_add_int(&hw_uma_mdpages, 1);
@@ -94,13 +91,11 @@ uma_small_free(void *mem, int size, u_int8_t flags)
{
vm_page_t m;
if (!hw_direct_map) {
kmem_free(kmem_map, (vm_offset_t)mem, size);
if (!hw_direct_map)
pmap_remove(kernel_pmap,(vm_offset_t)mem,
(vm_offset_t)mem + PAGE_SIZE);
return;
}
m = PHYS_TO_VM_PAGE((u_int32_t)mem);
m = PHYS_TO_VM_PAGE((vm_offset_t)mem);
m->wire_count--;
vm_page_free(m);
atomic_subtract_int(&cnt.v_wire_count, 1);

View File

@@ -45,6 +45,7 @@
#define USER_SR 12
#define KERNEL_SR 13
#define KERNEL2_SR 14
#define KERNEL3_SR 15
#define KERNEL_VSIDBITS 0xfffff
#define KERNEL_SEGMENT (0xfffff0 + KERNEL_SR)
#define KERNEL2_SEGMENT (0xfffff0 + KERNEL2_SR)

View File

@@ -98,7 +98,8 @@
#define KERNBASE 0x00100000 /* start of kernel virtual */
#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)(KERNEL_SR << ADDR_SR_SHFT))
#define VM_MAX_KERNEL_ADDRESS (VM_MIN_KERNEL_ADDRESS + 2*SEGMENT_LENGTH - 1)
#define VM_MAX_SAFE_KERNEL_ADDRESS (VM_MIN_KERNEL_ADDRESS + 2*SEGMENT_LENGTH -1)
#define VM_MAX_KERNEL_ADDRESS (VM_MIN_KERNEL_ADDRESS + 3*SEGMENT_LENGTH - 1)
/*
* Use the direct-mapped BAT registers for UMA small allocs. This
@@ -106,13 +107,6 @@
*/
#define UMA_MD_SMALL_ALLOC
/*
* On 64-bit systems in bridge mode, we have no direct map, so we fake
* the small_alloc() calls. But we need the VM to be in a reasonable
* state first.
*/
#define UMA_MD_SMALL_ALLOC_NEEDS_VM
#else
/*