- Rearrange pmap_bootstrap slightly to be more in dependency order.
- Put the kernel tsb before the kernel load address, below VM_MIN_KERNEL_ADDRESS, instead of after the kernel where it consumes usable kva. This is magic mapped so the virtual address is irrelevant; it just needs to be out of the way.
This commit is contained in:
parent
a6d86686ee
commit
950002bf92
@ -305,41 +305,30 @@ pmap_bootstrap(vm_offset_t ekva)
|
|||||||
}
|
}
|
||||||
physmem = btoc(physsz);
|
physmem = btoc(physsz);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Calculate the size of kernel virtual memory, and the size and mask
|
||||||
|
* for the kernel tsb.
|
||||||
|
*/
|
||||||
virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
|
virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
|
||||||
vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
|
vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
|
||||||
tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
|
tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
|
||||||
tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1;
|
tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Get the available physical memory ranges from /memory/reg. These
|
* Allocate the kernel tsb and lock it in the tlb.
|
||||||
* are only used for kernel dumps, but it may not be wise to do prom
|
|
||||||
* calls in that situation.
|
|
||||||
*/
|
|
||||||
if ((sz = OF_getproplen(pmem, "reg")) == -1)
|
|
||||||
panic("pmap_bootstrap: getproplen /memory/reg");
|
|
||||||
if (sizeof(sparc64_memreg) < sz)
|
|
||||||
panic("pmap_bootstrap: sparc64_memreg too small");
|
|
||||||
if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1)
|
|
||||||
panic("pmap_bootstrap: getprop /memory/reg");
|
|
||||||
sparc64_nmemreg = sz / sizeof(*sparc64_memreg);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the start and end of kva. The kernel is loaded at the first
|
|
||||||
* available 4 meg super page, so round up to the end of the page.
|
|
||||||
*/
|
|
||||||
virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
|
|
||||||
virtual_end = vm_max_kernel_address;
|
|
||||||
kernel_vm_end = vm_max_kernel_address;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Allocate the kernel tsb.
|
|
||||||
*/
|
*/
|
||||||
pa = pmap_bootstrap_alloc(tsb_kernel_size);
|
pa = pmap_bootstrap_alloc(tsb_kernel_size);
|
||||||
if (pa & PAGE_MASK_4M)
|
if (pa & PAGE_MASK_4M)
|
||||||
panic("pmap_bootstrap: tsb unaligned\n");
|
panic("pmap_bootstrap: tsb unaligned\n");
|
||||||
tsb_kernel_phys = pa;
|
tsb_kernel_phys = pa;
|
||||||
tsb_kernel = (struct tte *)virtual_avail;
|
tsb_kernel = (struct tte *)(VM_MIN_KERNEL_ADDRESS - tsb_kernel_size);
|
||||||
virtual_avail += tsb_kernel_size;
|
pmap_map_tsb();
|
||||||
|
bzero(tsb_kernel, tsb_kernel_size);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Allocate the message buffer.
|
||||||
|
*/
|
||||||
|
msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Patch the virtual address and the tsb mask into the trap table.
|
* Patch the virtual address and the tsb mask into the trap table.
|
||||||
@ -372,12 +361,6 @@ pmap_bootstrap(vm_offset_t ekva)
|
|||||||
PATCH(tl1_dmmu_prot_patch_1);
|
PATCH(tl1_dmmu_prot_patch_1);
|
||||||
PATCH(tl1_dmmu_prot_patch_2);
|
PATCH(tl1_dmmu_prot_patch_2);
|
||||||
|
|
||||||
/*
|
|
||||||
* Lock it in the tlb.
|
|
||||||
*/
|
|
||||||
pmap_map_tsb();
|
|
||||||
bzero(tsb_kernel, tsb_kernel_size);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Enter fake 8k pages for the 4MB kernel pages, so that
|
* Enter fake 8k pages for the 4MB kernel pages, so that
|
||||||
* pmap_kextract() will work for them.
|
* pmap_kextract() will work for them.
|
||||||
@ -393,6 +376,26 @@ pmap_bootstrap(vm_offset_t ekva)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Set the start and end of kva. The kernel is loaded at the first
|
||||||
|
* available 4 meg super page, so round up to the end of the page.
|
||||||
|
*/
|
||||||
|
virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
|
||||||
|
virtual_end = vm_max_kernel_address;
|
||||||
|
kernel_vm_end = vm_max_kernel_address;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Allocate virtual address space for the message buffer.
|
||||||
|
*/
|
||||||
|
msgbufp = (struct msgbuf *)virtual_avail;
|
||||||
|
virtual_avail += round_page(MSGBUF_SIZE);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Allocate virtual address space to map pages during a kernel dump.
|
||||||
|
*/
|
||||||
|
crashdumpmap = virtual_avail;
|
||||||
|
virtual_avail += MAXDUMPPGS * PAGE_SIZE;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Allocate a kernel stack with guard page for thread0 and map it into
|
* Allocate a kernel stack with guard page for thread0 and map it into
|
||||||
* the kernel tsb.
|
* the kernel tsb.
|
||||||
@ -411,9 +414,13 @@ pmap_bootstrap(vm_offset_t ekva)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Allocate the message buffer.
|
* Calculate the first and last available physical addresses.
|
||||||
*/
|
*/
|
||||||
msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
|
avail_start = phys_avail[0];
|
||||||
|
for (i = 0; phys_avail[i + 2] != 0; i += 2)
|
||||||
|
;
|
||||||
|
avail_end = phys_avail[i + 1];
|
||||||
|
Maxmem = sparc64_btop(avail_end);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Add the prom mappings to the kernel tsb.
|
* Add the prom mappings to the kernel tsb.
|
||||||
@ -452,25 +459,17 @@ pmap_bootstrap(vm_offset_t ekva)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Calculate the first and last available physical addresses.
|
* Get the available physical memory ranges from /memory/reg. These
|
||||||
|
* are only used for kernel dumps, but it may not be wise to do prom
|
||||||
|
* calls in that situation.
|
||||||
*/
|
*/
|
||||||
avail_start = phys_avail[0];
|
if ((sz = OF_getproplen(pmem, "reg")) == -1)
|
||||||
for (i = 0; phys_avail[i + 2] != 0; i += 2)
|
panic("pmap_bootstrap: getproplen /memory/reg");
|
||||||
;
|
if (sizeof(sparc64_memreg) < sz)
|
||||||
avail_end = phys_avail[i + 1];
|
panic("pmap_bootstrap: sparc64_memreg too small");
|
||||||
Maxmem = sparc64_btop(avail_end);
|
if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1)
|
||||||
|
panic("pmap_bootstrap: getprop /memory/reg");
|
||||||
/*
|
sparc64_nmemreg = sz / sizeof(*sparc64_memreg);
|
||||||
* Allocate virtual address space for the message buffer.
|
|
||||||
*/
|
|
||||||
msgbufp = (struct msgbuf *)virtual_avail;
|
|
||||||
virtual_avail += round_page(MSGBUF_SIZE);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Allocate virtual address space to map pages during a kernel dump.
|
|
||||||
*/
|
|
||||||
crashdumpmap = virtual_avail;
|
|
||||||
virtual_avail += MAXDUMPPGS * PAGE_SIZE;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Initialize the kernel pmap (which is statically allocated).
|
* Initialize the kernel pmap (which is statically allocated).
|
||||||
|
Loading…
Reference in New Issue
Block a user