diff --git a/sys/sparc64/include/pmap.h b/sys/sparc64/include/pmap.h
index 3524adf3f9b8..9b85101e6657 100644
--- a/sys/sparc64/include/pmap.h
+++ b/sys/sparc64/include/pmap.h
@@ -77,7 +77,7 @@ struct pmap {
 #define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
 #define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
 
-void	pmap_bootstrap(vm_offset_t ekva);
+void	pmap_bootstrap(void);
 vm_paddr_t pmap_kextract(vm_offset_t va);
 void	pmap_kenter(vm_offset_t va, vm_page_t m);
 void	pmap_kremove(vm_offset_t);
@@ -103,8 +103,6 @@ extern	vm_paddr_t phys_avail[];
 extern	vm_offset_t virtual_avail;
 extern	vm_offset_t virtual_end;
 
-extern	vm_paddr_t msgbuf_phys;
-
 #ifdef PMAP_STATS
 SYSCTL_DECL(_debug_pmap_stats);
 
diff --git a/sys/sparc64/sparc64/machdep.c b/sys/sparc64/sparc64/machdep.c
index b90a54d0cd3d..49a2bb1c94fd 100644
--- a/sys/sparc64/sparc64/machdep.c
+++ b/sys/sparc64/sparc64/machdep.c
@@ -243,6 +243,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
 	char *env;
 	struct pcpu *pc;
 	vm_offset_t end;
+	vm_offset_t va;
 	caddr_t kmdp;
 	phandle_t child;
 	phandle_t root;
@@ -368,19 +369,28 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
 	 * Panic if there is no metadata. Most likely the kernel was booted
 	 * directly, instead of through loader(8).
 	 */
-	if (mdp == NULL || kmdp == NULL) {
-		printf("sparc64_init: no loader metadata.\n"
+	if (mdp == NULL || kmdp == NULL || end == 0 ||
+	    kernel_tlb_slots == 0 || kernel_tlbs == NULL) {
+		printf("sparc64_init: missing loader metadata.\n"
 		    "This probably means you are not using loader(8).\n");
 		panic("sparc64_init");
 	}
 
 	/*
-	 * Sanity check the kernel end, which is important.
+	 * Work around the broken loader behavior of not demapping no
+	 * longer used kernel TLB slots when unloading the kernel or
+	 * modules.
 	 */
-	if (end == 0) {
-		printf("sparc64_init: warning, kernel end not specified.\n"
-		    "Attempting to continue anyway.\n");
-		end = (vm_offset_t)_end;
+	for (va = KERNBASE + (kernel_tlb_slots - 1) * PAGE_SIZE_4M;
+	    va >= roundup2(end, PAGE_SIZE_4M); va -= PAGE_SIZE_4M) {
+		printf("demapping unused kernel TLB slot (va %#lx - %#lx)\n",
+		    va, va + PAGE_SIZE_4M - 1);
+		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
+		    ASI_DMMU_DEMAP, 0);
+		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
+		    ASI_IMMU_DEMAP, 0);
+		flush(KERNBASE);
+		kernel_tlb_slots--;
 	}
 
 	/*
@@ -429,7 +439,7 @@
 	/*
 	 * Initialize virtual memory and calculate physmem.
 	 */
-	pmap_bootstrap(end);
+	pmap_bootstrap();
 	/*
 	 * Initialize tunables.
 	 */
@@ -752,6 +762,7 @@ cpu_shutdown(void *args)
 
 void
 cpu_flush_dcache(void *ptr, size_t len)
 {
 
+	/* TBD */
 }
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 765d63978c4a..6eb8da09f040 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -119,10 +119,9 @@ __FBSDID("$FreeBSD$");
 extern struct mtx sched_lock;
 
 /*
- * Virtual and physical address of message buffer
+ * Virtual address of message buffer
  */
 struct msgbuf *msgbufp;
-vm_paddr_t msgbuf_phys;
 
 /*
  * Map of physical memory reagions
@@ -277,7 +276,7 @@ om_cmp(const void *a, const void *b)
  * Bootstrap the system enough to run with virtual memory.
  */
 void
-pmap_bootstrap(vm_offset_t ekva)
+pmap_bootstrap(void)
 {
 	struct pmap *pm;
 	struct tte *tp;
@@ -365,13 +364,14 @@ pmap_bootstrap(vm_offset_t ekva)
 	/*
 	 * Allocate and map the dynamic per-CPU area for the BSP.
 	 */
-	dpcpu0 = (void *)TLB_PHYS_TO_DIRECT(pmap_bootstrap_alloc(DPCPU_SIZE));
+	pa = pmap_bootstrap_alloc(DPCPU_SIZE);
+	dpcpu0 = (void *)TLB_PHYS_TO_DIRECT(pa);
 
 	/*
 	 * Allocate and map the message buffer.
 	 */
-	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
-	msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(msgbuf_phys);
+	pa = pmap_bootstrap_alloc(MSGBUF_SIZE);
+	msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(pa);
 
 	/*
 	 * Patch the virtual address and the tsb mask into the trap table.
@@ -420,10 +420,11 @@
 	}
 
 	/*
-	 * Set the start and end of KVA. The kernel is loaded at the first
-	 * available 4MB super page, so round up to the end of the page.
+	 * Set the start and end of KVA. The kernel is loaded starting
+	 * at the first available 4MB super page, so we advance to the
+	 * end of the last one used for it.
 	 */
-	virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
+	virtual_avail = KERNBASE + kernel_tlb_slots * PAGE_SIZE_4M;
 	virtual_end = vm_max_kernel_address;
 	kernel_vm_end = vm_max_kernel_address;
 
@@ -443,8 +444,7 @@
 	 * coloured properly, since we're allocating from phys_avail so the
 	 * memory won't have an associated vm_page_t.
 	 */
-	pa = pmap_bootstrap_alloc(roundup(KSTACK_PAGES, DCACHE_COLORS) *
-	    PAGE_SIZE);
+	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE);
 	kstack0_phys = pa;
 	virtual_avail += roundup(KSTACK_GUARD_PAGES, DCACHE_COLORS) *
 	    PAGE_SIZE;
@@ -587,7 +587,7 @@ pmap_bootstrap_alloc(vm_size_t size)
 	vm_paddr_t pa;
 	int i;
 
-	size = round_page(size);
+	size = roundup(size, PAGE_SIZE * DCACHE_COLORS);
 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
 		if (phys_avail[i + 1] - phys_avail[i] < size)
 			continue;
@@ -946,7 +946,7 @@ pmap_kremove_flags(vm_offset_t va)
 	struct tte *tp;
 
 	tp = tsb_kvtotte(va);
-	CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
+	CTR3(KTR_PMAP, "pmap_kremove_flags: va=%#lx tp=%p data=%#lx", va, tp,
 	    tp->tte_data);
 	TTE_ZERO(tp);
 }
@@ -1349,7 +1349,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	}
 
 	CTR6(KTR_PMAP,
-	    "pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
+	    "pmap_enter_locked: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
 	    pm->pm_context[curcpu], m, va, pa, prot, wired);
 
 	/*
@@ -1357,7 +1357,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	 * changed, must be protection or wiring change.
 	 */
 	if ((tp = tsb_tte_lookup(pm, va)) != NULL && TTE_GET_PA(tp) == pa) {
-		CTR0(KTR_PMAP, "pmap_enter: update");
+		CTR0(KTR_PMAP, "pmap_enter_locked: update");
 		PMAP_STATS_INC(pmap_nenter_update);
 
 		/*
@@ -1414,12 +1414,12 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	 * phsyical address, delete the old mapping.
 	 */
 	if (tp != NULL) {
-		CTR0(KTR_PMAP, "pmap_enter: replace");
+		CTR0(KTR_PMAP, "pmap_enter_locked: replace");
 		PMAP_STATS_INC(pmap_nenter_replace);
 		pmap_remove_tte(pm, NULL, tp, va);
 		tlb_page_demap(pm, va);
 	} else {
-		CTR0(KTR_PMAP, "pmap_enter: new");
+		CTR0(KTR_PMAP, "pmap_enter_locked: new");
 		PMAP_STATS_INC(pmap_nenter_new);
 	}
 
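
For reviewers, the core of this change is the demap loop added to sparc64_init(): it walks backwards from the last locked 4MB TLB slot down to the first superpage boundary at or above the kernel's end address, shooting down one no-longer-used mapping per iteration and trimming kernel_tlb_slots, so the later virtual_avail = KERNBASE + kernel_tlb_slots * PAGE_SIZE_4M calculation in pmap_bootstrap() stays accurate. The standalone sketch below reproduces only that slot arithmetic; it is not part of the patch, the KERNBASE value and the example end/kernel_tlb_slots numbers are assumptions for illustration, and printf() stands in for the stxa()/flush() MMU accesses.

/*
 * Standalone sketch of the TLB-slot arithmetic in the demap loop above.
 * NOT part of the patch: KERNBASE and the example values of 'end' and
 * 'kernel_tlb_slots' are assumed for illustration only.
 */
#include <stdio.h>

#define	KERNBASE	0xc0000000UL		/* assumed sparc64 kernel base */
#define	PAGE_SIZE_4M	(4UL * 1024 * 1024)	/* 4MB superpage */

/* Round up to a power-of-two alignment, as roundup2() from sys/param.h. */
#define	roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
	/* Example: loader left 8 locked slots, kernel ends inside slot 5. */
	unsigned long end = KERNBASE + 5 * PAGE_SIZE_4M + 12345;
	int kernel_tlb_slots = 8;
	unsigned long va;

	/*
	 * Walk backwards from the last locked slot down to the first 4MB
	 * boundary at or above 'end'; everything in that range maps memory
	 * the kernel no longer occupies, so each such slot can be demapped
	 * and trimmed off the slot count.
	 */
	for (va = KERNBASE + (kernel_tlb_slots - 1) * PAGE_SIZE_4M;
	    va >= roundup2(end, PAGE_SIZE_4M); va -= PAGE_SIZE_4M) {
		printf("would demap slot va %#lx - %#lx\n",
		    va, va + PAGE_SIZE_4M - 1);
		kernel_tlb_slots--;
	}
	/* Slots 0-5 survive; prints "kernel_tlb_slots now 6". */
	printf("kernel_tlb_slots now %d\n", kernel_tlb_slots);
	return (0);
}

Walking downwards matters here: kernel_tlb_slots-- may only trim slots off the tail of the locked range, and the loop stops as soon as va falls below the rounded-up kernel end, so every slot still backing kernel text or data stays mapped.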