Remove unneeded variables in the arm64 pmap bootstrap

These are now unneeded after cleaning up the pmap bootstrap process.
Remove them and the code that sets them.

Sponsored by:	The FreeBSD Foundation
This commit is contained in:
Andrew Turner 2022-09-26 15:26:40 +01:00
parent 04a32b802e
commit 8da1273234
6 changed files with 22 additions and 47 deletions

View File

@ -41,10 +41,8 @@ __FBSDID("$FreeBSD$");
ASSYM(BOOTPARAMS_SIZE, roundup2(sizeof(struct arm64_bootparams),
STACKALIGNBYTES + 1));
ASSYM(BP_MODULEP, offsetof(struct arm64_bootparams, modulep));
ASSYM(BP_KERN_L1PT, offsetof(struct arm64_bootparams, kern_l1pt));
ASSYM(BP_KERN_DELTA, offsetof(struct arm64_bootparams, kern_delta));
ASSYM(BP_KERN_STACK, offsetof(struct arm64_bootparams, kern_stack));
ASSYM(BP_KERN_L0PT, offsetof(struct arm64_bootparams, kern_l0pt));
ASSYM(BP_KERN_TTBR0, offsetof(struct arm64_bootparams, kern_ttbr0));
ASSYM(BP_BOOT_EL, offsetof(struct arm64_bootparams, boot_el));

View File

@ -133,10 +133,6 @@ virtdone:
/* Backup the module pointer */
mov x1, x0
/* Make the page table base a virtual address */
sub x26, x26, x29
sub x24, x24, x29
sub sp, sp, #BOOTPARAMS_SIZE
mov x0, sp
@ -144,12 +140,10 @@ virtdone:
neg x29, x29
str x1, [x0, #BP_MODULEP]
str x26, [x0, #BP_KERN_L1PT]
str x29, [x0, #BP_KERN_DELTA]
adrp x25, initstack
add x25, x25, :lo12:initstack
str x25, [x0, #BP_KERN_STACK]
str x24, [x0, #BP_KERN_L0PT]
str x27, [x0, #BP_KERN_TTBR0]
str x23, [x0, #BP_BOOT_EL]

View File

@ -820,8 +820,7 @@ initarm(struct arm64_bootparams *abp)
pan_setup();
/* Bootstrap enough of pmap to enter the kernel proper */
pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
KERNBASE - abp->kern_delta, lastaddr - KERNBASE);
pmap_bootstrap(KERNBASE - abp->kern_delta, lastaddr - KERNBASE);
/* Exclude entries needed in the DMAP region, but not phys_avail */
if (efihdr != NULL)
exclude_efi_map_entries(efihdr);

View File

@ -1057,9 +1057,8 @@ pmap_bootstrap_l3_page(struct pmap_bootstrap_state *state, int i)
MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
}
static vm_offset_t
pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
vm_offset_t freemempos)
static void
pmap_bootstrap_dmap(vm_paddr_t min_pa)
{
int i;
@ -1067,8 +1066,6 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
dmap_phys_max = 0;
dmap_max_addr = 0;
bs_state.freemempos = freemempos;
for (i = 0; i < (physmap_idx * 2); i += 2) {
bs_state.pa = physmap[i] & ~L3_OFFSET;
bs_state.va = bs_state.pa - dmap_phys_base + DMAP_MIN_ADDRESS;
@ -1120,48 +1117,38 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
}
cpu_tlb_flushID();
return (bs_state.freemempos);
}
static vm_offset_t
pmap_bootstrap_l2(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l2_start)
static void
pmap_bootstrap_l2(vm_offset_t va)
{
KASSERT((va & L1_OFFSET) == 0, ("Invalid virtual address"));
/* Leave bs_state.pa as it's only needed to bootstrap blocks and pages*/
bs_state.va = va;
bs_state.freemempos = l2_start;
for (; bs_state.va < VM_MAX_KERNEL_ADDRESS; bs_state.va += L1_SIZE)
pmap_bootstrap_l1_table(&bs_state);
return (bs_state.freemempos);
}
static vm_offset_t
pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
static void
pmap_bootstrap_l3(vm_offset_t va)
{
KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address"));
/* Leave bs_state.pa as it's only needed to bootstrap blocks and pages*/
bs_state.va = va;
bs_state.freemempos = l3_start;
for (; bs_state.va < VM_MAX_KERNEL_ADDRESS; bs_state.va += L2_SIZE)
pmap_bootstrap_l2_table(&bs_state);
return (bs_state.freemempos);
}
/*
* Bootstrap the system enough to run with virtual memory.
*/
void
pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart,
vm_size_t kernlen)
pmap_bootstrap(vm_paddr_t kernstart, vm_size_t kernlen)
{
vm_offset_t freemempos;
vm_offset_t dpcpu, msgbufpv;
vm_paddr_t start_pa, pa, min_pa;
uint64_t kern_delta;
@ -1173,14 +1160,14 @@ pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart,
kern_delta = KERNBASE - kernstart;
printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen);
printf("%lx\n", l1pt);
printf("pmap_bootstrap %lx %lx\n", kernstart, kernlen);
printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK);
/* Set this early so we can use the pagetable walking functions */
kernel_pmap_store.pm_l0 = (pd_entry_t *)l0pt;
kernel_pmap_store.pm_l0 = pagetable_l0_ttbr1;
PMAP_LOCK_INIT(kernel_pmap);
kernel_pmap->pm_l0_paddr = l0pt - kern_delta;
kernel_pmap->pm_l0_paddr =
pmap_early_vtophys((vm_offset_t)kernel_pmap_store.pm_l0);
kernel_pmap->pm_cookie = COOKIE_FROM(-1, INT_MIN);
kernel_pmap->pm_stage = PM_STAGE1;
kernel_pmap->pm_levels = 4;
@ -1204,11 +1191,11 @@ pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart,
min_pa = physmap[i];
}
freemempos = KERNBASE + kernlen;
freemempos = roundup2(freemempos, PAGE_SIZE);
bs_state.freemempos = KERNBASE + kernlen;
bs_state.freemempos = roundup2(bs_state.freemempos, PAGE_SIZE);
/* Create a direct map region early so we can use it for pa -> va */
freemempos = pmap_bootstrap_dmap(l1pt, min_pa, freemempos);
pmap_bootstrap_dmap(min_pa);
bs_state.dmap_valid = true;
/*
* We only use PXN when we know nothing will be executed from it, e.g.
@ -1223,16 +1210,15 @@ pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart,
* loader allocated the first and only l2 page table page used to map
* the kernel, preloaded files and module metadata.
*/
freemempos = pmap_bootstrap_l2(l1pt, KERNBASE + L1_SIZE, freemempos);
pmap_bootstrap_l2(KERNBASE + L1_SIZE);
/* And the l3 tables for the early devmap */
freemempos = pmap_bootstrap_l3(l1pt,
VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE), freemempos);
pmap_bootstrap_l3(VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE));
cpu_tlb_flushID();
#define alloc_pages(var, np) \
(var) = freemempos; \
freemempos += (np * PAGE_SIZE); \
(var) = bs_state.freemempos; \
bs_state.freemempos += (np * PAGE_SIZE); \
memset((char *)(var), 0, ((np) * PAGE_SIZE));
/* Allocate dynamic per-cpu area. */
@ -1244,14 +1230,14 @@ pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart,
msgbufp = (void *)msgbufpv;
/* Reserve some VA space for early BIOS/ACPI mapping */
preinit_map_va = roundup2(freemempos, L2_SIZE);
preinit_map_va = roundup2(bs_state.freemempos, L2_SIZE);
virtual_avail = preinit_map_va + PMAP_PREINIT_MAPPING_SIZE;
virtual_avail = roundup2(virtual_avail, L1_SIZE);
virtual_end = VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE);
kernel_vm_end = virtual_avail;
pa = pmap_early_vtophys(freemempos);
pa = pmap_early_vtophys(bs_state.freemempos);
physmem_exclude_region(start_pa, pa - start_pa, EXFLAG_NOALLOC);

View File

@ -33,10 +33,8 @@
struct arm64_bootparams {
vm_offset_t modulep;
vm_offset_t kern_l1pt; /* L1 page table for the kernel */
uint64_t kern_delta;
vm_offset_t kern_stack;
vm_offset_t kern_l0pt; /* L1 page table for the kernel */
vm_paddr_t kern_ttbr0;
int boot_el; /* EL the kernel booted from */
int pad;

View File

@ -174,7 +174,7 @@ extern vm_offset_t virtual_end;
#define pmap_vm_page_alloc_check(m)
void pmap_activate_vm(pmap_t);
void pmap_bootstrap(vm_offset_t, vm_offset_t, vm_paddr_t, vm_size_t);
void pmap_bootstrap(vm_paddr_t, vm_size_t);
int pmap_change_attr(vm_offset_t va, vm_size_t size, int mode);
int pmap_change_prot(vm_offset_t va, vm_size_t size, vm_prot_t prot);
void pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode);