powerpc: Unify pmap definitions between AIM and Book-E

This is part 2 of r347078, pulling the page directory out of the Book-E
pmap. This breaks KBI for anything that uses struct pmap (such as
vm_map), so any modules that access this must be rebuilt.
Justin Hibbits, 2019-08-12 03:03:56 +00:00
commit 3be09f300d (parent 263a6508a3)
3 changed files with 51 additions and 38 deletions
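
What the change does, in outline: struct pmap previously embedded the Book-E
root tables as fixed-size arrays (pm_pp2d[PP2D_NENTRIES] on powerpc64,
pm_pdir[PDIR_NENTRIES] on 32-bit) inside #ifdef AIM / #ifdef BOOKE blocks, so
the struct's size and layout differed between the two MMU families. After
this change the pmap holds only a pointer, the AIM/BOOKE conditionals around
the union arms go away, and the root table itself is allocated from a new UMA
zone. A condensed sketch of the new lifecycle, abridged from the hunks below
(the wrapper function is hypothetical, added only to make the fragment
self-contained; the other names are taken from the diff):

	/* 64-bit case shown; the 32-bit case is identical with pm_pdir. */
	static uma_zone_t ptbl_root_zone;

	static void
	pmap_root_lifecycle(pmap_t pmap)	/* hypothetical wrapper */
	{
		/* mmu_booke_init(): create the zone once. */
		ptbl_root_zone = uma_zcreate("pmap root", PMAP_ROOT_SIZE,
		    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, UMA_ZONE_VM);

		/* mmu_booke_pinit(): each new pmap gets a zeroed root table. */
		pmap->pm_pp2d = uma_zalloc(ptbl_root_zone, M_WAITOK);
		bzero(pmap->pm_pp2d, PMAP_ROOT_SIZE);

		/* mmu_booke_release(): the table returns to the zone. */
		uma_zfree(ptbl_root_zone, pmap->pm_pp2d);
	}

Dropping the inline arrays changes sizeof(struct pmap) and the offsets of
later members, which is exactly the KBI break noted above.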

sys/powerpc/booke/pmap.c

@@ -184,6 +184,7 @@ unsigned int kernel_ptbls;	/* Number of KVA ptbls. */
 #ifdef __powerpc64__
 unsigned int kernel_pdirs;
 #endif
+static uma_zone_t ptbl_root_zone;
 
 /*
  * If user pmap is processed with mmu_booke_remove and the resident count
@@ -262,12 +263,14 @@ static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
 #endif
 
 #ifdef __powerpc64__
+#define PMAP_ROOT_SIZE	(sizeof(pte_t***) * PP2D_NENTRIES)
 static pte_t *ptbl_alloc(mmu_t, pmap_t, pte_t **,
     unsigned int, boolean_t);
 static void ptbl_free(mmu_t, pmap_t, pte_t **, unsigned int, vm_page_t);
 static void ptbl_hold(mmu_t, pmap_t, pte_t **, unsigned int);
 static int ptbl_unhold(mmu_t, pmap_t, vm_offset_t);
 #else
+#define PMAP_ROOT_SIZE	(sizeof(pte_t**) * PDIR_NENTRIES)
 static void ptbl_init(void);
 static struct ptbl_buf *ptbl_buf_alloc(void);
 static void ptbl_buf_free(struct ptbl_buf *);
@@ -600,9 +603,6 @@ pdir_alloc(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, bool nosleep)
 	pte_t **pdir;
 	int req;
 
-	KASSERT((pdir[pp2d_idx] == NULL),
-	    ("%s: valid pdir entry exists!", __func__));
-
 	req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
 	while ((m = vm_page_alloc(NULL, pp2d_idx, req)) == NULL) {
 		PMAP_UNLOCK(pmap);
@@ -632,6 +632,7 @@ pdir_free(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, vm_page_t m)
 
 	pmap->pm_pp2d[pp2d_idx] = NULL;
 
+	vm_wire_sub(1);
 	vm_page_free_zero(m);
 }
@@ -660,7 +661,8 @@ pdir_unhold(mmu_t mmu, pmap_t pmap, u_int pp2d_idx)
 	/*
 	 * Free pdir page if there are no dir entries in this pdir.
 	 */
-	if (vm_page_unwire_noq(m)) {
+	m->wire_count--;
+	if (m->wire_count == 0) {
 		pdir_free(mmu, pmap, pp2d_idx, m);
 		return (1);
 	}
@@ -682,7 +684,7 @@ pdir_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
 	KASSERT((pdir != NULL), ("pdir_hold: null pdir"));
 
 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pdir));
-	vm_page_wire(m);
+	m->wire_count++;
 }
 
 /* Allocate page table. */
@@ -728,6 +730,7 @@ ptbl_free(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx, vm_page_
 
 	pdir[pdir_idx] = NULL;
 
+	vm_wire_sub(1);
 	vm_page_free_zero(m);
 }
@@ -763,7 +766,8 @@ ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 	 * wire_count has the same value for all ptbl pages, so check the
 	 * last page.
 	 */
-	if (vm_page_unwire_noq(m)) {
+	m->wire_count--;
+	if (m->wire_count == 0) {
 		ptbl_free(mmu, pmap, pdir, pdir_idx, m);
 		pdir_unhold(mmu, pmap, pp2d_idx);
 		return (1);
@@ -789,7 +793,7 @@ ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
 	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
 
 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
-	vm_page_wire(m);
+	m->wire_count++;
 }
 #else
@@ -1545,6 +1549,7 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 	vm_offset_t kernel_pdir, kstack0;
 	vm_paddr_t kstack0_phys;
 	void *dpcpu;
+	vm_offset_t kernel_ptbl_root;
 
 	debugf("mmu_booke_bootstrap: entered\n");
@@ -1585,7 +1590,10 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 	data_end = round_page(data_end);
 
-#ifndef __powerpc64__
+#ifdef __powerpc64__
+	kernel_ptbl_root = data_end;
+	data_end += PP2D_NENTRIES * sizeof(pte_t**);
+#else
 	/* Allocate space for ptbl_bufs. */
 	ptbl_bufs = (struct ptbl_buf *)data_end;
 	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
@@ -1593,6 +1601,8 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 	    (uintptr_t)ptbl_bufs, data_end);
 
 	data_end = round_page(data_end);
+	kernel_ptbl_root = data_end;
+	data_end += PDIR_NENTRIES * sizeof(pte_t*);
 #endif
 
 	/* Allocate PTE tables for kernel KVA. */
@@ -1814,6 +1824,11 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 #ifndef __powerpc64__
 	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
 #endif
+#ifdef __powerpc64__
+	kernel_pmap->pm_pp2d = (pte_t ***)kernel_ptbl_root;
+#else
+	kernel_pmap->pm_pdir = (pte_t **)kernel_ptbl_root;
+#endif
 
 	debugf("kernel_pmap = 0x%"PRI0ptrX"\n", (uintptr_t)kernel_pmap);
 	kernel_pte_alloc(virtual_avail, kernstart, kernel_pdir);
@@ -1989,6 +2004,10 @@ mmu_booke_init(mmu_t mmu)
 	/* Pre-fill pvzone with initial number of pv entries. */
 	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
 
+	/* Create a UMA zone for page table roots. */
+	ptbl_root_zone = uma_zcreate("pmap root", PMAP_ROOT_SIZE,
+	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, UMA_ZONE_VM);
+
 	/* Initialize ptbl allocation. */
 	ptbl_init();
 }
@@ -2182,12 +2201,13 @@ mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
 	CPU_ZERO(&kernel_pmap->pm_active);
 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
 #ifdef __powerpc64__
-	bzero(&pmap->pm_pp2d, sizeof(pte_t **) * PP2D_NENTRIES);
-	TAILQ_INIT(&pmap->pm_pdir_list);
+	pmap->pm_pp2d = uma_zalloc(ptbl_root_zone, M_WAITOK);
+	bzero(pmap->pm_pp2d, sizeof(pte_t **) * PP2D_NENTRIES);
 #else
-	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
-#endif
+	pmap->pm_pdir = uma_zalloc(ptbl_root_zone, M_WAITOK);
+	bzero(pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
 	TAILQ_INIT(&pmap->pm_ptbl_list);
+#endif
 }
 
 /*
@@ -2202,6 +2222,11 @@ mmu_booke_release(mmu_t mmu, pmap_t pmap)
 	KASSERT(pmap->pm_stats.resident_count == 0,
 	    ("pmap_release: pmap resident count %ld != 0",
 	    pmap->pm_stats.resident_count));
+#ifdef __powerpc64__
+	uma_zfree(ptbl_root_zone, pmap->pm_pp2d);
+#else
+	uma_zfree(ptbl_root_zone, pmap->pm_pdir);
+#endif
 }
 
 /*
@@ -2776,7 +2801,7 @@ mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
 			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
 				goto retry;
 			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
-			vm_page_wire(m);
+			m->wire_count++;
 		}
 	}

sys/powerpc/booke/trap_subr.S

@@ -796,22 +796,18 @@ pte_lookup:
 	rldicl	%r25, %r31, (64 - PP2D_H_L), (64 - PP2D_H_NUM)
 	rldimi	%r21, %r25, PP2D_L_NUM, (64 - (PP2D_L_NUM + PP2D_H_NUM))
 	slwi	%r21, %r21, PP2D_ENTRY_SHIFT	/* multiply by pp2d entry size */
-	addi	%r25, %r26, PM_PP2D		/* pmap pm_pp2d[] address */
-	add	%r25, %r25, %r21		/* offset within pm_pp2d[] table */
-	ld	%r25, 0(%r25)			/* get pdir address, i.e. pmap->pm_pp2d[pp2d_idx] */
+	ld	%r25, PM_PP2D(%r26)		/* pmap pm_pp2d[] address */
+	ldx	%r25, %r25, %r21		/* get pdir address, i.e. pmap->pm_pp2d[pp2d_idx] */
 
 	cmpdi	%r25, 0
-	beq	1f
+	beq	2f
 
 #if PAGE_SIZE < 65536
-	rldicl	%r21, %r31, (64 - PDIR_L), (64 - PDIR_NUM)	/* pdir offset */
-	slwi	%r21, %r21, PDIR_ENTRY_SHIFT	/* multiply by pdir entry size */
-	add	%r25, %r25, %r21		/* offset within pdir table */
-	ld	%r25, 0(%r25)			/* get ptbl address, i.e. pmap->pm_pp2d[pp2d_idx][pdir_idx] */
+	rldicl	%r21, %r31, (64 - PDIR_L), (64 - PDIR_NUM)	/* pdir offset */
+	slwi	%r21, %r21, PDIR_ENTRY_SHIFT	/* multiply by pdir entry size */
+	ldx	%r25, %r25, %r21		/* get ptbl address, i.e. pmap->pm_pp2d[pp2d_idx][pdir_idx] */
 
 	cmpdi	%r25, 0
-	beq	1f
 #endif
+	beq	2f
 
 	rldicl	%r21, %r31, (64 - PTBL_L), (64 - PTBL_NUM)	/* ptbl offset */
 	slwi	%r21, %r21, PTBL_ENTRY_SHIFT	/* multiply by pte entry size */
@@ -820,14 +816,13 @@ pte_lookup:
 	srwi	%r21, %r31, PDIR_SHIFT		/* pdir offset */
 	slwi	%r21, %r21, PDIR_ENTRY_SHIFT	/* multiply by pdir entry size */
-	addi	%r25, %r26, PM_PDIR		/* pmap pm_pdir[] address */
-	add	%r25, %r25, %r21		/* offset within pm_pdir[] table */
+	lwz	%r25, PM_PDIR(%r26)		/* pmap pm_pdir[] address */
 
 	/*
	 * Get ptbl address, i.e. pmap->pm_pdir[pdir_idx]
	 * This load may cause a Data TLB miss for non-kernel pmap!
	 */
-	LOAD	%r25, 0(%r25)
-	CMPI	%r25, 0
+	lwzx	%r25, %r25, %r21		/* offset within pm_pdir[] table */
+	cmpwi	%r25, 0
 	beq	2f
 
 	lis	%r21, PTBL_MASK@h
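
The assembly change above mirrors the structure change: because
pm_pp2d/pm_pdir is now a pointer rather than an array embedded in the pmap,
pte_lookup must first load the root pointer out of the pmap (ld/lwz at offset
PM_PP2D/PM_PDIR) and then do an indexed load (ldx/lwzx) into the table, where
the old code could form the entry's address directly inside the pmap with
addi/add and a single load. In rough C terms, for the 64-bit 4K-page path (an
illustrative sketch only; the real lookup is the assembly above, and
PP2D_IDX/PDIR_IDX/PTBL_IDX are the index macros the C pmap code uses):

	static pte_t *
	pte_lookup_sketch(pmap_t pmap, vm_offset_t va)
	{
		pte_t **pdir, *ptbl;

		/*
		 * ld %r25, PM_PP2D(%r26), then ldx %r25, %r25, %r21:
		 * two dependent loads instead of add + single load.
		 */
		pdir = pmap->pm_pp2d[PP2D_IDX(va)];
		if (pdir == NULL)
			return (NULL);
		/* This load may itself take a data TLB miss for a user pmap. */
		ptbl = pdir[PDIR_IDX(va)];
		if (ptbl == NULL)
			return (NULL);
		return (&ptbl[PTBL_IDX(va)]);
	}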

sys/powerpc/include/pmap.h

@@ -140,7 +140,6 @@ struct pmap {
 	struct mtx	pm_mtx;
 	cpuset_t	pm_active;
 	union {
-#ifdef AIM
 		struct {
 #ifdef __powerpc64__
@@ -154,8 +153,6 @@ struct pmap {
 			struct pmap	*pmap_phys;
 			struct pvo_tree pmap_pvo;
 		};
-#endif
-#ifdef BOOKE
 		struct {
 			/* TID to identify this pmap entries in TLB */
 			tlbtid_t	pm_tid[MAXCPU];
@@ -165,22 +162,18 @@ struct pmap {
 			 * Page table directory,
 			 * array of pointers to page directories.
 			 */
-			pte_t		**pm_pp2d[PP2D_NENTRIES];
-
-			/* List of allocated pdir bufs (pdir kva regions). */
-			TAILQ_HEAD(, ptbl_buf)	pm_pdir_list;
+			pte_t		***pm_pp2d;
 #else
 			/*
			 * Page table directory,
			 * array of pointers to page tables.
			 */
-			pte_t		*pm_pdir[PDIR_NENTRIES];
-#endif
+			pte_t		**pm_pdir;
 
 			/* List of allocated ptbl bufs (ptbl kva regions). */
 			TAILQ_HEAD(, ptbl_buf)	pm_ptbl_list;
-		};
 #endif
+		};
 	};
 };