Merge AIM and Book-E PCPU fields

This is part of a long-term goal of merging Book-E and AIM into a single GENERIC
kernel.  As more work is done, the struct may be optimized further.

Reviewed by:	nwhitehorn
Justin Hibbits 2018-02-17 20:59:12 +00:00
parent e35dc5149d
commit bce6d88bc1
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=329469
11 changed files with 72 additions and 81 deletions
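
For orientation before the per-file hunks: the machine-dependent per-CPU fields for AIM and Book-E now live in two structs inside one anonymous union in struct pcpu, so the same layout can serve either flavor, a prerequisite for the single GENERIC kernel mentioned above. A simplified sketch of the resulting shape (field lists abbreviated and stand-in typedefs added so the fragment compiles on its own; the authoritative definitions are the PCPU_MD_*_FIELDS macros in the pcpu header hunk below):

#include <stdint.h>

/* Stand-ins for the real kernel types, for illustration only. */
typedef unsigned long vm_offset_t;
struct slb { unsigned long slbv, slbe; };
struct pvo_entry;

struct pcpu_sketch {
	/* ... machine-independent fields and PCPU_MD_COMMON_FIELDS ... */
	vm_offset_t pc_qmap_addr;	/* quick-map VA, now common to both */
	union {				/* anonymous union: one flavor is live per kernel */
		struct {
			struct slb slb[64];	/* AIM64 kernel SLB cache */
			struct slb **userslb;
			struct pvo_entry *qmap_pvo;
			/* ... slbsave, slbstack, qmap_lock, padding ... */
		} pc_aim;
		struct {
			uintptr_t *tlb_lock;	/* Book-E TLB miss lock */
			int tid_next;
			/* ... critsave, mchksave, tlbsave, tlb_level, padding ... */
		} pc_booke;
	};
};

Accessors follow the same renaming throughout the hunks below: PCPU_GET(slb) becomes PCPU_GET(aim.slb), pc->pc_booke_tlb_lock becomes pc->pc_booke.tlb_lock, and so on.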

View File

@@ -442,7 +442,7 @@ cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{
#ifdef __powerpc64__
/* Copy the SLB contents from the current CPU */
-memcpy(pcpu->pc_slb, PCPU_GET(slb), sizeof(pcpu->pc_slb));
+memcpy(pcpu->pc_aim.slb, PCPU_GET(aim.slb), sizeof(pcpu->pc_aim.slb));
#endif
}

View File

@@ -601,7 +601,7 @@ moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
uint64_t esid, slbe;
uint64_t i;
-cache = PCPU_GET(slb);
+cache = PCPU_GET(aim.slb);
esid = va >> ADDR_SR_SHFT;
slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
@@ -840,8 +840,8 @@ moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
*/
#ifdef __powerpc64__
for (i = 0; i < 64; i++) {
-pcpup->pc_slb[i].slbv = 0;
-pcpup->pc_slb[i].slbe = 0;
+pcpup->pc_aim.slb[i].slbv = 0;
+pcpup->pc_aim.slb[i].slbe = 0;
}
#else
for (i = 0; i < 16; i++)
@@ -1017,9 +1017,10 @@ moea64_pmap_init_qpages(void)
if (pc->pc_qmap_addr == 0)
panic("pmap_init_qpages: unable to allocate KVA");
PMAP_LOCK(kernel_pmap);
-pc->pc_qmap_pvo = moea64_pvo_find_va(kernel_pmap, pc->pc_qmap_addr);
+pc->pc_aim.qmap_pvo =
+moea64_pvo_find_va(kernel_pmap, pc->pc_qmap_addr);
PMAP_UNLOCK(kernel_pmap);
-mtx_init(&pc->pc_qmap_lock, "qmap lock", NULL, MTX_DEF);
+mtx_init(&pc->pc_aim.qmap_lock, "qmap lock", NULL, MTX_DEF);
}
}
@@ -1038,7 +1039,7 @@ moea64_activate(mmu_t mmu, struct thread *td)
CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
#ifdef __powerpc64__
-PCPU_SET(userslb, pm->pm_slb);
+PCPU_SET(aim.userslb, pm->pm_slb);
__asm __volatile("slbmte %0, %1; isync" ::
"r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
#else
@@ -1057,7 +1058,7 @@ moea64_deactivate(mmu_t mmu, struct thread *td)
pm = &td->td_proc->p_vmspace->vm_pmap;
CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
#ifdef __powerpc64__
-PCPU_SET(userslb, NULL);
+PCPU_SET(aim.userslb, NULL);
#else
PCPU_SET(curpmap, NULL);
#endif
@@ -1153,12 +1154,12 @@ moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
while (xfersize > 0) {
a_pg_offset = a_offset & PAGE_MASK;
cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
-a_cp = (char *)PHYS_TO_DMAP(
+a_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])) +
a_pg_offset;
b_pg_offset = b_offset & PAGE_MASK;
cnt = min(cnt, PAGE_SIZE - b_pg_offset);
-b_cp = (char *)PHYS_TO_DMAP(
+b_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])) +
b_pg_offset;
bcopy(a_cp, b_cp, cnt);
@@ -1219,7 +1220,7 @@ moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
panic("moea64_zero_page: size + off > PAGE_SIZE");
if (hw_direct_map) {
-bzero((caddr_t)PHYS_TO_DMAP(pa) + off, size);
+bzero((caddr_t)(uintptr_t)PHYS_TO_DMAP(pa) + off, size);
} else {
mtx_lock(&moea64_scratchpage_mtx);
moea64_set_scratchpage_pa(mmu, 0, pa);
@@ -1270,10 +1271,10 @@ moea64_quick_enter_page(mmu_t mmu, vm_page_t m)
*/
sched_pin();
-mtx_assert(PCPU_PTR(qmap_lock), MA_NOTOWNED);
-pvo = PCPU_GET(qmap_pvo);
+mtx_assert(PCPU_PTR(aim.qmap_lock), MA_NOTOWNED);
+pvo = PCPU_GET(aim.qmap_pvo);
-mtx_lock(PCPU_PTR(qmap_lock));
+mtx_lock(PCPU_PTR(aim.qmap_lock));
pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) |
(uint64_t)pa;
MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_INVALIDATE);
@@ -1288,10 +1289,10 @@ moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr)
if (hw_direct_map)
return;
-mtx_assert(PCPU_PTR(qmap_lock), MA_OWNED);
+mtx_assert(PCPU_PTR(aim.qmap_lock), MA_OWNED);
KASSERT(PCPU_GET(qmap_addr) == addr,
("moea64_quick_remove_page: invalid address"));
-mtx_unlock(PCPU_PTR(qmap_lock));
+mtx_unlock(PCPU_PTR(aim.qmap_lock));
sched_unpin();
}
@@ -1417,11 +1418,11 @@ moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
* If PMAP is not bootstrapped, we are likely to be
* in real mode.
*/
-__syncicache((void *)pa, sz);
+__syncicache((void *)(uintptr_t)pa, sz);
} else if (pmap == kernel_pmap) {
__syncicache((void *)va, sz);
} else if (hw_direct_map) {
-__syncicache((void *)PHYS_TO_DMAP(pa), sz);
+__syncicache((void *)(uintptr_t)PHYS_TO_DMAP(pa), sz);
} else {
/* Use the scratch page to set up a temp mapping */
@@ -1802,8 +1803,8 @@ moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
}
if (error != 0 && error != ENOENT)
-panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
-pa, error);
+panic("moea64_kenter: failed to enter va %#zx pa %#jx: %d", va,
+(uintmax_t)pa, error);
}
void
@@ -2767,7 +2768,7 @@ void
moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
{
-*va = (void *)pa;
+*va = (void *)(uintptr_t)pa;
}
extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
@@ -2799,7 +2800,7 @@ moea64_scan_init(mmu_t mmu)
dump_map[0].pa_start;
/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
-dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr;
+dump_map[1].pa_start = (vm_paddr_t)(uintptr_t)msgbufp->msg_ptr;
dump_map[1].pa_size = round_page(msgbufp->msg_size);
/* 3rd: kernel VM. */
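
A side note on the (uintptr_t) casts added above: mmu_oea64 is also compiled into 32-bit kernels (for 64-bit-capable CPUs such as the 970), where the physical-address type can be wider than a pointer, and casting such a value straight to a pointer (or a pointer straight to the wider integer, as with msgbufp->msg_ptr) draws a "different size" diagnostic; going through uintptr_t makes the narrowing explicit. A minimal, hypothetical illustration of the pattern, with paddr_t standing in for vm_paddr_t:

#include <stdint.h>

typedef uint64_t paddr_t;	/* stand-in: 64-bit physical addresses */

static void *
paddr_to_ptr(paddr_t pa)
{
	/*
	 * (void *)pa alone would warn when pointers are 32 bits wide;
	 * the intermediate uintptr_t cast states that the truncation is
	 * intentional, which is the same idea the hunks above apply to
	 * PHYS_TO_DMAP() results and to real-mode addresses.
	 */
	return ((void *)(uintptr_t)pa);
}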

View File

@@ -365,7 +365,7 @@ moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
{
int i = 0;
#ifdef __powerpc64__
-struct slb *slb = PCPU_GET(slb);
+struct slb *slb = PCPU_GET(aim.slb);
register_t seg0;
#endif

View File

@@ -426,7 +426,7 @@ slb_insert_kernel(uint64_t slbe, uint64_t slbv)
/* We don't want to be preempted while modifying the kernel map */
critical_enter();
-slbcache = PCPU_GET(slb);
+slbcache = PCPU_GET(aim.slb);
/* Check for an unused slot, abusing the user slot as a full flag */
if (slbcache[USER_SLB_SLOT].slbe == 0) {

View File

@@ -380,14 +380,14 @@ void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{
-pcpu->pc_tid_next = TID_MIN;
+pcpu->pc_booke.tid_next = TID_MIN;
#ifdef SMP
uintptr_t *ptr;
int words_per_gran = RES_GRANULE / sizeof(uintptr_t);
ptr = &tlb0_miss_locks[cpuid * words_per_gran];
-pcpu->pc_booke_tlb_lock = ptr;
+pcpu->pc_booke.tlb_lock = ptr;
*ptr = TLB_UNLOCKED;
*(ptr + 1) = 0; /* recurse counter */
#endif

View File

@@ -501,12 +501,12 @@ tlb_miss_lock(void)
if (pc != pcpup) {
CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
-"tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock);
+"tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke.tlb_lock);
KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
("tlb_miss_lock: tried to lock self"));
-tlb_lock(pc->pc_booke_tlb_lock);
+tlb_lock(pc->pc_booke.tlb_lock);
CTR1(KTR_PMAP, "%s: locked", __func__);
}
@@ -528,7 +528,7 @@ tlb_miss_unlock(void)
CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
__func__, pc->pc_cpuid);
-tlb_unlock(pc->pc_booke_tlb_lock);
+tlb_unlock(pc->pc_booke.tlb_lock);
CTR1(KTR_PMAP, "%s: unlocked", __func__);
}
@@ -3738,10 +3738,10 @@ tid_alloc(pmap_t pmap)
thiscpu = PCPU_GET(cpuid);
-tid = PCPU_GET(tid_next);
+tid = PCPU_GET(booke.tid_next);
if (tid > TID_MAX)
tid = TID_MIN;
-PCPU_SET(tid_next, tid + 1);
+PCPU_SET(booke.tid_next, tid + 1);
/* If we are stealing TID then clear the relevant pmap's field */
if (tidbusy[thiscpu][tid] != NULL) {
@@ -3759,7 +3759,7 @@ tid_alloc(pmap_t pmap)
__asm __volatile("msync; isync");
CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
-PCPU_GET(tid_next));
+PCPU_GET(booke.tid_next));
return (tid);
}

View File

@@ -52,23 +52,22 @@ struct pvo_entry;
register_t pc_tempsave[CPUSAVE_LEN]; \
register_t pc_disisave[CPUSAVE_LEN]; \
register_t pc_dbsave[CPUSAVE_LEN]; \
-void *pc_restore;
+void *pc_restore; \
+vm_offset_t pc_qmap_addr;
#define PCPU_MD_AIM32_FIELDS \
-vm_offset_t pc_qmap_addr; \
-struct pvo_entry *pc_qmap_pvo; \
-struct mtx pc_qmap_lock; \
-char __pad[128]
+struct pvo_entry *qmap_pvo; \
+struct mtx qmap_lock; \
+char __pad[128];
#define PCPU_MD_AIM64_FIELDS \
-struct slb pc_slb[64]; \
-struct slb **pc_userslb; \
-register_t pc_slbsave[18]; \
-uint8_t pc_slbstack[1024]; \
-vm_offset_t pc_qmap_addr; \
-struct pvo_entry *pc_qmap_pvo; \
-struct mtx pc_qmap_lock; \
-char __pad[1345]
+struct slb slb[64]; \
+struct slb **userslb; \
+register_t slbsave[18]; \
+uint8_t slbstack[1024]; \
+struct pvo_entry *qmap_pvo; \
+struct mtx qmap_lock; \
+char __pad[1345];
#ifdef __powerpc64__
#define PCPU_MD_AIM_FIELDS PCPU_MD_AIM64_FIELDS
@@ -87,14 +86,13 @@ struct pvo_entry;
#define BOOKE_PCPU_PAD 429
#endif
#define PCPU_MD_BOOKE_FIELDS \
-register_t pc_booke_critsave[BOOKE_CRITSAVE_LEN]; \
-register_t pc_booke_mchksave[CPUSAVE_LEN]; \
-register_t pc_booke_tlbsave[BOOKE_TLBSAVE_LEN]; \
-register_t pc_booke_tlb_level; \
-vm_offset_t pc_qmap_addr; \
-uintptr_t *pc_booke_tlb_lock; \
-int pc_tid_next; \
-char __pad[BOOKE_PCPU_PAD]
+register_t critsave[BOOKE_CRITSAVE_LEN]; \
+register_t mchksave[CPUSAVE_LEN]; \
+register_t tlbsave[BOOKE_TLBSAVE_LEN]; \
+register_t tlb_level; \
+uintptr_t *tlb_lock; \
+int tid_next; \
+char __pad[BOOKE_PCPU_PAD];
/* Definitions for register offsets within the exception tmp save areas */
#define CPUSAVE_R27 0 /* where r27 gets saved */
@@ -129,24 +127,16 @@ struct pvo_entry;
#define TLBSAVE_BOOKE_R30 14
#define TLBSAVE_BOOKE_R31 15
-#ifdef AIM
#define PCPU_MD_FIELDS \
PCPU_MD_COMMON_FIELDS \
-PCPU_MD_AIM_FIELDS
-#endif
-#if defined(BOOKE)
-#define PCPU_MD_FIELDS \
-PCPU_MD_COMMON_FIELDS \
-PCPU_MD_BOOKE_FIELDS
-#endif
-/*
-* Catch-all for ports (e.g. lsof, used by gtop)
-*/
-#ifndef PCPU_MD_FIELDS
-#define PCPU_MD_FIELDS \
-int pc_md_placeholder[32]
-#endif
+union { \
+struct { \
+PCPU_MD_AIM_FIELDS \
+} pc_aim; \
+struct { \
+PCPU_MD_BOOKE_FIELDS \
+} pc_booke; \
+}
#ifdef _KERNEL
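
The dotted accessor arguments work because the powerpc PCPU_GET/PCPU_SET/PCPU_PTR macros paste a pc_ prefix onto whatever member designator they are given, so PCPU_GET(aim.slb) reaches pc_aim.slb inside the union; that is also why the field names inside the new union drop their pc_ prefixes while the union members themselves are named pc_aim and pc_booke. A rough sketch of the mechanism, with simplified stand-in definitions rather than the real kernel macros:

/* Stand-in types; the real struct pcpu and pcpup come from the kernel. */
struct slb { unsigned long slbv, slbe; };
struct pcpu_sketch {
	union {
		struct { struct slb slb[64]; struct slb **userslb; } pc_aim;
		struct { int tid_next; } pc_booke;
	};
};
static struct pcpu_sketch *pcpup;

/* Token-pasting accessors in the style of the powerpc pcpu macros. */
#define PCPU_GET(member)	(pcpup->pc_ ## member)
#define PCPU_PTR(member)	(&pcpup->pc_ ## member)
#define PCPU_SET(member, val)	(pcpup->pc_ ## member = (val))

/*
 * PCPU_GET(aim.slb)           -> pcpup->pc_aim.slb
 * PCPU_SET(booke.tid_next, 1) -> pcpup->pc_booke.tid_next = 1
 * The ## operator pastes "pc_" onto the first token only ("aim" or
 * "booke"); the rest of the member path follows unchanged.
 */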

View File

@@ -66,11 +66,11 @@ ASSYM(PC_DBSAVE, offsetof(struct pcpu, pc_dbsave));
ASSYM(PC_RESTORE, offsetof(struct pcpu, pc_restore));
#if defined(BOOKE)
-ASSYM(PC_BOOKE_CRITSAVE, offsetof(struct pcpu, pc_booke_critsave));
-ASSYM(PC_BOOKE_MCHKSAVE, offsetof(struct pcpu, pc_booke_mchksave));
-ASSYM(PC_BOOKE_TLBSAVE, offsetof(struct pcpu, pc_booke_tlbsave));
-ASSYM(PC_BOOKE_TLB_LEVEL, offsetof(struct pcpu, pc_booke_tlb_level));
-ASSYM(PC_BOOKE_TLB_LOCK, offsetof(struct pcpu, pc_booke_tlb_lock));
+ASSYM(PC_BOOKE_CRITSAVE, offsetof(struct pcpu, pc_booke.critsave));
+ASSYM(PC_BOOKE_MCHKSAVE, offsetof(struct pcpu, pc_booke.mchksave));
+ASSYM(PC_BOOKE_TLBSAVE, offsetof(struct pcpu, pc_booke.tlbsave));
+ASSYM(PC_BOOKE_TLB_LEVEL, offsetof(struct pcpu, pc_booke.tlb_level));
+ASSYM(PC_BOOKE_TLB_LOCK, offsetof(struct pcpu, pc_booke.tlb_lock));
#endif
ASSYM(CPUSAVE_R27, CPUSAVE_R27*sizeof(register_t));
@@ -109,10 +109,10 @@ ASSYM(MTX_LOCK, offsetof(struct mtx, mtx_lock));
#if defined(AIM)
ASSYM(USER_ADDR, USER_ADDR);
#ifdef __powerpc64__
-ASSYM(PC_KERNSLB, offsetof(struct pcpu, pc_slb));
-ASSYM(PC_USERSLB, offsetof(struct pcpu, pc_userslb));
-ASSYM(PC_SLBSAVE, offsetof(struct pcpu, pc_slbsave));
-ASSYM(PC_SLBSTACK, offsetof(struct pcpu, pc_slbstack));
+ASSYM(PC_KERNSLB, offsetof(struct pcpu, pc_aim.slb));
+ASSYM(PC_USERSLB, offsetof(struct pcpu, pc_aim.userslb));
+ASSYM(PC_SLBSAVE, offsetof(struct pcpu, pc_aim.slbsave));
+ASSYM(PC_SLBSTACK, offsetof(struct pcpu, pc_aim.slbstack));
ASSYM(USER_SLB_SLOT, USER_SLB_SLOT);
ASSYM(USER_SLB_SLBE, USER_SLB_SLBE);
ASSYM(SEGMENT_MASK, SEGMENT_MASK);
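
The genassym changes above only adjust the C expressions handed to offsetof(); the generated assembler constants keep their names and are regenerated from the new layout at build time, so assembly that consumes PC_BOOKE_* or PC_KERNSLB by name should need no matching edits even where the underlying offsets shift. The spelling pc_booke.critsave works because offsetof() accepts a dotted member designator. A small, hypothetical demonstration of that (mini_pcpu is a made-up placeholder, not the kernel structure):

#include <stdio.h>
#include <stddef.h>

struct mini_pcpu {
	long pc_other;			/* some earlier field */
	union {
		struct { long critsave[4]; long mchksave[2]; } pc_booke;
	};
};

int
main(void)
{
	/* Same trick genassym.c relies on: offsetof() with a dotted
	 * member designator reaches into the named union member, so the
	 * constant can be recomputed from the union-based layout without
	 * touching the assembly that uses it. */
	printf("PC_BOOKE_MCHKSAVE = %zu\n",
	    offsetof(struct mini_pcpu, pc_booke.mchksave));
	return (0);
}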

View File

@@ -652,7 +652,7 @@ handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
int i;
addr = (type == EXC_ISE) ? srr0 : dar;
-slbcache = PCPU_GET(slb);
+slbcache = PCPU_GET(aim.slb);
esid = (uintptr_t)addr >> ADDR_SR_SHFT;
slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

View File

@@ -121,7 +121,7 @@ mps3_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
static void
mps3_cpu_bootstrap(mmu_t mmup, int ap)
{
-struct slb *slb = PCPU_GET(slb);
+struct slb *slb = PCPU_GET(aim.slb);
register_t seg0;
int i;

View File

@@ -221,7 +221,7 @@ mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
static void
mphyp_cpu_bootstrap(mmu_t mmup, int ap)
{
-struct slb *slb = PCPU_GET(slb);
+struct slb *slb = PCPU_GET(aim.slb);
register_t seg0;
int i;