Rewrite tid_flush() in C.

There's no need for it to be in asm.  Also, writing it in C and marking it
static in pmap.c lets the compiler inline it and saves the branch to the
function itself, since it's only used in one location.  The generated asm is
virtually identical to the handwritten code.
Justin Hibbits 2015-12-30 02:23:14 +00:00
parent 3461218b15
commit 459021cc7d
2 changed files with 47 additions and 73 deletions
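The "saves a branch" point relies on the compiler inlining a static function that has exactly one call site (GCC turns on -finline-functions-called-once at -O1 and above).  A minimal sketch of that effect, with invented names and not taken from this commit:

#include <stdio.h>

/*
 * Hypothetical example: bump() is static and called exactly once, so the
 * compiler folds its body into use_bump() and emits no call to it.
 */
static int
bump(int x)
{
        return (x + 1);
}

int
use_bump(int x)
{
        /* With -O2 this typically reduces to a single addi plus blr on powerpc. */
        return (bump(x));
}

int
main(void)
{
        printf("%d\n", use_bump(41));
        return (0);
}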


@@ -657,77 +657,6 @@ __boot_page_padding:
/* locore subroutines */
/************************************************************************/
/*
 * void tid_flush(tlbtid_t tid);
 *
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated for cases when invalidation(s) should NOT be propagated to other
 * CPUs.
 *
 * void tid_flush(tlbtid_t tid, int tlb0_ways, int tlb0_entries_per_way);
 *
 * XXX: why isn't this in C?
 */
ENTRY(tid_flush)
        cmpwi   %r3, TID_KERNEL
        beq     tid_flush_end   /* don't evict kernel translations */

        /* Disable interrupts */
        mfmsr   %r10
        wrteei  0

        li      %r6, 0          /* ways counter */
loop_ways:
        li      %r7, 0          /* entries [per way] counter */
loop_entries:
        /* Select TLB0 and ESEL (way) */
        lis     %r8, MAS0_TLBSEL0@h
        rlwimi  %r8, %r6, 16, 14, 15
        mtspr   SPR_MAS0, %r8
        isync

        /* Select EPN (entry within the way) */
        rlwinm  %r8, %r7, 12, 13, 19
        mtspr   SPR_MAS2, %r8
        isync
        tlbre

        /* Check if valid entry */
        mfspr   %r8, SPR_MAS1
        andis.  %r9, %r8, MAS1_VALID@h
        beq     next_entry      /* invalid entry */

        /* Check if this is our TID */
        rlwinm  %r9, %r8, 16, 24, 31
        cmplw   %r9, %r3
        bne     next_entry      /* not our TID */

        /* Clear VALID bit */
        rlwinm  %r8, %r8, 0, 1, 31
        mtspr   SPR_MAS1, %r8
        isync
        tlbwe
        isync
        msync

next_entry:
        addi    %r7, %r7, 1
        cmpw    %r7, %r5
        bne     loop_entries

        /* Next way */
        addi    %r6, %r6, 1
        cmpw    %r6, %r4
        bne     loop_ways

        /* Restore MSR (possibly re-enable interrupts) */
        mtmsr   %r10
        isync
tid_flush_end:
        blr

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.

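In the removed routine, the way and entry indices are packed into MAS0 and MAS2 with rotate-and-mask instructions; the C replacement spells the same fields out with MAS0_ESEL() and MAS2_TLB0_ENTRY_IDX_SHIFT.  Below is a small host-side sketch of the bit layout those two instructions produce; the field positions are my reading of the e500 MAS registers, not something this diff states:

#include <stdint.h>
#include <stdio.h>

/*
 * rlwimi %r8, %r6, 16, 14, 15 inserts the 2-bit way number at bit 16 of
 * MAS0 (the ESEL field, with TLBSEL left at 0 to select TLB0), and
 * rlwinm %r8, %r7, 12, 13, 19 places the entry index in bits 12..18 of
 * MAS2 (the low EPN bits that pick a set within the way).
 */
static uint32_t
mas0_for_way(uint32_t way)
{
        return ((way << 16) & 0x00030000);
}

static uint32_t
mas2_for_entry(uint32_t entry)
{
        return ((entry << 12) & 0x0007f000);
}

int
main(void)
{
        printf("way 1, entry 5 -> MAS0 0x%08x, MAS2 0x%08x\n",
            mas0_for_way(1), mas2_for_entry(5));
        return (0);
}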

@@ -161,7 +161,6 @@ unsigned int kernel_ptbls; /* Number of KVA ptbls. */
#define PMAP_REMOVE_DONE(pmap) \
        ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
extern void tid_flush(tlbtid_t tid, int tlb0_ways, int tlb0_entries_per_way);
extern int elf32_nxstack;
/**************************************************************************/
@@ -195,6 +194,7 @@ static unsigned int tlb1_idx;
static vm_offset_t tlb1_map_base = VM_MAX_KERNEL_ADDRESS;
static tlbtid_t tid_alloc(struct pmap *);
static void tid_flush(tlbtid_t tid);
static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
@@ -2915,7 +2915,7 @@ tid_alloc(pmap_t pmap)
                tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;

                /* Flush all entries from TLB0 matching this TID. */
                tid_flush(tid, tlb0_ways, tlb0_entries_per_way);
                tid_flush(tid);
        }

        tidbusy[thiscpu][tid] = pmap;
@@ -3426,3 +3426,48 @@ tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
        *va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
        return (0);
}

/*
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated for cases when invalidations should NOT be propagated to other
 * CPUs.
 */
static void
tid_flush(tlbtid_t tid)
{
        register_t msr;
        uint32_t mas0, mas1, mas2;
        int entry, way;

        /* Don't evict kernel translations */
        if (tid == TID_KERNEL)
                return;

        msr = mfmsr();
        __asm __volatile("wrteei 0");

        for (way = 0; way < TLB0_WAYS; way++)
                for (entry = 0; entry < TLB0_ENTRIES_PER_WAY; entry++) {
                        mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
                        mtspr(SPR_MAS0, mas0);
                        __asm __volatile("isync");

                        mas2 = entry << MAS2_TLB0_ENTRY_IDX_SHIFT;
                        mtspr(SPR_MAS2, mas2);
                        __asm __volatile("isync; tlbre");

                        mas1 = mfspr(SPR_MAS1);

                        if (!(mas1 & MAS1_VALID))
                                continue;
                        if (((mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT) != tid)
                                continue;
                        mas1 &= ~MAS1_VALID;
                        mtspr(SPR_MAS1, mas1);
                        __asm __volatile("isync; tlbwe; isync; msync");
                }
        mtmsr(msr);
}
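Since the replacement is plain C, its control flow can be exercised outside the kernel.  The mock below is an illustration only; the FAKE_* constants and the array standing in for TLB0 are invented, and the real function reads and writes the MMU through tlbre/tlbwe instead:

#include <stdint.h>
#include <stdio.h>

#define FAKE_WAYS               2
#define FAKE_ENTRIES_PER_WAY    4
#define FAKE_VALID              0x80000000u
#define FAKE_TID_SHIFT          16
#define FAKE_TID_MASK           (0xffu << FAKE_TID_SHIFT)

/* Fake TLB0: one word per slot plays the role of MAS1. */
static uint32_t fake_tlb[FAKE_WAYS][FAKE_ENTRIES_PER_WAY];

static void
fake_tid_flush(uint32_t tid)
{
        uint32_t mas1;
        int entry, way;

        for (way = 0; way < FAKE_WAYS; way++)
                for (entry = 0; entry < FAKE_ENTRIES_PER_WAY; entry++) {
                        mas1 = fake_tlb[way][entry];    /* stands in for tlbre */
                        if (!(mas1 & FAKE_VALID))
                                continue;               /* empty slot */
                        if (((mas1 & FAKE_TID_MASK) >> FAKE_TID_SHIFT) != tid)
                                continue;               /* another address space */
                        fake_tlb[way][entry] = mas1 & ~FAKE_VALID; /* "tlbwe" */
                        printf("invalidated way %d entry %d\n", way, entry);
                }
}

int
main(void)
{
        fake_tlb[0][1] = FAKE_VALID | (3u << FAKE_TID_SHIFT);
        fake_tlb[1][2] = FAKE_VALID | (7u << FAKE_TID_SHIFT);
        fake_tid_flush(3);      /* only way 0, entry 1 matches */
        return (0);
}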