Fix a problem where kernel text could become unmapped when clearing out all the user mappings from the TLB due to the context numbers rolling over.

The store to the internal MMU register must be followed by a membar #Sync before much else happens, to "avoid data corruption", so use special inlines which both disable interrupts and ensure that the compiler will not insert extra instructions between the store and the membar. Also, load the TTE tag and check whether the context is the nucleus context, rather than relying on the privileged bit, which doesn't actually serve any purpose in our design, and check the lock bit too for sanity.
parent 2f3e2b8795
commit bfd501b637
@@ -479,21 +479,22 @@ void
 pmap_context_rollover(void)
 {
 	u_long data;
+	u_long tag;
 	int i;
 
 	mtx_assert(&sched_lock, MA_OWNED);
 	CTR0(KTR_PMAP, "pmap_context_rollover");
 	for (i = 0; i < 64; i++) {
 		data = ldxa(TLB_DAR_SLOT(i), ASI_DTLB_DATA_ACCESS_REG);
-		if ((data & TD_V) != 0 && (data & TD_P) == 0) {
-			stxa(TLB_DAR_SLOT(i), ASI_DTLB_DATA_ACCESS_REG, 0);
-			membar(Sync);
-		}
+		tag = ldxa(TLB_DAR_SLOT(i), ASI_DTLB_TAG_READ_REG);
+		if ((data & TD_V) != 0 && (data & TD_L) == 0 &&
+		    TLB_TAR_CTX(tag) != TLB_CTX_KERNEL)
+			stxa_sync(TLB_DAR_SLOT(i), ASI_DTLB_DATA_ACCESS_REG, 0);
 		data = ldxa(TLB_DAR_SLOT(i), ASI_ITLB_DATA_ACCESS_REG);
-		if ((data & TD_V) != 0 && (data & TD_P) == 0) {
-			stxa(TLB_DAR_SLOT(i), ASI_ITLB_DATA_ACCESS_REG, 0);
-			membar(Sync);
-		}
+		tag = ldxa(TLB_DAR_SLOT(i), ASI_ITLB_TAG_READ_REG);
+		if ((data & TD_V) != 0 && (data & TD_L) == 0 &&
+		    TLB_TAR_CTX(tag) != TLB_CTX_KERNEL)
+			stxa_sync(TLB_DAR_SLOT(i), ASI_ITLB_DATA_ACCESS_REG, 0);
 	}
 	PCPU_SET(tlb_ctx, PCPU_GET(tlb_ctx_min));
 }
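Read as a predicate, the new per-entry test demaps a slot only if it is valid, not locked, and its tag does not carry the kernel (nucleus) context. The helper below is purely illustrative and not part of the commit (the committed code writes the test inline); TD_V, TD_L, TLB_TAR_CTX() and TLB_CTX_KERNEL are the sparc64 definitions used in the hunk above.

static __inline int
tlb_slot_is_user_mapping(u_long data, u_long tag)
{

	/* Valid entry, not pinned by the lock bit, and not nucleus context. */
	return ((data & TD_V) != 0 && (data & TD_L) == 0 &&
	    TLB_TAR_CTX(tag) != TLB_CTX_KERNEL);
}

Keying off the tag's context field rather than the TTE privileged bit is what keeps the locked kernel-text entries resident across the rollover: kernel mappings carry the nucleus context, whereas the privileged bit says nothing about which context owns the entry.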