powerpc/radix mmu: No need for delayed TLB invalidation
x86 needs delayed TLB invalidation because invalidation requires an expensive IPI. PowerPC has had a TLB invalidation instruction since the POWER1 in 1990, so there's no need to delay anything.
This commit is contained in:
parent f464bd04ce
commit baeeef1d8f
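For context, here is a minimal sketch (illustrative only, not code from the tree) of the delayed-invalidation pattern that the hunks below delete. The pmap_delayed_invl_*() helpers wrapped FreeBSD's epoch(9) API so that a freed page could not be reused until every thread that might still hold a stale translation had left its invalidation block; demo_remove_range() and demo_free_page() are hypothetical stand-ins for pmap_remove()-style callers. On the radix pmap this bracketing buys nothing, because pmap_invalidate_page()/pmap_invalidate_all() broadcast a tlbie before the pmap lock is dropped, so no CPU can still be using a stale entry once the mapping is gone.

/*
 * Illustrative sketch only; the pmap_delayed_invl_*() helpers are the ones
 * removed by this commit, demo_* functions are hypothetical.
 */
static void
demo_remove_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	struct epoch_tracker et;

	pmap_delayed_invl_started(&et);	/* epoch_enter_preempt(pmap_epoch, &et) */
	PMAP_LOCK(pmap);
	/* ... tear down the PTEs covering [sva, eva) ... */
	pmap_invalidate_all(pmap);	/* radix: broadcast tlbie, visible everywhere before unlock */
	PMAP_UNLOCK(pmap);
	pmap_delayed_invl_finished(&et);	/* epoch_exit_preempt(pmap_epoch, &et) */
}

static void
demo_free_page(vm_page_t m)
{

	/* Page-freeing side: wait for every open delayed-invalidation block. */
	pmap_delayed_invl_wait(m);	/* epoch_wait_preempt(pmap_epoch) */
	vm_page_free(m);
}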
@@ -33,7 +33,6 @@ __FBSDID("$FreeBSD$");
 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/systm.h>
-#include <sys/epoch.h>
 #include <sys/conf.h>
 #include <sys/bitstring.h>
 #include <sys/queue.h>
@@ -406,7 +405,6 @@ static u_int64_t KPTphys; /* phys addr of kernel level 1 */
 
 static vm_offset_t qframe = 0;
 static struct mtx qframe_mtx;
-static epoch_t pmap_epoch;
 
 void mmu_radix_activate(mmu_t mmu, struct thread *);
 void mmu_radix_advise(mmu_t mmu, pmap_t, vm_offset_t, vm_offset_t, int);
@@ -849,43 +847,6 @@ pa_cmp(const void *a, const void *b)
	 PG_M | PG_A | RPTE_EAA_MASK | PG_V)
 
 
-static void
-pmap_epoch_init(void *arg __unused)
-{
-	pmap_epoch = epoch_alloc("pmap", EPOCH_PREEMPT | EPOCH_LOCKED);
-}
-SYSINIT(epoch, SI_SUB_EPOCH + 1, SI_ORDER_ANY, pmap_epoch_init, NULL);
-
-static bool
-pmap_not_in_di(void)
-{
-
-	return (curthread->td_md.md_invl_gen.gen == 0);
-}
-
-#define PMAP_ASSERT_NOT_IN_DI() \
-	KASSERT(pmap_not_in_di(), ("DI already started"))
-
-static void
-pmap_delayed_invl_started(epoch_tracker_t et)
-{
-	epoch_enter_preempt(pmap_epoch, et);
-	curthread->td_md.md_invl_gen.gen = 1;
-}
-
-static void
-pmap_delayed_invl_finished(epoch_tracker_t et)
-{
-	curthread->td_md.md_invl_gen.gen = 0;
-	epoch_exit_preempt(pmap_epoch, et);
-}
-
-static void
-pmap_delayed_invl_wait(vm_page_t m __unused)
-{
-	epoch_wait_preempt(pmap_epoch);
-}
-
 static __inline void
 pmap_resident_count_inc(pmap_t pmap, int count)
 {
@@ -1370,8 +1331,7 @@ pmap_pv_demote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
 }
 
 static void
-reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap, bool start_di,
-    epoch_tracker_t et)
+reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap)
 {
 
 	if (pmap == NULL)
@@ -1379,8 +1339,6 @@ reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap, bool start_di,
	pmap_invalidate_all(pmap);
	if (pmap != locked_pmap)
		PMAP_UNLOCK(pmap);
-	if (start_di)
-		pmap_delayed_invl_finished(et);
 }
 
 /*
@@ -1410,8 +1368,6 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
	struct spglist free;
	uint64_t inuse;
	int bit, field, freed;
-	bool start_di;
-	struct epoch_tracker et;
 
	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
	KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
@@ -1423,13 +1379,6 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
	pc_marker = (struct pv_chunk *)&pc_marker_b;
	pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
 
-	/*
-	 * A delayed invalidation block should already be active if
-	 * pmap_advise() or pmap_remove() called this function by way
-	 * of pmap_demote_l3e_locked().
-	 */
-	start_di = pmap_not_in_di();
-
	mtx_lock(&pv_chunks_mutex);
	active_reclaims++;
	TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
@@ -1454,21 +1403,16 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
		 * corresponding pmap is locked.
		 */
		if (pmap != next_pmap) {
-			reclaim_pv_chunk_leave_pmap(pmap, locked_pmap,
-			    start_di, &et);
+			reclaim_pv_chunk_leave_pmap(pmap, locked_pmap);
			pmap = next_pmap;
			/* Avoid deadlock and lock recursion. */
			if (pmap > locked_pmap) {
				RELEASE_PV_LIST_LOCK(lockp);
				PMAP_LOCK(pmap);
-				if (start_di)
-					pmap_delayed_invl_started(&et);
				mtx_lock(&pv_chunks_mutex);
				continue;
			} else if (pmap != locked_pmap) {
				if (PMAP_TRYLOCK(pmap)) {
-					if (start_di)
-						pmap_delayed_invl_started(&et);
					mtx_lock(&pv_chunks_mutex);
					continue;
				} else {
@@ -1480,8 +1424,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
						continue;
					goto next_chunk;
				}
-			} else if (start_di)
-				pmap_delayed_invl_started(&et);
+			}
		}
 
		/*
@@ -1570,7 +1513,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
	TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
	active_reclaims--;
	mtx_unlock(&pv_chunks_mutex);
-	reclaim_pv_chunk_leave_pmap(pmap, locked_pmap, start_di, &et);
+	reclaim_pv_chunk_leave_pmap(pmap, locked_pmap);
	if (m_pc == NULL && !SLIST_EMPTY(&free)) {
		m_pc = SLIST_FIRST(&free);
		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
@@ -2248,12 +2191,10 @@ mmu_radix_advise(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
	vm_offset_t va, va_next;
	vm_page_t m;
	boolean_t anychanged;
-	struct epoch_tracker et;
 
	if (advice != MADV_DONTNEED && advice != MADV_FREE)
		return;
	anychanged = FALSE;
-	pmap_delayed_invl_started(&et);
	PMAP_LOCK(pmap);
	for (; sva < eva; sva = va_next) {
		l1e = pmap_pml1e(pmap, sva);
@@ -2347,7 +2288,6 @@ mmu_radix_advise(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
	if (anychanged)
		pmap_invalidate_all(pmap);
	PMAP_UNLOCK(pmap);
-	pmap_delayed_invl_finished(&et);
 }
 
 /*
@@ -3176,7 +3116,6 @@ pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde, u_int flags,
	struct spglist free;
	pml3_entry_t oldl3e, *l3e;
	vm_page_t mt, pdpg;
-	struct epoch_tracker et;
 
	KASSERT((newpde & (PG_M | PG_RW)) != PG_RW,
	    ("pmap_enter_pde: newpde is missing PG_M"));
@@ -3211,11 +3150,9 @@ pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde, u_int flags,
			 */
			(void)pmap_remove_l3e(pmap, l3e, va, &free, lockp);
		} else {
-			pmap_delayed_invl_started(&et);
			if (pmap_remove_ptes(pmap, va, va + L3_PAGE_SIZE, l3e,
			    &free, lockp))
				pmap_invalidate_all(pmap);
-			pmap_delayed_invl_finished(&et);
		}
		vm_page_free_pages_toq(&free, true);
		if (va >= VM_MAXUSER_ADDRESS) {
@@ -4243,7 +4180,6 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
		if (lockp != NULL) {
			RELEASE_PV_LIST_LOCK(lockp);
			PMAP_UNLOCK(pmap);
-			PMAP_ASSERT_NOT_IN_DI();
			vm_wait(NULL);
			PMAP_LOCK(pmap);
		}
@@ -4513,26 +4449,6 @@ mmu_radix_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 #endif
	anychanged = FALSE;
 
-	/*
-	 * Although this function delays and batches the invalidation
-	 * of stale TLB entries, it does not need to call
-	 * pmap_delayed_invl_started() and
-	 * pmap_delayed_invl_finished(), because it does not
-	 * ordinarily destroy mappings. Stale TLB entries from
-	 * protection-only changes need only be invalidated before the
-	 * pmap lock is released, because protection-only changes do
-	 * not destroy PV entries. Even operations that iterate over
-	 * a physical page's PV list of mappings, like
-	 * pmap_remove_write(), acquire the pmap lock for each
-	 * mapping. Consequently, for protection-only changes, the
-	 * pmap lock suffices to synchronize both page table and TLB
-	 * updates.
-	 *
-	 * This function only destroys a mapping if pmap_demote_l3e()
-	 * fails. In that case, stale TLB entries are immediately
-	 * invalidated.
-	 */
-
	PMAP_LOCK(pmap);
	for (; sva < eva; sva = va_next) {
		l1e = pmap_pml1e(pmap, sva);
@@ -5194,7 +5110,6 @@ mmu_radix_remove(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
	pml2_entry_t *l2e;
	pml3_entry_t ptpaddr, *l3e;
	struct spglist free;
-	struct epoch_tracker et;
	bool anyvalid;
 
	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva);
@@ -5212,7 +5127,6 @@ mmu_radix_remove(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
	sva = (sva + PAGE_MASK) & ~PAGE_MASK;
	eva = (eva + PAGE_MASK) & ~PAGE_MASK;
 
-	pmap_delayed_invl_started(&et);
	PMAP_LOCK(pmap);
 
	/*
@@ -5301,7 +5215,6 @@ mmu_radix_remove(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
	if (anyvalid)
		pmap_invalidate_all(pmap);
	PMAP_UNLOCK(pmap);
-	pmap_delayed_invl_finished(&et);
	vm_page_free_pages_toq(&free, true);
 }
 
@@ -5384,7 +5297,6 @@ mmu_radix_remove_all(mmu_t mmu, vm_page_t m)
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	rw_wunlock(lock);
-	pmap_delayed_invl_wait(m);
	vm_page_free_pages_toq(&free, true);
 }
 
@@ -5661,7 +5573,6 @@ mmu_radix_remove_write(mmu_t mmu, vm_page_t m)
	}
	rw_wunlock(lock);
	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	pmap_delayed_invl_wait(m);
 }
 
 /*
@@ -37,18 +37,12 @@
 #ifndef _MACHINE_PROC_H_
 #define _MACHINE_PROC_H_
 
-struct pmap_invl_gen {
-	u_long gen;	/* (k) */
-	LIST_ENTRY(pmap_invl_gen) link;	/* (pp) */
-};
-
 /*
  * Machine-dependent part of the proc structure
  */
 struct mdthread {
	int md_spinlock_count;	/* (k) */
	register_t md_saved_msr;	/* (k) */
-	struct pmap_invl_gen md_invl_gen;
 };
 
 struct mdproc {