More PMAP performance improvements: skip 256 MB segments entirely if they
are not mapped during ranged operations, and reduce the scope of the tlbie lock to only the actual tlbie instruction instead of the entire sequence. There are a few more optimization possibilities here as well.
This commit is contained in:
parent
3f8d720f87
commit
7e55df27cb
@ -1981,10 +1981,18 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
|
||||
LOCK_TABLE_RD();
|
||||
PMAP_LOCK(pm);
|
||||
if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) {
|
||||
for (; sva < eva; sva += PAGE_SIZE) {
|
||||
while (sva < eva) {
|
||||
#ifdef __powerpc64__
|
||||
if (pm != kernel_pmap &&
|
||||
user_va_to_slb_entry(pm, sva) == NULL) {
|
||||
sva = roundup2(sva + 1, SEGMENT_LENGTH);
|
||||
continue;
|
||||
}
|
||||
#endif
|
||||
pvo = moea64_pvo_find_va(pm, sva);
|
||||
if (pvo != NULL)
|
||||
moea64_pvo_protect(mmu, pm, pvo, prot);
|
||||
sva += PAGE_SIZE;
|
||||
}
|
||||
} else {
|
||||
LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
|
||||
@ -2095,10 +2103,18 @@ moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
|
||||
LOCK_TABLE_WR();
|
||||
PMAP_LOCK(pm);
|
||||
if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) {
|
||||
for (; sva < eva; sva += PAGE_SIZE) {
|
||||
while (sva < eva) {
|
||||
#ifdef __powerpc64__
|
||||
if (pm != kernel_pmap &&
|
||||
user_va_to_slb_entry(pm, sva) == NULL) {
|
||||
sva = roundup2(sva + 1, SEGMENT_LENGTH);
|
||||
continue;
|
||||
}
|
||||
#endif
|
||||
pvo = moea64_pvo_find_va(pm, sva);
|
||||
if (pvo != NULL)
|
||||
moea64_pvo_remove(mmu, pvo);
|
||||
sva += PAGE_SIZE;
|
||||
}
|
||||
} else {
|
||||
LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
|
||||
@ -2566,7 +2582,7 @@ moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
|
||||
|
||||
ppa = trunc_page(pa);
|
||||
offset = pa & PAGE_MASK;
|
||||
size = roundup(offset + size, PAGE_SIZE);
|
||||
size = roundup2(offset + size, PAGE_SIZE);
|
||||
|
||||
va = kmem_alloc_nofault(kernel_map, size);
|
||||
|
||||
@ -2597,7 +2613,7 @@ moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
|
||||
|
||||
base = trunc_page(va);
|
||||
offset = va & PAGE_MASK;
|
||||
size = roundup(offset + size, PAGE_SIZE);
|
||||
size = roundup2(offset + size, PAGE_SIZE);
|
||||
|
||||
kmem_free(kernel_map, base, size);
|
||||
}
|
||||
|
@ -103,6 +103,7 @@ __FBSDID("$FreeBSD$");
|
||||
#include <sys/lock.h>
|
||||
#include <sys/mutex.h>
|
||||
#include <sys/proc.h>
|
||||
#include <sys/sched.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/systm.h>
|
||||
|
||||
@ -152,15 +153,13 @@ TLBIE(uint64_t vpn) {
|
||||
vpn &= ~(0xffffULL << 48);
|
||||
|
||||
#ifdef __powerpc64__
|
||||
sched_pin();
|
||||
__asm __volatile("ptesync");
|
||||
mtx_lock(&tlbie_mutex);
|
||||
__asm __volatile("\
|
||||
ptesync; \
|
||||
tlbie %0; \
|
||||
eieio; \
|
||||
tlbsync; \
|
||||
ptesync;"
|
||||
:: "r"(vpn) : "memory");
|
||||
__asm __volatile("tlbie %0" :: "r"(vpn) : "memory");
|
||||
mtx_unlock(&tlbie_mutex);
|
||||
__asm __volatile("eieio; tlbsync; ptesync");
|
||||
sched_unpin();
|
||||
#else
|
||||
vpn_hi = (uint32_t)(vpn >> 32);
|
||||
vpn_lo = (uint32_t)vpn;
|
||||
|
Loading…
x
Reference in New Issue
Block a user