Replace all uses of the vm page queues lock with a r/w lock that is private
to this pmap.c.  This new r/w lock is used primarily to synchronize access
to the TTE lists.  However, it will be used in a somewhat unconventional
way.  As finer-grained TTE list locking is added to each of the pmap
functions that acquire this r/w lock, its acquisition will be changed from
write to read, enabling concurrent execution of the pmap functions with
finer-grained locking.

Reviewed by:	attilio
Tested by:	flo
MFC after:	10 days
commit 2616fac7a2
parent 07ebfe1b9c
Author:	alc
Date:	2012-05-29 01:52:38 +00:00

3 changed files with 55 additions and 47 deletions
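
The locking pattern behind this change can be sketched as follows.  This is
a minimal, hypothetical illustration, not code from the commit: the demo_*
names are invented, and the real consumers are the pmap functions in the
diff below.  Only the rwlock(9) calls (rw_init(), rw_wlock()/rw_wunlock(),
rw_rlock()/rw_runlock(), rw_assert()) and the idea of a file-private lock
are taken from the commit itself.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

/* Private to this file, as tte_list_global_lock is private to pmap.c. */
static struct rwlock demo_list_lock;

static void
demo_init(void)
{

	/* One-time setup, as done in pmap_bootstrap() for the TTE list lock. */
	rw_init(&demo_list_lock, "demo list");
}

static void
demo_modify(void)
{

	/*
	 * Today: every consumer takes the lock exclusively, so concurrency is
	 * no better than under the old page queues mutex, but the lock's
	 * scope is now confined to one file and can be refined later.
	 */
	rw_wlock(&demo_list_lock);
	/* ... modify the shared lists; callees assert RA_WLOCKED ... */
	rw_wunlock(&demo_list_lock);
}

static void
demo_modify_fine_grained(void)
{

	/*
	 * Planned refinement: once each list carries its own lock, the
	 * global lock can be taken shared, letting unrelated operations run
	 * concurrently; only the per-list lock is held exclusively.
	 */
	rw_rlock(&demo_list_lock);
	/* ... acquire the finer-grained per-list lock, then modify ... */
	rw_runlock(&demo_list_lock);
}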

sys/sparc64/include/pmap.h

@@ -43,6 +43,7 @@
 #include <sys/_cpuset.h>
 #include <sys/_lock.h>
 #include <sys/_mutex.h>
+#include <sys/_rwlock.h>
 #include <machine/cache.h>
 #include <machine/tte.h>
@@ -101,6 +102,7 @@ void pmap_set_kctx(void);
 extern struct pmap kernel_pmap_store;
 #define	kernel_pmap	(&kernel_pmap_store)
+extern struct rwlock tte_list_global_lock;
 extern vm_paddr_t phys_avail[];
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;

sys/sparc64/sparc64/pmap.c

@@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/msgbuf.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/rwlock.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
@@ -133,6 +134,11 @@ vm_offset_t vm_max_kernel_address;
  */
 struct pmap kernel_pmap_store;
 
+/*
+ * Global tte list lock
+ */
+struct rwlock tte_list_global_lock;
+
 /*
  * Allocate physical memory for use in pmap_bootstrap.
  */
@@ -666,6 +672,11 @@ pmap_bootstrap(u_int cpu_impl)
 		pm->pm_context[i] = TLB_CTX_KERNEL;
 	CPU_FILL(&pm->pm_active);
 
+	/*
+	 * Initialize the global tte list lock.
+	 */
+	rw_init(&tte_list_global_lock, "tte list global");
+
 	/*
 	 * Flush all non-locked TLB entries possibly left over by the
 	 * firmware.
@@ -876,7 +887,7 @@ pmap_cache_enter(vm_page_t m, vm_offset_t va)
 	struct tte *tp;
 	int color;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&tte_list_global_lock, RA_WLOCKED);
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
 	    ("pmap_cache_enter: fake page"));
 	PMAP_STATS_INC(pmap_ncache_enter);
@@ -951,7 +962,7 @@ pmap_cache_remove(vm_page_t m, vm_offset_t va)
 	struct tte *tp;
 	int color;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&tte_list_global_lock, RA_WLOCKED);
 	CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va,
 	    m->md.colors[DCACHE_COLOR(va)]);
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
@@ -1026,7 +1037,7 @@ pmap_kenter(vm_offset_t va, vm_page_t m)
 	vm_page_t om;
 	u_long data;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&tte_list_global_lock, RA_WLOCKED);
 	PMAP_STATS_INC(pmap_nkenter);
 	tp = tsb_kvtotte(va);
 	CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
@@ -1088,7 +1099,7 @@ pmap_kremove(vm_offset_t va)
 	struct tte *tp;
 	vm_page_t m;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&tte_list_global_lock, RA_WLOCKED);
 	PMAP_STATS_INC(pmap_nkremove);
 	tp = tsb_kvtotte(va);
 	CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
@@ -1139,19 +1150,16 @@
 void
 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
 {
 	vm_offset_t va;
-	int locked;
 
 	PMAP_STATS_INC(pmap_nqenter);
 	va = sva;
-	if (!(locked = mtx_owned(&vm_page_queue_mtx)))
-		vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	while (count-- > 0) {
 		pmap_kenter(va, *m);
 		va += PAGE_SIZE;
 		m++;
 	}
-	if (!locked)
-		vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	tlb_range_demap(kernel_pmap, sva, va);
 }
@@ -1163,18 +1171,15 @@
 void
 pmap_qremove(vm_offset_t sva, int count)
 {
 	vm_offset_t va;
-	int locked;
 
 	PMAP_STATS_INC(pmap_nqremove);
 	va = sva;
-	if (!(locked = mtx_owned(&vm_page_queue_mtx)))
-		vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	while (count-- > 0) {
 		pmap_kremove(va);
 		va += PAGE_SIZE;
 	}
-	if (!locked)
-		vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	tlb_range_demap(kernel_pmap, sva, va);
 }
@@ -1322,7 +1327,7 @@ pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
 	vm_page_t m;
 	u_long data;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&tte_list_global_lock, RA_WLOCKED);
 	data = atomic_readandclear_long(&tp->tte_data);
 	if ((data & TD_FAKE) == 0) {
 		m = PHYS_TO_VM_PAGE(TD_PA(data));
@@ -1359,7 +1364,7 @@ pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end)
 	    pm->pm_context[curcpu], start, end);
 	if (PMAP_REMOVE_DONE(pm))
 		return;
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	PMAP_LOCK(pm);
 	if (end - start > PMAP_TSB_THRESH) {
 		tsb_foreach(pm, NULL, start, end, pmap_remove_tte);
@@ -1372,7 +1377,7 @@ pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end)
 		tlb_range_demap(pm, start, end - 1);
 	}
 	PMAP_UNLOCK(pm);
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 }
 
 void
@@ -1385,7 +1390,7 @@ pmap_remove_all(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
 		tpn = TAILQ_NEXT(tp, tte_link);
 		if ((tp->tte_data & TD_PV) == 0)
@@ -1408,7 +1413,7 @@ pmap_remove_all(vm_page_t m)
 		PMAP_UNLOCK(pm);
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 }
 
 static int
@@ -1470,10 +1475,10 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_prot_t access, vm_page_t m,
     vm_prot_t prot, boolean_t wired)
 {
 
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	PMAP_LOCK(pm);
 	pmap_enter_locked(pm, va, m, prot, wired);
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	PMAP_UNLOCK(pm);
 }
@@ -1493,7 +1498,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	vm_page_t real;
 	u_long data;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&tte_list_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
 	    VM_OBJECT_LOCKED(m->object),
@@ -1636,14 +1641,14 @@ pmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
 	psize = atop(end - start);
 	m = m_start;
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	PMAP_LOCK(pm);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		pmap_enter_locked(pm, start + ptoa(diff), m, prot &
 		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
 		m = TAILQ_NEXT(m, listq);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	PMAP_UNLOCK(pm);
 }
@@ -1651,11 +1656,11 @@
 void
 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	PMAP_LOCK(pm);
 	pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 	    FALSE);
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	PMAP_UNLOCK(pm);
 }
@@ -1721,7 +1726,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
 
 	if (dst_addr != src_addr)
 		return;
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	if (dst_pmap < src_pmap) {
 		PMAP_LOCK(dst_pmap);
 		PMAP_LOCK(src_pmap);
@@ -1739,7 +1744,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
 				pmap_copy_tte(src_pmap, dst_pmap, tp, va);
 		tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	PMAP_UNLOCK(src_pmap);
 	PMAP_UNLOCK(dst_pmap);
 }
@@ -1938,7 +1943,7 @@ pmap_page_exists_quick(pmap_t pm, vm_page_t m)
 	    ("pmap_page_exists_quick: page %p is not managed", m));
 	loops = 0;
 	rv = FALSE;
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -1949,7 +1954,7 @@ pmap_page_exists_quick(pmap_t pm, vm_page_t m)
 		if (++loops >= 16)
 			break;
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	return (rv);
 }
@@ -1966,11 +1971,11 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
 		if ((tp->tte_data & (TD_PV | TD_WIRED)) == (TD_PV | TD_WIRED))
 			count++;
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	return (count);
 }
@@ -1997,13 +2002,13 @@ pmap_page_is_mapped(vm_page_t m)
 	rv = FALSE;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (rv);
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
 		if ((tp->tte_data & TD_PV) != 0) {
 			rv = TRUE;
 			break;
 		}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	return (rv);
 }
@@ -2029,7 +2034,7 @@ pmap_ts_referenced(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
 	count = 0;
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
 		tpf = tp;
 		do {
@@ -2043,7 +2048,7 @@ pmap_ts_referenced(vm_page_t m)
 				break;
 		} while ((tp = tpn) != NULL && tp != tpf);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	return (count);
 }
@@ -2066,7 +2071,7 @@ pmap_is_modified(vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return (rv);
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2075,7 +2080,7 @@ pmap_is_modified(vm_page_t m)
 			break;
 		}
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	return (rv);
 }
@@ -2109,7 +2114,7 @@ pmap_is_referenced(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
 	rv = FALSE;
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2118,7 +2123,7 @@ pmap_is_referenced(vm_page_t m)
 			break;
 		}
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 	return (rv);
 }
@@ -2141,7 +2146,7 @@ pmap_clear_modify(vm_page_t m)
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2149,7 +2154,7 @@ pmap_clear_modify(vm_page_t m)
 		if ((data & TD_W) != 0)
 			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 }
 
 void
@@ -2160,7 +2165,7 @@ pmap_clear_reference(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2168,7 +2173,7 @@ pmap_clear_reference(vm_page_t m)
 		if ((data & TD_REF) != 0)
 			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 }
 
 void
@@ -2189,7 +2194,7 @@ pmap_remove_write(vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	rw_wlock(&tte_list_global_lock);
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2200,7 +2205,7 @@ pmap_remove_write(vm_page_t m)
 		}
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
+	rw_wunlock(&tte_list_global_lock);
 }
 
 int

sys/sparc64/sparc64/tsb.c

@@ -40,6 +40,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/rwlock.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
@@ -131,7 +132,7 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, u_long sz, u_long data)
 		PMAP_STATS_INC(tsb_nenter_u_oc);
 	}
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&tte_list_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 	if (pm == kernel_pmap) {
 		PMAP_STATS_INC(tsb_nenter_k);