Unify bulk free operations in several pmaps.

Submitted by:	Yoshihiro Ota
Reviewed by:	markj
MFC after:	2 weeks
Differential revision:	https://reviews.freebsd.org/D13485
This commit is contained in:
Konstantin Belousov 2018-03-04 20:53:20 +00:00
parent b85a98949f
commit 8c8ee2ee1c
7 changed files with 76 additions and 111 deletions

View File

@ -2369,20 +2369,6 @@ pmap_qremove(vm_offset_t sva, int count)
/***************************************************
* Page table page management routines.....
***************************************************/
/*
 * Free every page queued on "free", counting them as they are released,
 * then subtract that count from the wired-page total in one call.
 * PG_ZERO is preserved by vm_page_free_toq() so a page's known-zero
 * state is not lost when it returns to the free queues.
 */
static __inline void
pmap_free_zero_pages(struct spglist *free)
{
vm_page_t m;
int count;
/* Pop each page off the singly-linked list and free it. */
for (count = 0; (m = SLIST_FIRST(free)) != NULL; count++) {
SLIST_REMOVE_HEAD(free, plinks.s.ss);
/* Preserve the page's PG_ZERO setting. */
vm_page_free_toq(m);
}
vm_wire_sub(count);
}
/*
* Schedule the specified unused page table page to be freed. Specifically,
* add the page to the specified list of pages that will be released to the
@ -3282,7 +3268,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
/* Recycle a freed page table page. */
m_pc->wire_count = 1;
}
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, true);
return (m_pc);
}
@ -3781,7 +3767,7 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
pmap_remove_pde(pmap, pde, sva, &free, lockp);
if ((oldpde & PG_G) == 0)
pmap_invalidate_pde_page(pmap, sva, oldpde);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, true);
CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
" in pmap %p", va, pmap);
return (FALSE);
@ -4183,7 +4169,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
pmap_invalidate_all(pmap);
PMAP_UNLOCK(pmap);
pmap_delayed_invl_finished();
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, true);
}
/*
@ -4281,7 +4267,7 @@ pmap_remove_all(vm_page_t m)
vm_page_aflag_clear(m, PGA_WRITEABLE);
rw_wunlock(lock);
pmap_delayed_invl_wait(m);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, true);
}
/*
@ -4949,7 +4935,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
pmap_invalidate_all(pmap);
pmap_delayed_invl_finished();
}
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, true);
if (va >= VM_MAXUSER_ADDRESS) {
mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
if (pmap_insert_pt_page(pmap, mt)) {
@ -4978,7 +4964,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
* pages. Invalidate those entries.
*/
pmap_invalidate_page(pmap, va);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, true);
}
CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
" in pmap %p", va, pmap);
@ -5159,7 +5145,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
* pages. Invalidate those entries.
*/
pmap_invalidate_page(pmap, va);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, true);
}
mpte = NULL;
}
@ -5537,7 +5523,8 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
*/
pmap_invalidate_page(dst_pmap,
addr);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free,
true);
}
goto out;
}
@ -5955,7 +5942,7 @@ pmap_remove_pages(pmap_t pmap)
rw_wunlock(lock);
pmap_invalidate_all(pmap);
PMAP_UNLOCK(pmap);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, true);
}
static boolean_t
@ -6440,7 +6427,7 @@ pmap_ts_referenced(vm_page_t m)
not_cleared < PMAP_TS_REFERENCED_MAX);
out:
rw_wunlock(lock);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, true);
return (cleared + not_cleared);
}

View File

@ -2550,18 +2550,6 @@ pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags)
return (m);
}
/*
 * Free every page queued on "free".  No wired-page accounting is done
 * here, unlike the amd64/i386 variants of this helper; callers on this
 * architecture adjust the wire count separately.
 */
static __inline void
pmap_free_zero_pages(struct spglist *free)
{
vm_page_t m;
while ((m = SLIST_FIRST(free)) != NULL) {
SLIST_REMOVE_HEAD(free, plinks.s.ss);
/* Preserve the page's PG_ZERO setting. */
vm_page_free_toq(m);
}
}
/*
* Schedule the specified unused L2 page table page to be freed. Specifically,
* add the page to the specified list of pages that will be released to the
@ -2948,7 +2936,7 @@ pmap_pv_reclaim(pmap_t locked_pmap)
m_pc->wire_count = 1;
vm_wire_add(1);
}
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, false);
return (m_pc);
}
@ -3711,7 +3699,7 @@ pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va)
VM_ALLOC_NORMAL | VM_ALLOC_WIRED)) == NULL) {
SLIST_INIT(&free);
pmap_remove_pte1(pmap, pte1p, pte1_trunc(va), &free);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, false);
CTR3(KTR_PMAP, "%s: failure for va %#x in pmap %p",
__func__, va, pmap);
return (FALSE);
@ -4235,7 +4223,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
sched_unpin();
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, false);
}
/*
@ -4309,7 +4297,7 @@ pmap_remove_all(vm_page_t m)
vm_page_aflag_clear(m, PGA_WRITEABLE);
sched_unpin();
rw_wunlock(&pvh_global_lock);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, false);
}
/*
@ -4496,7 +4484,7 @@ pmap_remove_pages(pmap_t pmap)
sched_unpin();
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, false);
}
/*
@ -4605,7 +4593,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
SLIST_INIT(&free);
if (pmap_unwire_pt2(pmap, va, mpt2pg, &free)) {
pmap_tlb_flush(pmap, va);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, false);
}
mpt2pg = NULL;
@ -6079,7 +6067,8 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
if (pmap_unwire_pt2(dst_pmap, addr,
dst_mpt2pg, &free)) {
pmap_tlb_flush(dst_pmap, addr);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free,
false);
}
goto out;
}

View File

@ -1266,18 +1266,6 @@ pmap_qremove(vm_offset_t sva, int count)
/***************************************************
* Page table page management routines.....
***************************************************/
/*
 * Free every page table page queued on "free".  The wired-page count is
 * not touched here; this variant leaves that bookkeeping to the caller.
 */
static __inline void
pmap_free_zero_pages(struct spglist *free)
{
vm_page_t m;
while ((m = SLIST_FIRST(free)) != NULL) {
SLIST_REMOVE_HEAD(free, plinks.s.ss);
/* Preserve the page's PG_ZERO setting. */
vm_page_free_toq(m);
}
}
/*
* Schedule the specified unused page table page to be freed. Specifically,
* add the page to the specified list of pages that will be released to the
@ -1909,7 +1897,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
m_pc->wire_count = 1;
vm_wire_add(1);
}
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, false);
return (m_pc);
}
@ -2417,7 +2405,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
if (lock != NULL)
rw_wunlock(lock);
PMAP_UNLOCK(pmap);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, false);
}
/*
@ -2522,7 +2510,7 @@ pmap_remove_all(vm_page_t m)
}
vm_page_aflag_clear(m, PGA_WRITEABLE);
rw_wunlock(lock);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, false);
}
/*
@ -3230,7 +3218,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
SLIST_INIT(&free);
if (pmap_unwire_l3(pmap, va, mpte, &free)) {
pmap_invalidate_page(pmap, va);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, false);
}
mpte = NULL;
}
@ -3750,7 +3738,7 @@ pmap_remove_pages(pmap_t pmap)
if (lock != NULL)
rw_wunlock(lock);
PMAP_UNLOCK(pmap);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, false);
}
/*
@ -4211,7 +4199,7 @@ pmap_ts_referenced(vm_page_t m)
not_cleared < PMAP_TS_REFERENCED_MAX);
out:
rw_wunlock(lock);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, false);
return (cleared + not_cleared);
}

View File

@ -1711,20 +1711,6 @@ pmap_qremove(vm_offset_t sva, int count)
/***************************************************
* Page table page management routines.....
***************************************************/
/*
 * Free every page queued on "free", then drop the wired-page count by
 * the number of pages released.  vm_page_free_toq() preserves PG_ZERO,
 * so zeroed pages stay marked as such on the free queues.
 */
static __inline void
pmap_free_zero_pages(struct spglist *free)
{
vm_page_t m;
int count;
/* Drain the list, tallying pages so the wire count is updated once. */
for (count = 0; (m = SLIST_FIRST(free)) != NULL; count++) {
SLIST_REMOVE_HEAD(free, plinks.s.ss);
/* Preserve the page's PG_ZERO setting. */
vm_page_free_toq(m);
}
vm_wire_sub(count);
}
/*
* Schedule the specified unused page table page to be freed. Specifically,
* add the page to the specified list of pages that will be released to the
@ -2324,7 +2310,7 @@ pmap_pv_reclaim(pmap_t locked_pmap)
/* Recycle a freed page table page. */
m_pc->wire_count = 1;
}
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, true);
return (m_pc);
}
@ -2674,7 +2660,7 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
pmap_remove_pde(pmap, pde, sva, &free);
if ((oldpde & PG_G) == 0)
pmap_invalidate_pde_page(pmap, sva, oldpde);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, true);
CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
" in pmap %p", va, pmap);
return (FALSE);
@ -3044,7 +3030,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
pmap_invalidate_all(pmap);
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, true);
}
/*
@ -3118,7 +3104,7 @@ pmap_remove_all(vm_page_t m)
vm_page_aflag_clear(m, PGA_WRITEABLE);
sched_unpin();
rw_wunlock(&pvh_global_lock);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, true);
}
/*
@ -3886,7 +3872,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
SLIST_INIT(&free);
if (pmap_unwire_ptp(pmap, mpte, &free)) {
pmap_invalidate_page(pmap, va);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, true);
}
mpte = NULL;
@ -4201,7 +4187,8 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
&free)) {
pmap_invalidate_page(dst_pmap,
addr);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free,
true);
}
goto out;
}
@ -4619,7 +4606,7 @@ pmap_remove_pages(pmap_t pmap)
pmap_invalidate_all(pmap);
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, true);
}
/*

View File

@ -1069,18 +1069,6 @@ pmap_qremove(vm_offset_t sva, int count)
/***************************************************
* Page table page management routines.....
***************************************************/
/*
 * Free every page table page queued on "free".  This variant performs
 * no wired-page accounting; the caller handles any wire-count updates.
 */
static __inline void
pmap_free_zero_pages(struct spglist *free)
{
vm_page_t m;
while ((m = SLIST_FIRST(free)) != NULL) {
SLIST_REMOVE_HEAD(free, plinks.s.ss);
/* Preserve the page's PG_ZERO setting. */
vm_page_free_toq(m);
}
}
/*
* Schedule the specified unused page table page to be freed. Specifically,
* add the page to the specified list of pages that will be released to the
@ -1876,7 +1864,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
rw_wunlock(lock);
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, false);
}
/*
@ -1942,7 +1930,7 @@ pmap_remove_all(vm_page_t m)
}
vm_page_aflag_clear(m, PGA_WRITEABLE);
rw_wunlock(&pvh_global_lock);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, false);
}
/*
@ -2377,7 +2365,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
SLIST_INIT(&free);
if (pmap_unwire_l3(pmap, va, mpte, &free)) {
pmap_invalidate_page(pmap, va);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, false);
}
mpte = NULL;
}
@ -2783,7 +2771,7 @@ pmap_remove_pages(pmap_t pmap)
rw_wunlock(lock);
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, false);
}
/*
@ -3085,7 +3073,7 @@ pmap_ts_referenced(vm_page_t m)
out:
rw_wunlock(lock);
rw_runlock(&pvh_global_lock);
pmap_free_zero_pages(&free);
vm_page_free_pages_toq(&free, false);
return (cleared + not_cleared);
}

View File

@ -2538,17 +2538,7 @@ vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
}
if (m_mtx != NULL)
mtx_unlock(m_mtx);
if ((m = SLIST_FIRST(&free)) != NULL) {
vmd = VM_DOMAIN(domain);
vm_domain_free_lock(vmd);
do {
MPASS(vm_phys_domain(m) == domain);
SLIST_REMOVE_HEAD(&free, plinks.s.ss);
vm_page_free_phys(vmd, m);
} while ((m = SLIST_FIRST(&free)) != NULL);
vm_domain_free_wakeup(vmd);
vm_domain_free_unlock(vmd);
}
vm_page_free_pages_toq(&free, false);
return (error);
}
@ -3248,7 +3238,42 @@ vm_page_free_toq(vm_page_t m)
}
/*
* vm_page_wire:
* vm_page_free_pages_toq:
*
Returns a list of pages to the free list, disassociating them
from any VM object. In other words, this is equivalent to
calling vm_page_free_toq() on each page of the list.
*
The objects must be locked. The pages must be locked if they are
managed.
*/
void
vm_page_free_pages_toq(struct spglist *free, bool update_wire_count)
{
vm_page_t m;
struct pglist pgl;
int count;
/* Common fast path: callers often pass an empty list. */
if (SLIST_EMPTY(free))
return;
count = 0;
TAILQ_INIT(&pgl);
/*
 * Drain the singly-linked list, preparing each page for freeing and
 * gathering those that vm_page_free_prep() approves onto a local
 * tailq so they can be released to the physical allocator in a batch.
 */
while ((m = SLIST_FIRST(free)) != NULL) {
count++;
SLIST_REMOVE_HEAD(free, plinks.s.ss);
if (vm_page_free_prep(m, false))
TAILQ_INSERT_TAIL(&pgl, m, listq);
}
vm_page_free_phys_pglist(&pgl);
/* When requested, charge all freed pages against the wire count at once. */
if (update_wire_count && count > 0)
vm_wire_sub(count);
}
/*
* vm_page_wire:
*
* Mark this page as wired down. If the page is fictitious, then
* its wire count must remain one.

View File

@ -535,6 +535,7 @@ void vm_page_test_dirty (vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_free_pages_toq(struct spglist *free, bool update_wire_count);
void vm_page_dirty_KBI(vm_page_t m);
void vm_page_lock_KBI(vm_page_t m, const char *file, int line);