Prevent the leakage of wired pages in the following circumstances:

First, a file is mmap(2)ed and then mlock(2)ed.  Later, it is truncated.
Under "normal" circumstances, i.e., when the file is not mlock(2)ed, the
pages beyond the EOF are unmapped and freed.  However, when the file is
mlock(2)ed, the pages beyond the EOF are unmapped but not freed because
they have a non-zero wire count.  This can be a mistake.  Specifically,
it is a mistake if the sole reason why the pages are wired is because of
wired, managed mappings.  Previously, unmapping the pages destroyed these
wired, managed mappings but did not reduce the pages' wire count.
Consequently, when the file is unmapped, the pages are not unwired
because the wired mapping has been destroyed.  Moreover, when the vm
object is finally destroyed, the pages are leaked because they are still
wired.  The fix is to reduce the pages' wired count by the number of
wired, managed mappings destroyed.  To do this, I introduce a new pmap
function pmap_page_wired_mappings() that returns the number of managed
mappings to the given physical page that are wired, and I use this
function in vm_object_page_remove().

Reviewed by: tegge
MFC after: 6 weeks
This commit is contained in:
Alan Cox 2007-11-17 22:52:29 +00:00
parent 8d1e3aed2d
commit 59677d3c0e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=173708
12 changed files with 241 additions and 1 deletion

View File

@ -2924,6 +2924,35 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
return (FALSE);
}
/*
* pmap_page_wired_mappings:
*
* Return the number of managed mappings to the given physical page
* that are wired.
*/
int
pmap_page_wired_mappings(vm_page_t m)
{
pv_entry_t pv;
pt_entry_t *pte;
pmap_t pmap;
int count;
count = 0;
/* Fictitious pages presumably have no pv list to walk -- confirm. */
if ((m->flags & PG_FICTITIOUS) != 0)
return (count);
/* The page's pv list is protected by the page queues lock. */
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = PV_PMAP(pv);
/* Lock each pmap so its PTE cannot change while examined. */
PMAP_LOCK(pmap);
pte = pmap_pte(pmap, pv->pv_va);
/* PG_W marks the mapping as wired. */
if ((*pte & PG_W) != 0)
count++;
PMAP_UNLOCK(pmap);
}
return (count);
}
/*
* Remove all pages from specified address space
* this aids process exit speeds. Also, this code

View File

@ -4490,6 +4490,27 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
return (FALSE);
}
/*
 * pmap_page_wired_mappings:
 *
 * Count the managed mappings to the given physical page that are
 * wired, by walking the page's pv list and testing PVF_WIRED on
 * each entry.
 */
int
pmap_page_wired_mappings(vm_page_t m)
{
	pv_entry_t pve;
	int nwired;

	nwired = 0;
	/* Fictitious pages are not counted. */
	if ((m->flags & PG_FICTITIOUS) != 0)
		return (0);
	/* The pv list is protected by the page queues lock. */
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	TAILQ_FOREACH(pve, &m->md.pv_list, pv_list) {
		if ((pve->pv_flags & PVF_WIRED) != 0)
			nwired++;
	}
	return (nwired);
}
/*
* pmap_ts_referenced:

View File

@ -3029,6 +3029,37 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
return (FALSE);
}
/*
* pmap_page_wired_mappings:
*
* Return the number of managed mappings to the given physical page
* that are wired.
*/
int
pmap_page_wired_mappings(vm_page_t m)
{
pv_entry_t pv;
pt_entry_t *pte;
pmap_t pmap;
int count;
count = 0;
/* Fictitious pages presumably have no pv list to walk -- confirm. */
if ((m->flags & PG_FICTITIOUS) != 0)
return (count);
/* The page's pv list is protected by the page queues lock. */
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
/*
* Pin to the current CPU; presumably pmap_pte_quick() relies on a
* per-CPU temporary mapping -- TODO confirm.
*/
sched_pin();
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = PV_PMAP(pv);
/* Lock each pmap so its PTE cannot change while examined. */
PMAP_LOCK(pmap);
pte = pmap_pte_quick(pmap, pv->pv_va);
/* PG_W marks the mapping as wired. */
if ((*pte & PG_W) != 0)
count++;
PMAP_UNLOCK(pmap);
}
sched_unpin();
return (count);
}
/*
* Remove all pages from specified address space
* this aids process exit speeds. Also, this code

View File

@ -1914,6 +1914,38 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
return (FALSE);
}
/*
* pmap_page_wired_mappings:
*
* Return the number of managed mappings to the given physical page
* that are wired.
*/
int
pmap_page_wired_mappings(vm_page_t m)
{
struct ia64_lpte *pte;
pmap_t oldpmap, pmap;
pv_entry_t pv;
int count;
count = 0;
/* Fictitious pages presumably have no pv list to walk -- confirm. */
if ((m->flags & PG_FICTITIOUS) != 0)
return (count);
/* The page's pv list is protected by the page queues lock. */
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = pv->pv_pmap;
PMAP_LOCK(pmap);
/* Activate the mapping's pmap, presumably so the VHPT lookup sees it. */
oldpmap = pmap_switch(pmap);
pte = pmap_find_vhpt(pv->pv_va);
/* A pv entry implies a live mapping, so the lookup must succeed. */
KASSERT(pte != NULL, ("pte"));
if (pmap_wired(pte))
count++;
/* Restore the previously active pmap before unlocking. */
pmap_switch(oldpmap);
PMAP_UNLOCK(pmap);
}
return (count);
}
/*
* Remove all pages from specified address space
* this aids process exit speeds. Also, this code

View File

@ -322,6 +322,7 @@ boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea_page_wired_mappings(mmu_t, vm_page_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
@ -359,6 +360,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_ts_referenced, moea_ts_referenced),
MMUMETHOD(mmu_map, moea_map),
MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
MMUMETHOD(mmu_pinit, moea_pinit),
MMUMETHOD(mmu_pinit0, moea_pinit0),
MMUMETHOD(mmu_protect, moea_protect),
@ -1492,6 +1494,26 @@ moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
return (FALSE);
}
/*
 * Count the managed mappings to the given physical page that are
 * wired, by scanning the page's pvo list for PVO_WIRED entries.
 */
int
moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	int nwired;

	nwired = 0;
	/* Nothing to count before initialization or for fictitious pages. */
	if (!moea_initialized || (m->flags & PG_FICTITIOUS) != 0)
		return (0);
	/* The pvo list is protected by the page queues lock. */
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
			nwired++;
	}
	return (nwired);
}
static u_int moea_vsidcontext;
void

View File

@ -430,6 +430,21 @@ METHOD void page_init {
} DEFAULT mmu_null_page_init;
/**
* @brief Count the number of managed mappings to the given physical
* page that are wired.
*
* @param _pg physical page
*
* @retval int the number of wired, managed mappings to the
* given physical page
*/
/* NOTE(review): no DEFAULT is given, so presumably every MMU
* implementation must provide this method -- confirm. */
METHOD int page_wired_mappings {
mmu_t _mmu;
vm_page_t _pg;
};
/**
* @brief Initialise a physical map data structure
*

View File

@ -322,6 +322,7 @@ boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea_page_wired_mappings(mmu_t, vm_page_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
@ -359,6 +360,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_ts_referenced, moea_ts_referenced),
MMUMETHOD(mmu_map, moea_map),
MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
MMUMETHOD(mmu_pinit, moea_pinit),
MMUMETHOD(mmu_pinit0, moea_pinit0),
MMUMETHOD(mmu_protect, moea_protect),
@ -1492,6 +1494,26 @@ moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
return (FALSE);
}
/*
 * Count the managed mappings to the given physical page that are
 * wired, by scanning the page's pvo list for PVO_WIRED entries.
 */
int
moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	int nwired;

	nwired = 0;
	/* Nothing to count before initialization or for fictitious pages. */
	if (!moea_initialized || (m->flags & PG_FICTITIOUS) != 0)
		return (0);
	/* The pvo list is protected by the page queues lock. */
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
			nwired++;
	}
	return (nwired);
}
static u_int moea_vsidcontext;
void

View File

@ -193,6 +193,13 @@ pmap_page_init(vm_page_t m)
MMU_PAGE_INIT(mmu_obj, m);
}
/*
* Return the number of wired, managed mappings to the given physical
* page by dispatching to the active MMU implementation's method.
*/
int
pmap_page_wired_mappings(vm_page_t m)
{
return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
}
int
pmap_pinit(pmap_t pmap)
{

View File

@ -1752,6 +1752,26 @@ pmap_page_exists_quick(pmap_t pm, vm_page_t m)
return (FALSE);
}
/*
 * Count the managed mappings to the given physical page that are
 * wired, by walking the page's tte list.
 */
int
pmap_page_wired_mappings(vm_page_t m)
{
	struct tte *tp;
	int nwired;

	nwired = 0;
	/* Fictitious pages are not counted. */
	if ((m->flags & PG_FICTITIOUS) != 0)
		return (0);
	/* The tte list is protected by the page queues lock. */
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
		/* Count only mappings that are both managed (TD_PV) and wired. */
		if ((tp->tte_data & (TD_PV | TD_WIRED)) == (TD_PV | TD_WIRED))
			nwired++;
	}
	return (nwired);
}
/*
* Remove all pages from specified address space, this aids process exit
* speeds. This is much faster than pmap_remove n the case of running down

View File

@ -1669,6 +1669,34 @@ pmap_page_init(vm_page_t m)
TAILQ_INIT(&m->md.pv_list);
m->md.pv_list_count = 0;
}
/*
* Return the number of managed mappings to the given physical page
* that are wired.
*/
int
pmap_page_wired_mappings(vm_page_t m)
{
pmap_t pmap;
pv_entry_t pv;
uint64_t tte_data;
int count;
count = 0;
/* Fictitious pages presumably have no pv list to walk -- confirm. */
if ((m->flags & PG_FICTITIOUS) != 0)
return (count);
/* The page's pv list is protected by the page queues lock. */
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = pv->pv_pmap;
/* Lock the pmap so its tte hash cannot change while examined. */
PMAP_LOCK(pmap);
tte_data = tte_hash_lookup(pmap->pm_hash, pv->pv_va);
/* VTD_WIRED marks the mapping as wired. */
if ((tte_data & VTD_WIRED) != 0)
count++;
PMAP_UNLOCK(pmap);
}
return (count);
}
/*
* Lower the permission for all mappings to a given page.
*/

View File

@ -114,6 +114,7 @@ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size);
boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m);
void pmap_page_init(vm_page_t m);
int pmap_page_wired_mappings(vm_page_t m);
int pmap_pinit(pmap_t);
void pmap_pinit0(pmap_t);
void pmap_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);

View File

@ -1797,6 +1797,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
boolean_t clean_only)
{
vm_page_t p, next;
int wirings;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
if (object->resident_page_count == 0)
@ -1831,8 +1832,16 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
p = next) {
next = TAILQ_NEXT(p, listq);
if (p->wire_count != 0) {
/*
* If the page is wired for any reason besides the
* existence of managed, wired mappings, then it cannot
* be freed.
*/
if ((wirings = p->wire_count) != 0 &&
(wirings = pmap_page_wired_mappings(p)) != p->wire_count) {
pmap_remove_all(p);
/* Account for removal of managed, wired mappings. */
p->wire_count -= wirings;
if (!clean_only)
p->valid = 0;
continue;
@ -1845,6 +1854,9 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
continue;
}
pmap_remove_all(p);
/* Account for removal of managed, wired mappings. */
if (wirings != 0)
p->wire_count -= wirings;
vm_page_free(p);
}
vm_page_unlock_queues();