Push down the page queues lock inside of vm_page_free_toq() and
pmap_page_is_mapped() in preparation for removing page queues locking
around calls to vm_page_free().  Setting aside the assertion that calls
pmap_page_is_mapped(), vm_page_free_toq() now acquires and holds the page
queues lock just long enough to actually add or remove the page from the
paging queues.

Update vm_page_unhold() to reflect the above change.
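The shape of the change is a classic lock push-down. A minimal sketch of the
before/after calling convention, using only functions that appear in the diffs
below (illustrative, not code from this commit):

/* Before: callers wrapped the free in the page queues lock. */
vm_page_lock_queues();
vm_page_free(m);		/* vm_page_free_toq() asserted the lock */
vm_page_unlock_queues();

/* After: vm_page_free_toq() takes the lock itself, and only around
 * the actual queue manipulation. */
if (VM_PAGE_GETQUEUE(m) != PQ_NONE) {
	vm_page_lock_queues();
	vm_pageq_remove(m);
	vm_page_unlock_queues();
}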
Alan Cox 2010-05-06 16:39:43 +00:00
parent b277fb56f9
commit 7024db1d40
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=207702
5 changed files with 36 additions and 30 deletions

@@ -3961,16 +3961,15 @@ pmap_pvh_wired_mappings(struct md_page *pvh, int count)
 boolean_t
 pmap_page_is_mapped(vm_page_t m)
 {
-	struct md_page *pvh;
+	boolean_t rv;
 
 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 		return (FALSE);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if (TAILQ_EMPTY(&m->md.pv_list)) {
-		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-		return (!TAILQ_EMPTY(&pvh->pv_list));
-	} else
-		return (TRUE);
+	vm_page_lock_queues();
+	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
+	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*

@@ -4125,16 +4125,15 @@ pmap_pvh_wired_mappings(struct md_page *pvh, int count)
 boolean_t
 pmap_page_is_mapped(vm_page_t m)
 {
-	struct md_page *pvh;
+	boolean_t rv;
 
 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 		return (FALSE);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if (TAILQ_EMPTY(&m->md.pv_list)) {
-		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-		return (!TAILQ_EMPTY(&pvh->pv_list));
-	} else
-		return (TRUE);
+	vm_page_lock_queues();
+	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
+	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list);
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*

@@ -1834,14 +1834,19 @@ boolean_t
 pmap_page_is_mapped(vm_page_t m)
 {
 	struct tte *tp;
+	boolean_t rv;
 
+	rv = FALSE;
 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-		return (FALSE);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+		return (rv);
+	vm_page_lock_queues();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
-		if ((tp->tte_data & TD_PV) != 0)
-			return (TRUE);
-	return (FALSE);
+		if ((tp->tte_data & TD_PV) != 0) {
+			rv = TRUE;
+			break;
+		}
+	vm_page_unlock_queues();
+	return (rv);
 }
 
 /*

@@ -563,11 +563,8 @@ vm_page_unhold(vm_page_t mem)
 	vm_page_lock_assert(mem, MA_OWNED);
 	--mem->hold_count;
 	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
-	if (mem->hold_count == 0 && VM_PAGE_INQUEUE2(mem, PQ_HOLD)) {
-		vm_page_lock_queues();
+	if (mem->hold_count == 0 && VM_PAGE_INQUEUE2(mem, PQ_HOLD))
 		vm_page_free_toq(mem);
-		vm_page_unlock_queues();
-	}
 }
 
 /*
@@ -1448,10 +1445,11 @@ void
 vm_page_free_toq(vm_page_t m)
 {
 
-	if (VM_PAGE_GETQUEUE(m) != PQ_NONE)
-		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	KASSERT(!pmap_page_is_mapped(m),
-	    ("vm_page_free_toq: freeing mapped page %p", m));
+	if ((m->flags & PG_UNMANAGED) == 0) {
+		vm_page_lock_assert(m, MA_OWNED);
+		KASSERT(!pmap_page_is_mapped(m),
+		    ("vm_page_free_toq: freeing mapped page %p", m));
+	}
 	PCPU_INC(cnt.v_tfree);
 
 	if (m->busy || VM_PAGE_IS_FREE(m)) {
@@ -1471,7 +1469,11 @@ vm_page_free_toq(vm_page_t m)
 	 * callback routine until after we've put the page on the
 	 * appropriate free queue.
 	 */
-	vm_pageq_remove(m);
+	if (VM_PAGE_GETQUEUE(m) != PQ_NONE) {
+		vm_page_lock_queues();
+		vm_pageq_remove(m);
+		vm_page_unlock_queues();
+	}
 	vm_page_remove(m);
 
 	/*
@@ -1493,9 +1495,10 @@ vm_page_free_toq(vm_page_t m)
 		panic("vm_page_free: freeing wired page");
 	}
 	if (m->hold_count != 0) {
+		vm_page_lock_assert(m, MA_OWNED);
 		m->flags &= ~PG_ZERO;
+		vm_page_lock_queues();
 		vm_page_enqueue(PQ_HOLD, m);
+		vm_page_unlock_queues();
 	} else {
 		/*
 		 * Restore the default memory attribute to the page.
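Taken together with the vm_page_unhold() hunk above, the caller-side contract
becomes: hold the page lock, not the page queues lock. A hedged caller sketch
(illustrative, not from this commit; vm_page_lock()/vm_page_unlock() are the
page lock macros implied by the vm_page_lock_assert() calls in the diff):

/*
 * Only the page lock is required, per the vm_page_lock_assert() in
 * vm_page_unhold(); vm_page_free_toq() now acquires the page queues
 * lock itself when the page must be taken off (or put on) a paging
 * queue.
 */
vm_page_lock(m);
vm_page_unhold(m);
vm_page_unlock(m);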

@@ -107,7 +107,7 @@ struct vm_page {
 	vm_pindex_t pindex;		/* offset into object (O,Q) */
 	vm_paddr_t phys_addr;		/* physical address of page */
 	struct md_page md;		/* machine dependant stuff */
-	uint8_t	queue;			/* page queue index */
+	uint8_t	queue;			/* page queue index (P,Q) */
 	int8_t	segind;
 	u_short	flags;			/* see below */
 	uint8_t	order;			/* index of the buddy queue */
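The new (P,Q) annotation ties the queue field to vm_page.h's locking legend;
by the usual two-lock annotation convention there (an assumption about the
legend's wording, which this diff does not show), both locks are needed to
write the field and either suffices to read it. That reading matches the
PQ_HOLD hunk above:

/*
 * Sketch of the (P,Q) discipline for m->queue, assuming the standard
 * two-lock convention (both locks to write, either to read); this
 * mirrors the vm_page_free_toq() hunk above.
 */
vm_page_lock_assert(m, MA_OWNED);	/* P: page lock held by caller */
vm_page_lock_queues();			/* Q: acquire page queues lock */
vm_page_enqueue(PQ_HOLD, m);		/* writes m->queue */
vm_page_unlock_queues();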