The synchronization provided by vm object locking has eliminated the
need for most calls to vm_page_busy().  Specifically, most calls to
vm_page_busy() occur immediately prior to a call to vm_page_remove().
In such cases, the containing vm object is locked across both calls.
Consequently, the setting of the vm page's PG_BUSY flag is not even
visible to other threads that are following the synchronization
protocol.
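
As a hedged illustration, consider a hypothetical caller (not code
from this commit) that frees a page while holding the containing vm
object's lock.  Because every thread that follows the protocol must
take that same lock before touching the object's pages, the brief
PG_BUSY window below is unobservable:

	VM_OBJECT_LOCK(object);
	m = vm_page_lookup(object, pindex);
	vm_page_lock_queues();
	vm_page_busy(m);	/* formerly required; no other thread can see it */
	vm_page_free(m);	/* calls vm_page_remove() internally */
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(object);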

This change (1) eliminates the calls to vm_page_busy() that
immediately precede a call to vm_page_remove(), or to a function that
calls it, such as vm_page_free() and vm_page_rename(), and (2) relaxes
the requirement in vm_page_remove() that the vm page's PG_BUSY flag be
set.  Now, the vm page's PG_BUSY flag is set only when the vm object
lock is released while the vm page is still in transition; typically,
this is while it is undergoing I/O.
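
For contrast, a sketch of the case where PG_BUSY still matters
(hypothetical code, assuming the era's busy/sleep protocol): the
object lock is dropped mid-transition, so PG_BUSY is the only thing
that tells other threads to wait.

	vm_page_busy(m);		/* page enters transition */
	VM_OBJECT_UNLOCK(object);	/* lock released while I/O proceeds */
	/* Another thread may lock the object, find the page, and sleep
	   in vm_page_sleep_if_busy() until the transition completes. */
	VM_OBJECT_LOCK(object);
	vm_page_wakeup(m);		/* clears PG_BUSY and wakes sleepers */
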
Author:	Alan Cox
Date:	2004-11-03 20:17:31 +0000
Commit:	d19ef81437 (parent 51f83da622)
Notes:	svn path=/head/; revision=137168 (svn2git, 2020-12-20 02:59:44 +0000)

10 changed files with 5 additions and 30 deletions

@@ -97,7 +97,6 @@ vm_pgmoveco(vm_map_t mapa, vm_object_t srcobj, vm_offset_t kaddr,
 		do
 			vm_page_lock_queues();
 		while (vm_page_sleep_if_busy(user_pg, 1, "vm_pgmoveco"));
-		vm_page_busy(user_pg);
 		pmap_remove_all(user_pg);
 		vm_page_free(user_pg);
 	} else
@@ -114,7 +113,6 @@ vm_pgmoveco(vm_map_t mapa, vm_object_t srcobj, vm_offset_t kaddr,
 			panic("vm_pgmoveco: renaming busy page");
 		}
 		kpindex = kern_pg->pindex;
-		vm_page_busy(kern_pg);
 		vm_page_rename(kern_pg, uobject, upindex);
 		vm_page_flag_clear(kern_pg, PG_BUSY);
 		kern_pg->valid = VM_PAGE_BITS_ALL;

@@ -232,7 +232,6 @@ jumbo_pg_free(vm_offset_t addr)
		 */
	} else {
		vm_page_lock_queues();
-		vm_page_busy(pg);	/* vm_page_free wants pages to be busy*/
		vm_page_free(pg);
		vm_page_unlock_queues();
	}

@@ -1970,7 +1970,6 @@ do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
			if (pg->wire_count == 0 && pg->valid == 0 &&
			    pg->busy == 0 && !(pg->flags & PG_BUSY) &&
			    pg->hold_count == 0) {
-				vm_page_busy(pg);
				vm_page_free(pg);
			}
			vm_page_unlock_queues();

@@ -1595,7 +1595,6 @@ vfs_vmio_release(struct buf *bp)
			 */
			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid &&
			    m->hold_count == 0) {
-				vm_page_busy(m);
				pmap_remove_all(m);
				vm_page_free(m);
			} else if (bp->b_flags & B_DIRECT) {
@@ -3687,7 +3686,6 @@ vm_hold_free_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
		bp->b_pages[index] = NULL;
		pmap_qremove(pg, 1);
		vm_page_lock_queues();
-		vm_page_busy(p);
		vm_page_unwire(p, 0);
		vm_page_free(p);
		vm_page_unlock_queues();

@@ -1087,7 +1087,6 @@ pmap_release(pmap_t pm)
		vm_page_lock_queues();
		if (vm_page_sleep_if_busy(m, FALSE, "pmaprl"))
			continue;
-		vm_page_busy(m);
		KASSERT(m->hold_count == 0,
		    ("pmap_release: freeing held tsb page"));
		m->md.pmap = NULL;

@@ -256,7 +256,6 @@ contigmalloc1(
					start++;
					goto again0;
				}
-				vm_page_busy(m);
				vm_page_free(m);
				VM_OBJECT_UNLOCK(object);
			}
@@ -459,7 +458,6 @@ vm_page_alloc_contig(vm_pindex_t npages, vm_paddr_t low, vm_paddr_t high,
			object = m->object;
			if (!VM_OBJECT_TRYLOCK(object))
				goto retry;
-			vm_page_busy(m);
			vm_page_free(m);
			VM_OBJECT_UNLOCK(object);
		}

@@ -296,7 +296,6 @@ vm_proc_dispose(struct proc *p)
		panic("vm_proc_dispose: incorrect number of pages in upobj");
	vm_page_lock_queues();
	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
-		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
@@ -496,7 +495,6 @@ vm_thread_dispose(struct thread *td)
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock_queues();
-		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock_queues();

@@ -621,11 +621,9 @@ vm_object_terminate(vm_object_t object)
		    ("vm_object_terminate: freeing busy page %p "
		    "p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
		if (p->wire_count == 0) {
-			vm_page_busy(p);
			vm_page_free(p);
			cnt.v_pfree++;
		} else {
-			vm_page_busy(p);
			vm_page_remove(p);
		}
	}
@@ -1803,7 +1801,6 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
			if (p->valid & p->dirty)
				continue;
		}
-		vm_page_busy(p);
		pmap_remove_all(p);
		vm_page_free(p);
	}

@@ -609,20 +609,14 @@ vm_page_remove(vm_page_t m)
	vm_page_t root;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	if (m->object == NULL)
+	if ((object = m->object) == NULL)
		return;
-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
-	if ((m->flags & PG_BUSY) == 0) {
-		panic("vm_page_remove: page not busy");
+	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	if (m->flags & PG_BUSY) {
+		vm_page_flag_clear(m, PG_BUSY);
+		vm_page_flash(m);
	}
-
-	/*
-	 * Basically destroy the page.
-	 */
-	vm_page_wakeup(m);
-
-	object = m->object;

	/*
	 * Now remove from the object's list of backed pages.
	 */
@@ -810,7 +804,6 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
		}
		m_object = m->object;
		VM_OBJECT_LOCK_ASSERT(m_object, MA_OWNED);
-		vm_page_busy(m);
		vm_page_free(m);
		vm_page_unlock_queues();
		if (m_object != object)
@@ -1298,7 +1291,6 @@ vm_page_try_to_free(vm_page_t m)
	pmap_remove_all(m);
	if (m->dirty)
		return (0);
-	vm_page_busy(m);
	vm_page_free(m);
	return (1);
 }
@@ -1667,7 +1659,6 @@ vm_page_cowfault(vm_page_t m)
	pindex = m->pindex;

 retry_alloc:
-	vm_page_busy(m);
	vm_page_remove(m);
	mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
	if (mnew == NULL) {

@@ -823,7 +823,6 @@ vm_pageout_scan(int pass)
			/*
			 * Invalid pages can be easily freed
			 */
-			vm_page_busy(m);
			pmap_remove_all(m);
			vm_page_free(m);
			cnt.v_dfree++;
@@ -1107,7 +1106,6 @@ vm_pageout_scan(int pass)
		cache_rover = (m->pc + PQ_PRIME2) & PQ_L2_MASK;
		object = m->object;
		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
-		vm_page_busy(m);
		vm_page_free(m);
		VM_OBJECT_UNLOCK(object);
		cnt.v_dfree++;