Add a vm_page_change_lock() helper, the common code that avoids
relocking the page lock when both the old and the new page use the same
underlying lock.  Convert the existing places to use the helper instead
of inlining it.  Use the optimization in vm_object_page_remove().

Suggested and reviewed by:	alc, markj
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
commit d27f2b11c2
parent fa1f526510
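The pattern every call site is converted to is: keep the currently held
page lock in a local pointer, let the helper decide whether moving to the
next page actually requires an unlock/lock pair, and release whatever lock
is still held once the loop finishes.  Because pages map to a small array
of page locks keyed by physical address, consecutive pages of an object
frequently share a lock and the relock is skipped.  Below is a minimal
userspace sketch of that pattern using POSIX mutexes; "struct page",
page_lockptr(), and process_pages() are illustrative stand-ins, not the
kernel's actual types or functions.

/*
 * Standalone sketch of the vm_page_change_lock() pattern, modeled with
 * POSIX mutexes.  The kernel hashes a page to one of an array of page
 * locks, so two consecutive pages often map to the same mutex.
 */
#include <pthread.h>
#include <stddef.h>

#define	NLOCKS	4

static pthread_mutex_t locks[NLOCKS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

struct page {
	unsigned long pa;	/* stand-in for the physical address */
};

/* Stand-in for vm_page_lockptr(): map a page to its lock. */
static pthread_mutex_t *
page_lockptr(struct page *m)
{

	return (&locks[m->pa % NLOCKS]);
}

/*
 * The helper: make *mtx the lock protecting "m", skipping the
 * unlock/lock pair when the old and the new page share a lock.
 */
static void
page_change_lock(struct page *m, pthread_mutex_t **mtx)
{
	pthread_mutex_t *mtx1;

	mtx1 = page_lockptr(m);
	if (*mtx == mtx1)
		return;
	if (*mtx != NULL)
		pthread_mutex_unlock(*mtx);
	*mtx = mtx1;
	pthread_mutex_lock(mtx1);
}

/*
 * Caller pattern: enter the loop with no lock held, and drop whatever
 * lock is still held after the last iteration.
 */
void
process_pages(struct page *pages, size_t count)
{
	pthread_mutex_t *mtx;

	mtx = NULL;
	for (size_t i = 0; i < count; i++) {
		page_change_lock(&pages[i], &mtx);
		/* ... operate on pages[i] under its page lock ... */
	}
	if (mtx != NULL)
		pthread_mutex_unlock(mtx);
}

The saving over the naive lock/unlock-per-page loop is that the common
case, two neighboring pages hashing to the same lock, costs only a
pointer comparison.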
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -1917,6 +1917,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
     int options)
 {
 	vm_page_t p, next;
+	struct mtx *mtx;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
@@ -1927,6 +1928,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
 	vm_object_pip_add(object, 1);
 again:
 	p = vm_page_find_least(object, start);
+	mtx = NULL;
 
 	/*
 	 * Here, the variable "p" is either (1) the page with the least pindex
@@ -1943,7 +1945,7 @@ again:
 		 * however, be invalidated if the option OBJPR_CLEANONLY is
 		 * not specified.
 		 */
-		vm_page_lock(p);
+		vm_page_change_lock(p, &mtx);
 		if (vm_page_xbusied(p)) {
 			VM_OBJECT_WUNLOCK(object);
 			vm_page_busy_sleep(p, "vmopax", true);
@@ -1957,7 +1959,7 @@ again:
 				p->valid = 0;
 				vm_page_undirty(p);
 			}
-			goto next;
+			continue;
 		}
 		if (vm_page_busied(p)) {
 			VM_OBJECT_WUNLOCK(object);
@@ -1971,14 +1973,14 @@ again:
 			if ((options & OBJPR_NOTMAPPED) == 0)
 				pmap_remove_write(p);
 			if (p->dirty)
-				goto next;
+				continue;
 		}
 		if ((options & OBJPR_NOTMAPPED) == 0)
 			pmap_remove_all(p);
 		vm_page_free(p);
-next:
-		vm_page_unlock(p);
 	}
+	if (mtx != NULL)
+		mtx_unlock(mtx);
 	vm_object_pip_wakeup(object);
 }
 
@@ -2001,7 +2003,7 @@ next:
 void
 vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 {
-	struct mtx *mtx, *new_mtx;
+	struct mtx *mtx;
 	vm_page_t p, next;
 
 	VM_OBJECT_ASSERT_LOCKED(object);
@@ -2018,17 +2020,7 @@ vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 	mtx = NULL;
 	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
 		next = TAILQ_NEXT(p, listq);
-
-		/*
-		 * Avoid releasing and reacquiring the same page lock.
-		 */
-		new_mtx = vm_page_lockptr(p);
-		if (mtx != new_mtx) {
-			if (mtx != NULL)
-				mtx_unlock(mtx);
-			mtx = new_mtx;
-			mtx_lock(mtx);
-		}
+		vm_page_change_lock(p, &mtx);
 		vm_page_deactivate_noreuse(p);
 	}
 	if (mtx != NULL)
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -937,6 +937,23 @@ vm_page_flash(vm_page_t m)
 		wakeup(m);
 }
 
+/*
+ * Avoid releasing and reacquiring the same page lock.
+ */
+void
+vm_page_change_lock(vm_page_t m, struct mtx **mtx)
+{
+	struct mtx *mtx1;
+
+	mtx1 = vm_page_lockptr(m);
+	if (*mtx == mtx1)
+		return;
+	if (*mtx != NULL)
+		mtx_unlock(*mtx);
+	*mtx = mtx1;
+	mtx_lock(mtx1);
+}
+
 /*
  * Keep page from being freed by the page daemon
  * much of the same effect as wiring, except much lower
@@ -970,20 +987,11 @@ vm_page_unhold(vm_page_t mem)
 void
 vm_page_unhold_pages(vm_page_t *ma, int count)
 {
-	struct mtx *mtx, *new_mtx;
+	struct mtx *mtx;
 
 	mtx = NULL;
 	for (; count != 0; count--) {
-		/*
-		 * Avoid releasing and reacquiring the same page lock.
-		 */
-		new_mtx = vm_page_lockptr(*ma);
-		if (mtx != new_mtx) {
-			if (mtx != NULL)
-				mtx_unlock(mtx);
-			mtx = new_mtx;
-			mtx_lock(mtx);
-		}
+		vm_page_change_lock(*ma, &mtx);
 		vm_page_unhold(*ma);
 		ma++;
 	}
@@ -2023,7 +2031,7 @@ vm_page_t
 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
     u_long alignment, vm_paddr_t boundary, int options)
 {
-	struct mtx *m_mtx, *new_mtx;
+	struct mtx *m_mtx;
 	vm_object_t object;
 	vm_paddr_t pa;
 	vm_page_t m, m_run;
@@ -2066,16 +2074,7 @@ vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
 		} else
 			KASSERT(m_run != NULL, ("m_run == NULL"));
 
-		/*
-		 * Avoid releasing and reacquiring the same page lock.
-		 */
-		new_mtx = vm_page_lockptr(m);
-		if (m_mtx != new_mtx) {
-			if (m_mtx != NULL)
-				mtx_unlock(m_mtx);
-			m_mtx = new_mtx;
-			mtx_lock(m_mtx);
-		}
+		vm_page_change_lock(m, &m_mtx);
 		m_inc = 1;
 retry:
 		if (m->wire_count != 0 || m->hold_count != 0)
@@ -2225,7 +2224,7 @@ static int
 vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
     vm_paddr_t high)
 {
-	struct mtx *m_mtx, *new_mtx;
+	struct mtx *m_mtx;
 	struct spglist free;
 	vm_object_t object;
 	vm_paddr_t pa;
@@ -2246,13 +2245,7 @@ vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
 		/*
 		 * Avoid releasing and reacquiring the same page lock.
 		 */
-		new_mtx = vm_page_lockptr(m);
-		if (m_mtx != new_mtx) {
-			if (m_mtx != NULL)
-				mtx_unlock(m_mtx);
-			m_mtx = new_mtx;
-			mtx_lock(m_mtx);
-		}
+		vm_page_change_lock(m, &m_mtx);
 retry:
 		if (m->wire_count != 0 || m->hold_count != 0)
 			error = EBUSY;
@@ -2365,12 +2358,7 @@ retry:
 					 * The new page must be deactivated
 					 * before the object is unlocked.
 					 */
-					new_mtx = vm_page_lockptr(m_new);
-					if (m_mtx != new_mtx) {
-						mtx_unlock(m_mtx);
-						m_mtx = new_mtx;
-						mtx_lock(m_mtx);
-					}
+					vm_page_change_lock(m_new, &m_mtx);
 					vm_page_deactivate(m_new);
 				} else {
 					m->flags &= ~PG_ZERO;
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -470,6 +470,7 @@ vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr);
 vm_page_t vm_page_alloc_freelist(int, int);
+void vm_page_change_lock(vm_page_t m, struct mtx **mtx);
 vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
 int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
     vm_page_t *ma, int count);