- Push down Giant from vm_pageout() to vm_pageout_scan(), freeing
  vm_pageout_page_stats() from Giant.
- Modify vm_pager_put_pages() and vm_pager_page_unswapped() to expect the
  vm object to be locked on entry.  (All of the pager routines now expect
  this.)
alc 2003-10-24 06:43:04 +00:00
parent 2a08abfe7d
commit a71ff79234
4 changed files with 14 additions and 12 deletions
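
The second bullet establishes a locked-on-entry protocol for the pager entry points: callers of vm_pager_put_pages() and vm_pager_page_unswapped() must now hold the vm object lock, and the inlines assert it. Below is a minimal user-space sketch of that invariant, with a pthread mutex standing in for the object lock; object_t, assert_locked(), and pager_put_pages() are hypothetical names for illustration, not kernel API.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for a vm object with its own lock. */
typedef struct {
        pthread_mutex_t lock;
} object_t;

/*
 * Crude model of VM_OBJECT_LOCK_ASSERT(object, MA_OWNED):
 * pthread_mutex_trylock() on a mutex that is already held returns
 * nonzero (EBUSY) instead of blocking, so a zero return here means
 * the caller broke the locked-on-entry rule.
 */
static void
assert_locked(object_t *obj)
{
        assert(pthread_mutex_trylock(&obj->lock) != 0);
}

/* Models vm_pager_put_pages(): the object must be locked on entry. */
static void
pager_put_pages(object_t *obj)
{
        assert_locked(obj);
        puts("dispatching to the pager with the object locked");
}

int
main(void)
{
        object_t obj = { PTHREAD_MUTEX_INITIALIZER };

        pthread_mutex_lock(&obj.lock);          /* caller locks first */
        pager_put_pages(&obj);
        pthread_mutex_unlock(&obj.lock);
        return (0);
}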

sys/vm/swap_pager.c

@@ -1187,6 +1187,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
m[0]->object
);
}
VM_OBJECT_UNLOCK(object);
/*
* Step 1
*
@@ -1368,6 +1369,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
swp_pager_async_iodone(bp);
splx(s);
}
VM_OBJECT_LOCK(object);
}
/*
@@ -1652,13 +1654,13 @@ swp_pager_force_pagein(struct swblock *swap, int idx)
m = vm_page_grab(object, pindex + idx, VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
if (m->valid == VM_PAGE_BITS_ALL) {
vm_object_pip_subtract(object, 1);
VM_OBJECT_UNLOCK(object);
vm_page_lock_queues();
vm_page_activate(m);
vm_page_dirty(m);
vm_page_wakeup(m);
vm_page_unlock_queues();
vm_pager_page_unswapped(m);
VM_OBJECT_UNLOCK(object);
return;
}
@@ -1666,14 +1668,13 @@ swp_pager_force_pagein(struct swblock *swap, int idx)
VM_PAGER_OK)
panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
vm_object_pip_subtract(object, 1);
VM_OBJECT_UNLOCK(object);
vm_page_lock_queues();
vm_page_dirty(m);
vm_page_dontneed(m);
vm_page_wakeup(m);
vm_page_unlock_queues();
vm_pager_page_unswapped(m);
VM_OBJECT_UNLOCK(object);
}
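
The one-line hunks in swap_pager_putpages() above give the swap pager its matching shape: entered with the object locked, it drops the lock across buffer setup and the swap I/O and retakes it before returning, while swp_pager_force_pagein() now keeps the object locked until after vm_pager_page_unswapped(). A sketch of the drop-and-reacquire shape under the same user-space model; do_swap_io() is a hypothetical placeholder for the real buffer and I/O work.

#include <pthread.h>
#include <stdio.h>

typedef struct {
        pthread_mutex_t lock;
} object_t;

/* Hypothetical placeholder for building bufs and issuing the write. */
static void
do_swap_io(void)
{
        puts("swap I/O in flight; object lock not held");
}

/*
 * Shape of swap_pager_putpages() after this change: the caller holds
 * the object lock on entry; the pager drops it around the blocking
 * I/O and restores it before returning.
 */
static void
swap_putpages(object_t *obj)
{
        pthread_mutex_unlock(&obj->lock);       /* VM_OBJECT_UNLOCK(object) */
        do_swap_io();
        pthread_mutex_lock(&obj->lock);         /* VM_OBJECT_LOCK(object) */
}

int
main(void)
{
        object_t obj = { PTHREAD_MUTEX_INITIALIZER };

        pthread_mutex_lock(&obj.lock);
        swap_putpages(&obj);
        pthread_mutex_unlock(&obj.lock);
        return (0);
}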

sys/vm/vm_pageout.c

@@ -362,12 +362,13 @@ vm_pageout_clean(m)
int
vm_pageout_flush(vm_page_t *mc, int count, int flags)
{
vm_object_t object;
vm_object_t object = mc[0]->object;
int pageout_status[count];
int numpagedout = 0;
int i;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
/*
* Initiate I/O. Bump the vm_page_t->busy counter and
* mark the pages read-only.
@@ -385,16 +386,13 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags)
vm_page_io_start(mc[i]);
pmap_page_protect(mc[i], VM_PROT_READ);
}
object = mc[0]->object;
vm_page_unlock_queues();
vm_object_pip_add(object, count);
VM_OBJECT_UNLOCK(object);
vm_pager_put_pages(object, mc, count,
(flags | ((object == kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
pageout_status);
VM_OBJECT_LOCK(object);
vm_page_lock_queues();
for (i = 0; i < count; i++) {
vm_page_t mt = mc[i];
@@ -669,7 +667,7 @@ vm_pageout_scan(int pass)
int s;
struct thread *td;
GIANT_REQUIRED;
mtx_lock(&Giant);
/*
* Decrease registered cache sizes.
*/
@@ -1224,6 +1222,7 @@ vm_pageout_scan(int pass)
wakeup(&cnt.v_free_count);
}
}
mtx_unlock(&Giant);
}
/*
@@ -1324,8 +1323,6 @@ vm_pageout()
{
int error, pass, s;
mtx_lock(&Giant);
/*
* Initialize some paging parameters.
*/
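
These vm_pageout.c hunks implement the first bullet: the mtx_lock(&Giant) that used to cover all of vm_pageout()'s loop moves into vm_pageout_scan(), so the sibling vm_pageout_page_stats() pass runs without Giant. A user-space model of that restructuring, with big_lock standing in for Giant and the function bodies reduced to prints.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for Giant. */
static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* After the pushdown, only the scan itself takes the big lock... */
static void
pageout_scan(void)
{
        pthread_mutex_lock(&big_lock);
        puts("scanning the page queues under the big lock");
        pthread_mutex_unlock(&big_lock);
}

/* ...so the stats pass runs with no big lock held at all. */
static void
pageout_page_stats(void)
{
        puts("collecting page stats without the big lock");
}

/* The daemon loop no longer touches the big lock itself. */
int
main(void)
{
        int pass;

        for (pass = 0; pass < 2; pass++) {
                pageout_scan();
                pageout_page_stats();
        }
        return (0);
}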

sys/vm/vm_pager.h

@@ -141,7 +141,8 @@ vm_pager_put_pages(
int flags,
int *rtvals
) {
GIANT_REQUIRED;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
(*pagertab[object->type]->pgo_putpages)
(object, m, count, flags, rtvals);
}
@@ -186,7 +187,8 @@ vm_pager_has_page(
static __inline void
vm_pager_page_unswapped(vm_page_t m)
{
GIANT_REQUIRED;
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if (pagertab[m->object->type]->pgo_pageunswapped)
(*pagertab[m->object->type]->pgo_pageunswapped)(m);
}

sys/vm/vnode_pager.c

@@ -953,6 +953,7 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
* Call device-specific putpages function
*/
vp = object->handle;
VM_OBJECT_UNLOCK(object);
if (vp->v_type != VREG)
mp = NULL;
(void)vn_start_write(vp, &mp, V_WAIT);
@@ -960,6 +961,7 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
KASSERT(rtval != EOPNOTSUPP,
("vnode_pager: stale FS putpages\n"));
vn_finished_write(mp);
VM_OBJECT_LOCK(object);
}
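
The vnode pager gets the same treatment as the swap pager, with one extra wrinkle: vn_start_write() and the VOP_PUTPAGES path acquire vnode-side locks and can sleep, so the object lock is released before crossing into the filesystem and reacquired afterward, never holding both at once. A sketch of that ordering under the same user-space model; vnode_t here is a hypothetical stand-in, not the kernel structure.

#include <pthread.h>
#include <stdio.h>

typedef struct {
        pthread_mutex_t lock;
} object_t;

typedef struct {
        pthread_mutex_t lock;
} vnode_t;

/*
 * Shape of vnode_pager_putpages() after this change: release the
 * object lock before taking the (possibly sleeping) vnode side, then
 * restore it, so the two locks are never held across the call.
 */
static void
vnode_putpages(object_t *obj, vnode_t *vp)
{
        pthread_mutex_unlock(&obj->lock);       /* VM_OBJECT_UNLOCK(object) */

        pthread_mutex_lock(&vp->lock);          /* stands in for vn_start_write() */
        puts("writing pages through the vnode");
        pthread_mutex_unlock(&vp->lock);        /* stands in for vn_finished_write() */

        pthread_mutex_lock(&obj->lock);         /* VM_OBJECT_LOCK(object) */
}

int
main(void)
{
        object_t obj = { PTHREAD_MUTEX_INITIALIZER };
        vnode_t vp = { PTHREAD_MUTEX_INITIALIZER };

        pthread_mutex_lock(&obj.lock);
        vnode_putpages(&obj, &vp);
        pthread_mutex_unlock(&obj.lock);
        return (0);
}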