Add vm object locking to various pagers' "get pages" methods, i386 stack management functions, and a u area management function.
Alan Cox 2003-06-13 03:02:28 +00:00
parent d7522df29c
commit 8630c1173e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=116279
7 changed files with 25 additions and 9 deletions
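The recurring shape of the change in the i386 stack (pmap.c) and u area (vm_glue.c) diffs below is to bracket an entire loop over an object's pages with VM_OBJECT_LOCK/VM_OBJECT_UNLOCK rather than leaving the loop unsynchronized. A minimal userland sketch of that shape follows, with a pthread mutex standing in for the vm object mutex; struct object and struct page are invented stand-ins, not the kernel's vm_object and vm_page.

#include <pthread.h>

#define NPAGES 4

struct page { int wired; };             /* stand-in for vm_page */
struct object {                         /* stand-in for vm_object */
    pthread_mutex_t lock;               /* plays the role of the object mutex */
    struct page pages[NPAGES];
};

/*
 * Shape of pmap_new_thread()'s loop after the change: the lock is
 * held across every lookup and modification of the object's pages,
 * not taken and dropped per page.
 */
static void
object_wire_all(struct object *obj)
{
    int i;

    pthread_mutex_lock(&obj->lock);     /* VM_OBJECT_LOCK(ksobj) */
    for (i = 0; i < NPAGES; i++)
        obj->pages[i].wired = 1;        /* grab and wire page i */
    pthread_mutex_unlock(&obj->lock);   /* VM_OBJECT_UNLOCK(ksobj) */
}

int
main(void)
{
    struct object obj = { .lock = PTHREAD_MUTEX_INITIALIZER };

    object_wire_all(&obj);
    return (0);
}

Each sketch below builds the same way, with cc -pthread.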

sys/i386/i386/pmap.c

@@ -1038,6 +1038,7 @@ pmap_new_thread(struct thread *td, int pages)
* For the length of the stack, link in a real page of ram for each
* page of stack.
*/
VM_OBJECT_LOCK(ksobj);
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page
@@ -1052,6 +1053,7 @@ pmap_new_thread(struct thread *td, int pages)
m->valid = VM_PAGE_BITS_ALL;
vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(ksobj);
pmap_qenter(ks, ma, pages);
}
@@ -1073,6 +1075,7 @@ pmap_dispose_thread(td)
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
pmap_qremove(ks, pages);
VM_OBJECT_LOCK(ksobj);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
@@ -1083,6 +1086,7 @@ pmap_dispose_thread(td)
vm_page_free(m);
vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(ksobj);
/*
* Free the space that this stack was mapped to in the kernel
* address map.
@@ -1142,6 +1146,7 @@ pmap_swapout_thread(td)
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
pmap_qremove(ks, pages);
VM_OBJECT_LOCK(ksobj);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
@@ -1151,6 +1156,7 @@ pmap_swapout_thread(td)
vm_page_unwire(m, 0);
vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(ksobj);
}
/*
@@ -1170,6 +1176,7 @@ pmap_swapin_thread(td)
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
VM_OBJECT_LOCK(ksobj);
for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
@@ -1185,6 +1192,7 @@ pmap_swapin_thread(td)
vm_page_wakeup(m);
vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(ksobj);
pmap_qenter(ks, ma, pages);
}

sys/kern/kern_exec.c

@@ -777,9 +777,7 @@ exec_map_first_page(imgp)
}
}
initial_pagein = i;
VM_OBJECT_UNLOCK(object); /* XXX */
rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
VM_OBJECT_LOCK(object); /* XXX */
ma[0] = vm_page_lookup(object, 0);
vm_page_lock_queues();
if ((rv != VM_PAGER_OK) || (ma[0] == NULL) ||
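The two deleted XXX lines were a stopgap that released the object lock by hand around the pager call. With the vm_pager.c change later in this commit, vm_pager_get_pages() manages the lock itself, so exec_map_first_page() can simply stay locked across the call. The re-lookup of ma[0] afterwards is still required: a pager may drop the object lock while it sleeps for I/O, so page pointers cached before the call can be stale. A hedged sketch of that revalidation idiom, with invented types and a pthread mutex in place of the object lock:

#include <pthread.h>
#include <stddef.h>

struct page { int valid; };
struct object {
    pthread_mutex_t lock;
    struct page *pages[1];
};

/* A pager may unlock, sleep for I/O, and relock; model only that. */
static int
pager_get_pages(struct object *obj)
{
    pthread_mutex_unlock(&obj->lock);
    /* ... the object can change here while the pager "sleeps" ... */
    pthread_mutex_lock(&obj->lock);
    return (0);
}

static struct page *
map_first_page(struct object *obj)
{
    struct page *m;

    pthread_mutex_lock(&obj->lock);
    (void)pager_get_pages(obj);         /* may have dropped the lock */
    m = obj->pages[0];                  /* ma[0] = vm_page_lookup(object, 0) */
    pthread_mutex_unlock(&obj->lock);
    return (m);
}

int
main(void)
{
    struct object obj = { .lock = PTHREAD_MUTEX_INITIALIZER, .pages = { NULL } };

    return (map_first_page(&obj) == NULL ? 0 : 1);
}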

sys/vm/device_pager.c

@@ -212,9 +212,9 @@ dev_pager_getpages(object, m, count, reqpage)
d_mmap_t *mapfunc;
int prot;
mtx_assert(&Giant, MA_OWNED);
dev = object->handle;
offset = m[reqpage]->pindex;
VM_OBJECT_UNLOCK(object);
prot = PROT_READ; /* XXX should pass in? */
mapfunc = devsw(dev)->d_mmap;
@@ -228,6 +228,7 @@ dev_pager_getpages(object, m, count, reqpage)
* free up the all of the original pages.
*/
page = dev_pager_getfake(paddr);
VM_OBJECT_LOCK(object);
TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist, page, pageq);
vm_page_lock_queues();
for (i = 0; i < count; i++)
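The sequence in dev_pager_getpages() is now: read what is needed from the object (the pindex) while locked, drop the lock across the call into the driver's d_mmap routine, build the fake page privately, and retake the lock only to publish the page on the object's devp_pglist. A sketch of that read-locked, build-unlocked, publish-locked sequence; the node type and singly linked list are invented stand-ins for the fake-page TAILQ:

#include <pthread.h>
#include <stdlib.h>

struct node {                           /* stand-in for a fake vm_page */
    struct node *next;
    long paddr;
};
struct object {
    pthread_mutex_t lock;
    struct node *pglist;                /* plays the role of devp_pglist */
    long pindex;
};

/* Called with obj->lock held; returns with it held. */
static void
dev_getpages(struct object *obj)
{
    struct node *page;
    long pindex;

    pindex = obj->pindex;               /* offset = m[reqpage]->pindex */
    pthread_mutex_unlock(&obj->lock);   /* VM_OBJECT_UNLOCK(object) */
    page = malloc(sizeof(*page));       /* dev_pager_getfake(paddr) */
    if (page == NULL)
        abort();                        /* error handling elided */
    page->paddr = pindex;               /* result of devsw(dev)->d_mmap(...) */
    pthread_mutex_lock(&obj->lock);     /* VM_OBJECT_LOCK(object) */
    page->next = obj->pglist;           /* TAILQ_INSERT_TAIL(..., page, pageq) */
    obj->pglist = page;
}

int
main(void)
{
    struct object obj = { .lock = PTHREAD_MUTEX_INITIALIZER };

    pthread_mutex_lock(&obj.lock);
    dev_getpages(&obj);
    pthread_mutex_unlock(&obj.lock);
    free(obj.pglist);
    return (0);
}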

sys/vm/swap_pager.c

@@ -1006,8 +1006,6 @@ swap_pager_getpages(object, m, count, reqpage)
daddr_t blk;
vm_pindex_t lastpindex;
GIANT_REQUIRED;
mreq = m[reqpage];
if (mreq->object != object) {
@@ -1074,6 +1072,10 @@ swap_pager_getpages(object, m, count, reqpage)
if (blk == SWAPBLK_NONE)
return (VM_PAGER_FAIL);
/*
* Getpbuf() can sleep.
*/
VM_OBJECT_UNLOCK(object);
/*
* Get a swap buffer header to perform the IO
*/
@@ -1095,6 +1097,7 @@ swap_pager_getpages(object, m, count, reqpage)
bp->b_bufsize = PAGE_SIZE * (j - i);
bp->b_pager.pg_reqpage = reqpage - i;
VM_OBJECT_LOCK(object);
vm_page_lock_queues();
{
int k;
@@ -1105,6 +1108,7 @@ swap_pager_getpages(object, m, count, reqpage)
}
}
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(object);
bp->b_npages = j - i;
pbgetvp(swapdev_vp, bp);
@@ -1157,6 +1161,7 @@ swap_pager_getpages(object, m, count, reqpage)
vm_page_unlock_queues();
splx(s);
VM_OBJECT_LOCK(mreq->object);
/*
* mreq is left busied after completion, but all the other pages
* are freed. If we had an unrecoverable read error the page will
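Besides dropping the now-redundant GIANT_REQUIRED (the vm_pager_get_pages() entry point later in this commit still asserts it), this diff makes the sleep points explicit: the object lock is released before getpbuf(), which can sleep waiting for a buffer, retaken to busy the pages, released again across the swap I/O itself, and finally reacquired through mreq->object before returning. The vnode pager at the end of the commit applies the same bracket around VOP_GETPAGES. A minimal pthread sketch of the unlock-across-a-sleeping-call bracket, with invented names; malloc() stands in for the sleeping getpbuf():

#include <pthread.h>
#include <stdlib.h>

struct object {
    pthread_mutex_t lock;
    void *bp;                           /* stand-in for the pbuf */
};

/*
 * Called with obj->lock held; returns with it held, as
 * swap_pager_getpages() returns with mreq->object locked.
 */
static void
getpages(struct object *obj)
{
    void *bp;

    pthread_mutex_unlock(&obj->lock);   /* getpbuf() can sleep */
    bp = malloc(4096);                  /* getpbuf(...) */
    if (bp == NULL)
        abort();                        /* error handling elided */
    pthread_mutex_lock(&obj->lock);     /* VM_OBJECT_LOCK(object) */
    obj->bp = bp;                       /* busy the pages for the transfer */
    pthread_mutex_unlock(&obj->lock);   /* unlock across the I/O itself */
    /* ... swap I/O runs without the object lock ... */
    pthread_mutex_lock(&obj->lock);     /* VM_OBJECT_LOCK(mreq->object) */
}

int
main(void)
{
    struct object obj = { .lock = PTHREAD_MUTEX_INITIALIZER };

    pthread_mutex_lock(&obj.lock);
    getpages(&obj);
    pthread_mutex_unlock(&obj.lock);
    free(obj.bp);
    return (0);
}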

sys/vm/vm_glue.c

@@ -322,6 +322,7 @@ vm_proc_swapin(struct proc *p)
int i;
upobj = p->p_upages_obj;
VM_OBJECT_LOCK(upobj);
for (i = 0; i < UAREA_PAGES; i++) {
m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
@@ -331,7 +332,6 @@ vm_proc_swapin(struct proc *p)
}
ma[i] = m;
}
VM_OBJECT_LOCK(upobj);
if (upobj->resident_page_count != UAREA_PAGES)
panic("vm_proc_swapin: lost pages from upobj");
vm_page_lock_queues();

sys/vm/vm_pager.c

@@ -119,14 +119,17 @@ vm_pager_get_pages(
int count,
int reqpage
) {
int is_object_locked;
int r;
GIANT_REQUIRED;
if (!(is_object_locked = VM_OBJECT_LOCKED(object)))
VM_OBJECT_LOCK(object);
r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
if (r == VM_PAGER_OK && m[reqpage]->valid != VM_PAGE_BITS_ALL) {
vm_page_zero_invalid(m[reqpage], TRUE);
}
if (!is_object_locked)
VM_OBJECT_UNLOCK(object);
return (r);
}
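This is the piece that lets the kern_exec.c change above drop its manual unlock/relock: vm_pager_get_pages() now takes the object lock only when the calling thread does not already hold it, and releases it only if it took it here, so locked and unlocked callers share one entry point while locking is pushed outward incrementally. The kernel tests ownership with VM_OBJECT_LOCKED() (mtx_owned() underneath); the sketch below approximates that in userland with a POSIX error-checking mutex, whose lock operation fails with EDEADLK when the calling thread already owns it.

#include <errno.h>
#include <pthread.h>

struct object { pthread_mutex_t lock; };    /* must be PTHREAD_MUTEX_ERRORCHECK */

static int
pager_get_pages(struct object *obj)
{
    int r, was_locked;

    /* approximates is_object_locked = VM_OBJECT_LOCKED(object) */
    was_locked = (pthread_mutex_lock(&obj->lock) == EDEADLK);
    r = 0;                              /* (*pgo_getpages)() would run here */
    if (!was_locked)
        pthread_mutex_unlock(&obj->lock);
    return (r);
}

int
main(void)
{
    struct object obj;
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init(&obj.lock, &attr);
    pthread_mutexattr_destroy(&attr);

    pager_get_pages(&obj);              /* unlocked caller: lock taken and dropped */

    pthread_mutex_lock(&obj.lock);      /* locked caller, e.g. exec_map_first_page() */
    pager_get_pages(&obj);              /* lock is left held for the caller */
    pthread_mutex_unlock(&obj.lock);
    return (0);
}

Either way the caller's lock state is preserved across the call, which is exactly what allowed the XXX unlock/relock pair in exec_map_first_page() to be deleted.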

sys/vm/vnode_pager.c

@@ -623,11 +623,12 @@ vnode_pager_getpages(object, m, count, reqpage)
struct vnode *vp;
int bytes = count * PAGE_SIZE;
GIANT_REQUIRED;
vp = object->handle;
VM_OBJECT_UNLOCK(object);
rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
KASSERT(rtval != EOPNOTSUPP,
("vnode_pager: FS getpages not implemented\n"));
VM_OBJECT_LOCK(object);
return rtval;
}