o Pass VM_ALLOC_WIRED to vm_page_grab() rather than calling vm_page_wire()
  in pmap_new_thread(), pmap_pinit(), and vm_proc_new().
o Lock page queue accesses by vm_page_free() in pmap_object_init_pt().
This commit is contained in:
Alan Cox 2002-07-29 05:42:44 +00:00
parent e6502af979
commit 14f8ceaa07
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=100862
3 changed files with 12 additions and 33 deletions

View File

@ -974,15 +974,10 @@ pmap_new_thread(struct thread *td)
/*
* Get a kernel stack page
*/
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
m = vm_page_grab(ksobj, i,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
ma[i] = m;
/*
* Wire the page
*/
m->wire_count++;
cnt.v_wire_count++;
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
@ -1222,12 +1217,7 @@ pmap_pinit(pmap)
* allocate the page directory page
*/
ptdpg = vm_page_grab(pmap->pm_pteobj, PTDPTDI,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
ptdpg->wire_count = 1;
++cnt.v_wire_count;
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
vm_page_flag_clear(ptdpg, PG_MAPPED | PG_BUSY); /* not usually mapped*/
ptdpg->valid = VM_PAGE_BITS_ALL;
@ -2347,7 +2337,9 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
m[0] = p;
if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
vm_page_lock_queues();
vm_page_free(p);
vm_page_unlock_queues();
return;
}

View File

@ -974,15 +974,10 @@ pmap_new_thread(struct thread *td)
/*
* Get a kernel stack page
*/
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
m = vm_page_grab(ksobj, i,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
ma[i] = m;
/*
* Wire the page
*/
m->wire_count++;
cnt.v_wire_count++;
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
@ -1222,12 +1217,7 @@ pmap_pinit(pmap)
* allocate the page directory page
*/
ptdpg = vm_page_grab(pmap->pm_pteobj, PTDPTDI,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
ptdpg->wire_count = 1;
++cnt.v_wire_count;
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
vm_page_flag_clear(ptdpg, PG_MAPPED | PG_BUSY); /* not usually mapped*/
ptdpg->valid = VM_PAGE_BITS_ALL;
@ -2347,7 +2337,9 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
m[0] = p;
if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
vm_page_lock_queues();
vm_page_free(p);
vm_page_unlock_queues();
return;
}

View File

@ -230,15 +230,10 @@ vm_proc_new(struct proc *p)
/*
* Get a uarea page.
*/
m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
m = vm_page_grab(upobj, i,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
ma[i] = m;
/*
* Wire the page.
*/
m->wire_count++;
cnt.v_wire_count++;
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);