 o Don't set PG_MAPPED or PG_WRITEABLE when a page is mapped using
   pmap_kenter() or pmap_qenter().
 o Use VM_ALLOC_WIRED in pmap_new_thread().
This commit is contained in:
Alan Cox 2002-08-05 00:04:18 +00:00
parent c939f1aee7
commit 7ffcf9ec77
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=101346
5 changed files with 10 additions and 39 deletions

View File

@ -949,13 +949,8 @@ pmap_new_thread(struct thread *td)
/*
* Get a kernel stack page
*/
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
/*
* Wire the page
*/
m->wire_count++;
cnt.v_wire_count++;
m = vm_page_grab(ksobj, i,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
/*
* Enter the page into the kernel address space.
@ -968,7 +963,6 @@ pmap_new_thread(struct thread *td)
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
m->valid = VM_PAGE_BITS_ALL;
}
}
@ -1073,7 +1067,6 @@ pmap_swapin_thread(td)
vm_page_lock_queues();
vm_page_wire(m);
vm_page_wakeup(m);
vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
vm_page_unlock_queues();
}

View File

@ -1532,12 +1532,8 @@ pmap_new_thread(struct thread *td)
/*
* Get a kernel stack page.
*/
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
/*
* Wire the page.
*/
m->wire_count++;
m = vm_page_grab(ksobj, i,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
/*
* Enter the page into the kernel address space.
@ -1546,7 +1542,6 @@ pmap_new_thread(struct thread *td)
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
m->valid = VM_PAGE_BITS_ALL;
}
}

View File

@ -1532,12 +1532,8 @@ pmap_new_thread(struct thread *td)
/*
* Get a kernel stack page.
*/
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
/*
* Wire the page.
*/
m->wire_count++;
m = vm_page_grab(ksobj, i,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
/*
* Enter the page into the kernel address space.
@ -1546,7 +1542,6 @@ pmap_new_thread(struct thread *td)
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
m->valid = VM_PAGE_BITS_ALL;
}
}

View File

@ -1532,12 +1532,8 @@ pmap_new_thread(struct thread *td)
/*
* Get a kernel stack page.
*/
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
/*
* Wire the page.
*/
m->wire_count++;
m = vm_page_grab(ksobj, i,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
/*
* Enter the page into the kernel address space.
@ -1546,7 +1542,6 @@ pmap_new_thread(struct thread *td)
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
m->valid = VM_PAGE_BITS_ALL;
}
}

View File

@ -948,18 +948,12 @@ pmap_new_thread(struct thread *td)
/*
* Get a kernel stack page.
*/
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
m = vm_page_grab(ksobj, i,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
ma[i] = m;
/*
* Wire the page.
*/
m->wire_count++;
cnt.v_wire_count++;
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
m->valid = VM_PAGE_BITS_ALL;
}
@ -1052,7 +1046,6 @@ pmap_swapin_thread(struct thread *td)
vm_page_lock_queues();
vm_page_wire(m);
vm_page_wakeup(m);
vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
vm_page_unlock_queues();
}
pmap_qenter(ks, ma, KSTACK_PAGES);