Add an access type parameter to pmap_enter(). It will be used to
implement superpage promotion.

Correct a style error in kmem_malloc(): pmap_enter()'s last parameter is
a Boolean.
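
For orientation, the shape of the interface change, condensed from the
hunks below (a sketch for reading the diff, not a buildable fragment;
the real declaration is in the header hunk):

/* Before: the fifth argument was the wired Boolean. */
void	pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
	    vm_prot_t prot, boolean_t wired);

/* After: "access" describes the access that caused the mapping to be
 * created (e.g. the fault type in vm_fault()), while "prot" is still
 * the protection the mapping receives. */
void	pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access,
	    vm_page_t m, vm_prot_t prot, boolean_t wired);

/* A caller such as vm_fault() now forwards the fault type: */
pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);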
Alan Cox 2008-01-03 07:34:34 +00:00
parent cd093614f3
commit eb2a051720
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=175067
10 changed files with 22 additions and 20 deletions

View File

@@ -2250,8 +2250,8 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
  * insert this page into the given map NOW.
  */
 void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired)
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
+    vm_prot_t prot, boolean_t wired)
 {
 	vm_paddr_t pa;
 	pd_entry_t *pde;

View File

@@ -3324,8 +3324,8 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
  */
 void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired)
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
+    vm_prot_t prot, boolean_t wired)
 {
 
 	vm_page_lock_queues();

View File

@@ -2302,8 +2302,8 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
  * insert this page into the given map NOW.
  */
 void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired)
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
+    vm_prot_t prot, boolean_t wired)
 {
 	vm_paddr_t pa;
 	pd_entry_t *pde;

View File

@@ -1555,8 +1555,8 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
  * insert this page into the given map NOW.
  */
 void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired)
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
+    vm_prot_t prot, boolean_t wired)
 {
 	pmap_t oldpmap;
 	vm_offset_t pa;

View File

@@ -107,8 +107,8 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
 }
 
 void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
-    boolean_t wired)
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t p,
+    vm_prot_t prot, boolean_t wired)
 {
 	MMU_ENTER(mmu_obj, pmap, va, p, prot, wired);
 }

View File

@@ -1273,8 +1273,8 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
  * will be wired down.
  */
 void
-pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired)
+pmap_enter(pmap_t pm, vm_offset_t va, vm_prot_t access, vm_page_t m,
+    vm_prot_t prot, boolean_t wired)
 {
 
 	vm_page_lock_queues();

View File

@@ -1039,8 +1039,8 @@ pmap_add_tte(pmap_t pmap, vm_offset_t va, vm_page_t m, tte_t *tte_data, int wire
  * will be wired down.
  */
 void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired)
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
+    vm_prot_t prot, boolean_t wired)
 {
 	vm_paddr_t pa, opa;
 	uint64_t tte_data, otte_data;

View File

@@ -95,8 +95,8 @@ void pmap_clear_modify(vm_page_t m);
 void	pmap_clear_reference(vm_page_t m);
 void	pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
 void	pmap_copy_page(vm_page_t, vm_page_t);
-void	pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
-	    boolean_t);
+void	pmap_enter(pmap_t, vm_offset_t, vm_prot_t, vm_page_t,
+	    vm_prot_t, boolean_t);
 void	pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	    vm_prot_t prot);
 void	pmap_enter_object(pmap_t pmap, vm_offset_t start,

View File

@@ -888,7 +888,7 @@ RetryFault:;
 	 * back on the active queue until later so that the pageout daemon
 	 * won't find it (yet).
 	 */
-	pmap_enter(fs.map->pmap, vaddr, fs.m, prot, wired);
+	pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);
 	if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
 		vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
 	}
@@ -1177,9 +1177,10 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
 	VM_OBJECT_UNLOCK(dst_object);
 
 	/*
-	 * Enter it in the pmap...
+	 * Enter it in the pmap as a read and/or execute access.
 	 */
-	pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE);
+	pmap_enter(dst_map->pmap, vaddr, prot & ~VM_PROT_WRITE, dst_m,
+	    prot, FALSE);
 
 	/*
 	 * Mark it no longer busy, and put it on the active list.

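The vm_fault_copy_entry() hunk above shows the two vm_prot_t arguments
doing different jobs: the copied page is mapped with its full
protection prot, but the access type handed to the pmap masks write
out, matching the updated comment. A hedged illustration, assuming
prot grants all three rights:

	vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
	/* The pmap is told the page is being entered for read/execute,
	 * not because of a write. */
	vm_prot_t access = prot & ~VM_PROT_WRITE;
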
View File

@@ -406,7 +406,8 @@ kmem_malloc(map, size, flags)
 		/*
 		 * Because this is kernel_pmap, this call will not block.
 		 */
-		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
+		pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
+		    TRUE);
 		vm_page_wakeup(m);
 	}
 	VM_OBJECT_UNLOCK(kmem_object);