Remove mpte optimization from pmap_enter_quick().
There is a race with the current locking scheme and removing it should
have no measurable performance impact. This fixes page faults leading
to panics in pmap_enter_quick_locked() on amd64/i386.

Reviewed by:	alc, jhb, peter, ps
commit	b3a7439a45
parent	63bddd18cc
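At a glance, the interface change reduces to the before/after sketch below. This is an annotation reconstructed from the hunks that follow, not part of the committed diff; the caller shown is the prefault loop in vm_fault.

/*
 * Illustration only, reconstructed from the hunks below.
 *
 * Old interface: the caller threads a page-table-page hint (mpte)
 * through successive calls and gets an updated hint back:
 *
 *	vm_page_t pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
 *	    vm_prot_t prot, vm_page_t mpte);
 *	...
 *	mpte = pmap_enter_quick(pmap, addr, m, entry->protection, mpte);
 *
 * New interface: nothing is passed in or returned; on amd64/i386 the
 * locked helper is handed NULL instead of a caller-cached page-table
 * page:
 *
 *	void pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
 *	    vm_prot_t prot);
 *	...
 *	pmap_enter_quick(pmap, addr, m, entry->protection);
 */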
@@ -2356,15 +2356,13 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
  * but is *MUCH* faster than pmap_enter...
  */
 
-vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    vm_page_t mpte)
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
 	PMAP_LOCK(pmap);
-	mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte);
+	(void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
 	PMAP_UNLOCK(pmap);
-	return (mpte);
 }
 
 static vm_page_t
@@ -3572,16 +3572,14 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
  * but is *MUCH* faster than pmap_enter...
  */
 
-vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    vm_page_t mpte)
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
 	PMAP_LOCK(pmap);
 	pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 	    FALSE);
 	PMAP_UNLOCK(pmap);
-	return (NULL);
 }
 
 /*
@@ -2432,15 +2432,13 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
  * but is *MUCH* faster than pmap_enter...
  */
 
-vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    vm_page_t mpte)
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
 	PMAP_LOCK(pmap);
-	mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte);
+	(void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
 	PMAP_UNLOCK(pmap);
-	return (mpte);
 }
 
 static vm_page_t
@@ -1665,15 +1665,13 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
  * but is *MUCH* faster than pmap_enter...
  */
 
-vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    vm_page_t mpte)
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
 	PMAP_LOCK(pmap);
 	pmap_enter_quick_locked(pmap, va, m, prot);
 	PMAP_UNLOCK(pmap);
-	return (NULL);
 }
 
 static void
@@ -313,8 +313,7 @@ void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
 void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
 void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
	vm_prot_t);
-vm_page_t moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
-	vm_page_t);
+void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
 vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
 vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
 void moea_init(mmu_t);
@@ -1174,16 +1173,16 @@ moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
 	PMAP_UNLOCK(pm);
 }
 
-vm_page_t
+void
 moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
-    vm_prot_t prot, vm_page_t mpte)
+    vm_prot_t prot)
 {
 
 	PMAP_LOCK(pm);
 	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 	    FALSE);
 	PMAP_UNLOCK(pm);
-	return (NULL);
+
 }
 
 vm_paddr_t
@@ -227,17 +227,13 @@ METHOD void enter_object {
  * @param _va mapping virtual address
  * @param _pg mapping physical page
  * @param _prot new page protection - used to see if page is exec.
- * @param _mpte ???
- *
- * @retval NULL	(possibly a hint for future calls ?)
  */
-METHOD vm_page_t enter_quick {
+METHOD void enter_quick {
 	mmu_t		_mmu;
 	pmap_t		_pmap;
 	vm_offset_t	_va;
 	vm_page_t	_pg;
 	vm_prot_t	_prot;
-	vm_page_t	_mpte;
 };
 
 
@@ -122,11 +122,10 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 	MMU_ENTER_OBJECT(mmu_obj, pmap, start, end, m_start, prot);
 }
 
-vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    vm_page_t mpte)
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
-	return (MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot, mpte));
+	MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot);
 }
 
 vm_paddr_t
@@ -1443,16 +1443,14 @@ pmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
 	PMAP_UNLOCK(pm);
 }
 
-vm_page_t
-pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    vm_page_t mpte)
+void
+pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
 	PMAP_LOCK(pm);
 	pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 	    FALSE);
 	PMAP_UNLOCK(pm);
-	return (NULL);
 }
 
 void
@@ -97,8 +97,8 @@ void		 pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
 void		 pmap_copy_page(vm_page_t, vm_page_t);
 void		 pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
		    boolean_t);
-vm_page_t	 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
-		    vm_prot_t prot, vm_page_t mpte);
+void		 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
+		    vm_prot_t prot);
 void		 pmap_enter_object(pmap_t pmap, vm_offset_t start,
		    vm_offset_t end, vm_page_t m_start, vm_prot_t prot);
 vm_paddr_t	 pmap_extract(pmap_t pmap, vm_offset_t va);
@@ -953,7 +953,7 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
 	int i;
 	vm_offset_t addr, starta;
 	vm_pindex_t pindex;
-	vm_page_t m, mpte;
+	vm_page_t m;
 	vm_object_t object;
 
 	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
@@ -968,7 +968,6 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
 		starta = 0;
 	}
 
-	mpte = NULL;
 	for (i = 0; i < PAGEORDER_SIZE; i++) {
 		vm_object_t backing_object, lobject;
 
@@ -1009,8 +1008,7 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
 			vm_page_lock_queues();
 			if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
 				vm_page_deactivate(m);
-			mpte = pmap_enter_quick(pmap, addr, m,
-				entry->protection, mpte);
+			pmap_enter_quick(pmap, addr, m, entry->protection);
 			vm_page_unlock_queues();
 		}
 		VM_OBJECT_UNLOCK(lobject);