Pass a value of type vm_prot_t to pmap_enter_quick() so that it determines
whether the mapping should permit execute access.
Alan Cox 2005-09-03 18:20:20 +00:00
parent e17c0e3256
commit ba8bca610c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=149768
12 changed files with 32 additions and 24 deletions
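
In outline, the change threads the caller's protection bits through pmap_enter_quick() so that each pmap decides whether to grant execute access. The stand-alone sketch below models the new contract; the vm_prot_t typedef and VM_PROT_* constants are stand-ins mirroring FreeBSD's definitions, not the kernel sources themselves.

#include <stdio.h>

typedef unsigned char vm_prot_t;
#define VM_PROT_READ    ((vm_prot_t)0x01)
#define VM_PROT_WRITE   ((vm_prot_t)0x02)
#define VM_PROT_EXECUTE ((vm_prot_t)0x04)

/*
 * Model of the new contract: pmap_enter_quick() creates read-only
 * mappings, so only the read and execute bits of the caller's
 * protection are honored.
 */
static void
enter_quick_model(vm_prot_t prot)
{
    prot &= VM_PROT_READ | VM_PROT_EXECUTE;
    printf("mapping is read-only and %sexecutable\n",
        (prot & VM_PROT_EXECUTE) ? "" : "not ");
}

int
main(void)
{
    enter_quick_model(VM_PROT_READ);                   /* data page */
    enter_quick_model(VM_PROT_READ | VM_PROT_EXECUTE); /* text page */
    return 0;
}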

View File

@@ -1787,12 +1787,12 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* 2. Not wired.
* 3. Read access.
* 4. No page table pages.
* 6. Page IS managed.
* but is *MUCH* faster than pmap_enter...
*/
vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    vm_page_t mpte)
{
register pt_entry_t *pte;
int managed;

View File

@@ -2036,12 +2036,12 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* 2. Not wired.
* 3. Read access.
* 4. No page table pages.
* 6. Page IS managed.
* but is *MUCH* faster than pmap_enter...
*/
vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    vm_page_t mpte)
{
pt_entry_t *pte;
vm_paddr_t pa;
@@ -2130,6 +2130,8 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
pmap->pm_stats.resident_count++;
pa = VM_PAGE_TO_PHYS(m);
+if ((prot & VM_PROT_EXECUTE) == 0)
+    pa |= pg_nx;
/*
* Now validate mapping with RO protection
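
On amd64, denying execute access is expressed through the no-execute (NX) bit, bit 63 of the page-table entry; the kernel's pg_nx holds that bit on processors that support NX. A stand-alone sketch of the test above, with illustrative values in place of the real PTE contents:

#include <stdint.h>
#include <stdio.h>

#define VM_PROT_READ    0x01
#define VM_PROT_EXECUTE 0x04

/* Bit 63 of an amd64 PTE is the no-execute bit (requires EFER.NXE). */
static const uint64_t pg_nx = (uint64_t)1 << 63;

int
main(void)
{
    uint64_t pa = 0x200000;          /* hypothetical physical address */
    int prot = VM_PROT_READ;         /* caller did not request execute */

    /* The same test as the diff: no execute permission, set NX. */
    if ((prot & VM_PROT_EXECUTE) == 0)
        pa |= pg_nx;

    printf("pte bits: %#jx\n", (uintmax_t)pa);
    return 0;
}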

View File

@@ -3536,19 +3536,19 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* 2. Not wired.
* 3. Read access.
* 4. No page table pages.
* 6. Page IS managed.
* but is *MUCH* faster than pmap_enter...
*/
vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    vm_page_t mpte)
{
vm_page_busy(m);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(m->object);
mtx_lock(&Giant);
-pmap_enter(pmap, va, m, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
+pmap_enter(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
pmap_idcache_wbinv_all(pmap);
mtx_unlock(&Giant);
VM_OBJECT_LOCK(m->object);
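
This fallback implementation, like the near-identical ones further down, has no fast path and simply calls the full pmap_enter(). The substance of the change is the argument: instead of hard-coding VM_PROT_READ | VM_PROT_EXECUTE, the caller's protection is masked down to those two bits, so execute access is granted only when requested. A small sketch of the difference, using stand-in constants:

#include <assert.h>

#define VM_PROT_READ    0x01
#define VM_PROT_WRITE   0x02
#define VM_PROT_EXECUTE 0x04

int
main(void)
{
    /* A caller prefaulting a read/write, non-executable mapping. */
    int prot = VM_PROT_READ | VM_PROT_WRITE;

    /* Old argument: every quick mapping became executable. */
    int old_arg = VM_PROT_READ | VM_PROT_EXECUTE;

    /* New argument: write is stripped (quick mappings are read-only)
     * and execute survives only if the caller asked for it. */
    int new_arg = prot & (VM_PROT_READ | VM_PROT_EXECUTE);

    assert((old_arg & VM_PROT_EXECUTE) != 0);
    assert((new_arg & VM_PROT_EXECUTE) == 0);
    return 0;
}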

View File

@@ -2026,12 +2026,12 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* 2. Not wired.
* 3. Read access.
* 4. No page table pages.
* 6. Page IS managed.
* but is *MUCH* faster than pmap_enter...
*/
vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    vm_page_t mpte)
{
pt_entry_t *pte;
vm_paddr_t pa;

View File

@@ -1570,12 +1570,12 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* 2. Not wired.
* 3. Read access.
* 4. No page table pages.
* 6. Page IS managed.
* but is *MUCH* faster than pmap_enter...
*/
vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    vm_page_t mpte)
{
struct ia64_lpte *pte;
pmap_t oldpmap;
@@ -1613,7 +1613,8 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
/* Initialise with R/O protection and enter into VHPT. */
pmap_enter_vhpt(pte, va);
-pmap_pte_prot(pmap, pte, VM_PROT_READ);
+pmap_pte_prot(pmap, pte,
+    prot & (VM_PROT_READ | VM_PROT_EXECUTE));
pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), FALSE, managed);
}
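
ia64 keeps the protection bits in the long-format PTE itself, stored through pmap_pte_prot(); after the change, the masked caller protection is stored instead of a hard-coded VM_PROT_READ. A toy model of that store, with a hypothetical struct standing in for struct ia64_lpte:

#include <stdio.h>

#define VM_PROT_READ    0x01
#define VM_PROT_EXECUTE 0x04

/* Hypothetical stand-in for struct ia64_lpte's protection field. */
struct lpte_model {
    unsigned int prot;
};

static void
pte_prot_model(struct lpte_model *pte, int prot)
{
    /* As in the diff: read-only, executable only on request. */
    pte->prot = prot & (VM_PROT_READ | VM_PROT_EXECUTE);
}

int
main(void)
{
    struct lpte_model pte;

    pte_prot_model(&pte, VM_PROT_READ | VM_PROT_EXECUTE);
    printf("pte protection bits: %#x\n", pte.prot);
    return 0;
}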

View File

@@ -1069,14 +1069,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}

vm_page_t
-pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    vm_page_t mpte)
{
vm_page_busy(m);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(m->object);
mtx_lock(&Giant);
-pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
+pmap_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
mtx_unlock(&Giant);
VM_OBJECT_LOCK(m->object);
vm_page_lock_queues();

View File

@@ -1069,14 +1069,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}

vm_page_t
-pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    vm_page_t mpte)
{
vm_page_busy(m);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(m->object);
mtx_lock(&Giant);
-pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
+pmap_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
mtx_unlock(&Giant);
VM_OBJECT_LOCK(m->object);
vm_page_lock_queues();

View File

@@ -1069,14 +1069,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}

vm_page_t
-pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    vm_page_t mpte)
{
vm_page_busy(m);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(m->object);
mtx_lock(&Giant);
-pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
+pmap_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
mtx_unlock(&Giant);
VM_OBJECT_LOCK(m->object);
vm_page_lock_queues();

View File

@@ -1401,13 +1401,14 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}

vm_page_t
-pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
+pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    vm_page_t mpte)
{
vm_page_busy(m);
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(m->object);
-pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
+pmap_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
VM_OBJECT_LOCK(m->object);
vm_page_lock_queues();
vm_page_wakeup(m);

View File

@@ -100,7 +100,7 @@ void pmap_copy_page(vm_page_t, vm_page_t);
void pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
boolean_t);
vm_page_t pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
-    vm_page_t mpte);
+    vm_prot_t prot, vm_page_t mpte);
vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va);
vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va,
vm_prot_t prot);

View File

@@ -1007,7 +1007,8 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
vm_page_lock_queues();
if ((m->queue - m->pc) == PQ_CACHE)
vm_page_deactivate(m);
-mpte = pmap_enter_quick(pmap, addr, m, mpte);
+mpte = pmap_enter_quick(pmap, addr, m,
+    entry->protection, mpte);
vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(lobject);
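
The prefault path in vm_fault.c is one of the two callers; it now forwards the map entry's protection, so prefaulted pages inherit the mapping's execute permission instead of a blanket read-plus-execute. A sketch of what the pmap now sees, with a hypothetical structure standing in for the vm_map_entry:

#include <stdio.h>

#define VM_PROT_READ    0x01
#define VM_PROT_WRITE   0x02
#define VM_PROT_EXECUTE 0x04

/* Hypothetical stand-in for the vm_map_entry protection field. */
struct entry_model {
    int protection;
};

static void
prefault_model(const struct entry_model *entry)
{
    int seen = entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE);

    printf("prefaulted page is %sexecutable\n",
        (seen & VM_PROT_EXECUTE) ? "" : "not ");
}

int
main(void)
{
    struct entry_model data = { VM_PROT_READ | VM_PROT_WRITE };
    struct entry_model text = { VM_PROT_READ | VM_PROT_EXECUTE };

    prefault_model(&data);   /* data stays non-executable */
    prefault_model(&text);   /* text keeps execute access */
    return 0;
}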

View File

@@ -1375,7 +1375,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
vm_page_t p, mpte;
boolean_t are_queues_locked;

-if ((prot & VM_PROT_READ) == 0 || object == NULL)
+if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
return;
VM_OBJECT_LOCK(object);
if (object->type == OBJT_DEVICE) {
@@ -1433,7 +1433,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
if ((p->queue - p->pc) == PQ_CACHE)
vm_page_deactivate(p);
mpte = pmap_enter_quick(map->pmap,
-    addr + ptoa(tmpidx), p, mpte);
+    addr + ptoa(tmpidx), p, prot, mpte);
}
}
if (are_queues_locked)
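
The early-return test in vm_map_pmap_enter() is relaxed to match: previously the function gave up unless read access was requested; now either read or execute access is enough, so an execute-only mapping is also pre-populated. A worked example of the two predicates:

#include <stdio.h>

#define VM_PROT_READ    0x01
#define VM_PROT_EXECUTE 0x04

int
main(void)
{
    int prot = VM_PROT_EXECUTE;      /* an execute-only mapping */

    /* Old predicate: bail out, since no read access was requested. */
    if ((prot & VM_PROT_READ) == 0)
        printf("old check: skip pre-population\n");

    /* New predicate: proceed; execute access alone justifies mapping. */
    if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) != 0)
        printf("new check: pre-populate the mapping\n");
    return 0;
}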