Performance optimizations. One of which was meant to go in before the

previous snap.  Specifically, kern_exit and kern_exec now make a
call into the pmap module to do a very fast removal of pages from the
address space.  Additionally, the pmap module now updates the PG_MAPPED
and PG_WRITEABLE flags.  This is an optional optimization, but helpful
on the X86.
This commit is contained in:
John Dyson 1996-10-12 21:35:25 +00:00
parent da2186afa3
commit 9d3fbbb5f4
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=18897
7 changed files with 103 additions and 249 deletions

View File

@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.123 1996/10/09 19:47:19 bde Exp $
* $Id: pmap.c,v 1.124 1996/10/12 20:36:15 bde Exp $
*/
/*
@ -70,6 +70,9 @@
#include "opt_cpu.h"
#define PMAP_LOCK 1
#define PMAP_PVLIST 1
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
@ -204,9 +207,6 @@ static vm_page_t _pmap_allocpte __P((pmap_t pmap, unsigned ptepindex));
static unsigned * pmap_pte_quick __P((pmap_t pmap, vm_offset_t va));
static vm_page_t pmap_page_alloc __P((vm_object_t object, vm_pindex_t pindex));
static vm_page_t pmap_page_lookup __P((vm_object_t object, vm_pindex_t pindex));
static PMAP_INLINE void pmap_lock __P((pmap_t pmap));
static PMAP_INLINE void pmap_unlock __P((pmap_t pmap));
static void pmap_lock2 __P((pmap_t pmap1, pmap_t pmap2));
static int pmap_unuse_pt __P((pmap_t, vm_offset_t, vm_page_t));
#define PDSTACKMAX 6
@ -259,7 +259,9 @@ pmap_bootstrap(firstaddr, loadaddr)
kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + IdlePTD);
kernel_pmap->pm_count = 1;
#if PMAP_PVLIST
TAILQ_INIT(&kernel_pmap->pm_pvlist);
#endif
nkpt = NKPT;
/*
@ -434,58 +436,6 @@ invltlb_2pg( vm_offset_t va1, vm_offset_t va2) {
}
}
/*
 * Acquire the sleep lock on a pmap.  The kernel pmap is never locked
 * (it is protected by spl alone), so return immediately for it.
 * Sleeps at priority PVM-1 until any current holder releases the lock.
 */
static PMAP_INLINE void
pmap_lock(pmap)
	pmap_t pmap;
{
	int s;

	if (pmap == kernel_pmap)
		return;
	s = splhigh();
	while (pmap->pm_flags & PM_FLAG_LOCKED) {
		/* Ask the holder to wake us on unlock, then sleep. */
		pmap->pm_flags |= PM_FLAG_WANTED;
		tsleep(pmap, PVM - 1, "pmaplk", 0);
	}
	/*
	 * Fix: mark the pmap as locked before lowering the spl.  Without
	 * this, the wait loop above guards nothing and two sleepers could
	 * both "acquire" the lock.
	 */
	pmap->pm_flags |= PM_FLAG_LOCKED;
	splx(s);
}
/*
 * Release the sleep lock on a pmap and wake any waiters.  No-op for
 * the kernel pmap, which is never locked.
 */
static PMAP_INLINE void
pmap_unlock(pmap)
	pmap_t pmap;
{
	int s;

	if (pmap == kernel_pmap)
		return;
	s = splhigh();
	pmap->pm_flags &= ~PM_FLAG_LOCKED;
	if (pmap->pm_flags & PM_FLAG_WANTED) {
		pmap->pm_flags &= ~PM_FLAG_WANTED;
		wakeup(pmap);
	}
	/*
	 * Fix: restore the previous interrupt priority level.  The
	 * original body raised the spl with splhigh() but never called
	 * splx(), leaving interrupts masked after every unlock.
	 */
	splx(s);
}
/*
 * Atomically acquire the sleep locks on two pmaps.  If either pmap is
 * the kernel pmap, no locking is performed (the kernel pmap is never
 * locked).  The outer loop restarts until an instant is observed at
 * which neither pmap is locked, avoiding a deadlock that acquiring the
 * two locks one at a time could cause.
 */
static void
pmap_lock2(pmap1, pmap2)
	pmap_t pmap1, pmap2;
{
	int s;

	if (pmap1 == kernel_pmap || pmap2 == kernel_pmap)
		return;
	s = splhigh();
	while ((pmap1->pm_flags | pmap2->pm_flags) & PM_FLAG_LOCKED) {
		while (pmap1->pm_flags & PM_FLAG_LOCKED) {
			pmap1->pm_flags |= PM_FLAG_WANTED;
			tsleep(pmap1, PVM - 1, "pmapl1", 0);
		}
		while (pmap2->pm_flags & PM_FLAG_LOCKED) {
			pmap2->pm_flags |= PM_FLAG_WANTED;
			tsleep(pmap2, PVM - 1, "pmapl2", 0);
		}
	}
	/*
	 * Fix: both pmaps are unlocked at this point; mark them locked
	 * before lowering the spl.  The original body waited but never
	 * set PM_FLAG_LOCKED, so the locks were never actually held.
	 */
	pmap1->pm_flags |= PM_FLAG_LOCKED;
	pmap2->pm_flags |= PM_FLAG_LOCKED;
	splx(s);
}
static unsigned *
get_ptbase(pmap)
pmap_t pmap;
@ -566,15 +516,12 @@ pmap_extract(pmap, va)
vm_offset_t va;
{
vm_offset_t rtval;
pmap_lock(pmap);
if (pmap && *pmap_pde(pmap, va)) {
unsigned *pte;
pte = get_ptbase(pmap) + i386_btop(va);
rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK));
pmap_unlock(pmap);
return rtval;
}
pmap_unlock(pmap);
return 0;
}
@ -871,7 +818,9 @@ pmap_pinit(pmap)
pmap->pm_flags = 0;
pmap->pm_count = 1;
pmap->pm_ptphint = NULL;
#if PMAP_PVLIST
TAILQ_INIT(&pmap->pm_pvlist);
#endif
}
static int
@ -1081,10 +1030,11 @@ pmap_release(pmap)
vm_page_t p,n,ptdpg;
vm_object_t object = pmap->pm_pteobj;
#if defined(DIAGNOSTIC)
if (object->ref_count != 1)
panic("pmap_release: pteobj reference count != 1");
#endif
pmap_lock(pmap);
ptdpg = NULL;
retry:
for (p = TAILQ_FIRST(&object->memq); p != NULL; p = n) {
@ -1336,13 +1286,16 @@ pmap_remove_entry(pmap, ppv, va)
int s;
s = splvm();
#if PMAP_PVLIST
if (ppv->pv_list_count < pmap->pm_stats.resident_count) {
#endif
for (pv = TAILQ_FIRST(&ppv->pv_list);
pv;
pv = TAILQ_NEXT(pv, pv_list)) {
if (pmap == pv->pv_pmap && va == pv->pv_va)
break;
}
#if PMAP_PVLIST
} else {
for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
pv;
@ -1351,13 +1304,20 @@ pmap_remove_entry(pmap, ppv, va)
break;
}
}
#endif
rtval = 0;
if (pv) {
rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem);
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
--ppv->pv_list_count;
if (TAILQ_FIRST(&ppv->pv_list) == NULL) {
ppv->pv_vm_page->flags &= ~(PG_MAPPED|PG_WRITEABLE);
}
#if PMAP_PVLIST
TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
#endif
free_pv_entry(pv);
}
@ -1387,7 +1347,9 @@ pmap_insert_entry(pmap, va, mpte, pa)
pv->pv_pmap = pmap;
pv->pv_ptem = mpte;
#if PMAP_PVLIST
TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
#endif
ppv = pa_to_pvh(pa);
TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
@ -1481,7 +1443,6 @@ pmap_remove(pmap, sva, eva)
if (pmap == NULL)
return;
pmap_lock(pmap);
/*
* special handling of removing one page. a very
* common operation and easy to short circuit some
@ -1489,7 +1450,6 @@ pmap_remove(pmap, sva, eva)
*/
if ((sva + PAGE_SIZE) == eva) {
pmap_remove_page(pmap, sva);
pmap_unlock(pmap);
return;
}
@ -1545,7 +1505,6 @@ pmap_remove(pmap, sva, eva)
if (anyvalid) {
invltlb();
}
pmap_unlock(pmap);
}
/*
@ -1587,7 +1546,6 @@ pmap_remove_all(pa)
s = splvm();
ppv = pa_to_pvh(pa);
while ((pv = TAILQ_FIRST(&ppv->pv_list)) != NULL) {
pmap_lock(pv->pv_pmap);
pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
pv->pv_pmap->pm_stats.resident_count--;
@ -1614,13 +1572,16 @@ pmap_remove_all(pa)
update_needed = 1;
}
#if PMAP_PVLIST
TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
#endif
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
--ppv->pv_list_count;
pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
pmap_unlock(pv->pv_pmap);
free_pv_entry(pv);
}
ppv->pv_vm_page->flags &= ~(PG_MAPPED|PG_WRITEABLE);
if (update_needed)
invltlb();
@ -1656,7 +1617,6 @@ pmap_protect(pmap, sva, eva, prot)
return;
}
pmap_lock(pmap);
anychanged = 0;
ptbase = get_ptbase(pmap);
@ -1697,7 +1657,6 @@ pmap_protect(pmap, sva, eva, prot)
}
}
}
pmap_unlock(pmap);
if (anychanged)
invltlb();
}
@ -1730,7 +1689,6 @@ pmap_enter(pmap, va, pa, prot, wired)
if (pmap == NULL)
return;
pmap_lock(pmap);
va &= PG_FRAME;
#ifdef PMAP_DIAGNOSTIC
if (va > VM_MAX_KERNEL_ADDRESS)
@ -1849,7 +1807,6 @@ pmap_enter(pmap, va, pa, prot, wired)
if (origpte)
invltlb_1pg(va);
}
pmap_unlock(pmap);
}
/*
@ -1981,7 +1938,6 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
return;
}
pmap_lock(pmap);
if (psize + pindex > object->size)
psize = object->size - pindex;
@ -2040,7 +1996,6 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
}
}
}
pmap_unlock(pmap);
return;
}
@ -2077,7 +2032,6 @@ pmap_prefault(pmap, addra, entry, object)
if (!curproc || (pmap != &curproc->p_vmspace->vm_pmap))
return;
pmap_lock(pmap);
starta = addra - PFBAK * PAGE_SIZE;
if (starta < entry->start) {
starta = entry->start;
@ -2132,7 +2086,6 @@ pmap_prefault(pmap, addra, entry, object)
PAGE_WAKEUP(m);
}
}
pmap_unlock(pmap);
}
/*
@ -2153,7 +2106,6 @@ pmap_change_wiring(pmap, va, wired)
if (pmap == NULL)
return;
pmap_lock(pmap);
pte = pmap_pte(pmap, va);
if (wired && !pmap_pte_w(pte))
@ -2166,7 +2118,6 @@ pmap_change_wiring(pmap, va, wired)
* invalidate TLB.
*/
pmap_pte_set_w(pte, wired);
pmap_unlock(pmap);
}
@ -2194,11 +2145,8 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
if (dst_addr != src_addr)
return;
pmap_lock2(src_pmap, dst_pmap);
src_frame = ((unsigned) src_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
if (src_frame != (((unsigned) PTDpde) & PG_FRAME)) {
pmap_unlock(src_pmap);
pmap_unlock(dst_pmap);
return;
}
@ -2268,8 +2216,6 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
++dst_pte;
}
}
pmap_unlock(src_pmap);
pmap_unlock(dst_pmap);
}
/*
@ -2402,6 +2348,8 @@ pmap_remove_pages(pmap, sva, eva)
pv_entry_t pv, npv;
int s;
#if PMAP_PVLIST
#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
if (!curproc || (pmap != &curproc->p_vmspace->vm_pmap)) {
printf("warning: pmap_remove_pages called with non-current pmap\n");
@ -2409,9 +2357,7 @@ pmap_remove_pages(pmap, sva, eva)
}
#endif
pmap_lock(pmap);
s = splhigh();
s = splvm();
for(pv = TAILQ_FIRST(&pmap->pm_pvlist);
pv;
pv = npv) {
@ -2431,30 +2377,32 @@ pmap_remove_pages(pmap, sva, eva)
ppv = pa_to_pvh(tpte);
if (tpte) {
pv->pv_pmap->pm_stats.resident_count--;
if (tpte & PG_W)
pv->pv_pmap->pm_stats.wired_count--;
/*
* Update the vm_page_t clean and reference bits.
*/
if (tpte & PG_M) {
ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
}
pv->pv_pmap->pm_stats.resident_count--;
if (tpte & PG_W)
pv->pv_pmap->pm_stats.wired_count--;
/*
* Update the vm_page_t clean and reference bits.
*/
if (tpte & PG_M) {
ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
}
npv = TAILQ_NEXT(pv, pv_plist);
TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
--ppv->pv_list_count;
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
if (TAILQ_FIRST(&ppv->pv_list) == NULL) {
ppv->pv_vm_page->flags &= ~(PG_MAPPED|PG_WRITEABLE);
}
pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
free_pv_entry(pv);
}
splx(s);
invltlb();
pmap_unlock(pmap);
#endif
}
/*
@ -2501,18 +2449,11 @@ pmap_testbit(pa, bit)
continue;
}
#endif
pmap_lock(pv->pv_pmap);
pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
if (pte == NULL) {
pmap_unlock(pv->pv_pmap);
continue;
}
if (*pte & bit) {
pmap_unlock(pv->pv_pmap);
splx(s);
return TRUE;
}
pmap_unlock(pv->pv_pmap);
}
splx(s);
return (FALSE);
@ -2563,12 +2504,8 @@ pmap_changebit(pa, bit, setem)
}
#endif
pmap_lock(pv->pv_pmap);
pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
if (pte == NULL) {
pmap_unlock(pv->pv_pmap);
continue;
}
if (setem) {
*(int *)pte |= bit;
changed = 1;
@ -2586,7 +2523,6 @@ pmap_changebit(pa, bit, setem)
}
}
}
pmap_unlock(pv->pv_pmap);
}
splx(s);
if (changed)
@ -2654,18 +2590,11 @@ pmap_is_referenced(vm_offset_t pa)
if (!pmap_track_modified(pv->pv_va))
continue;
pmap_lock(pv->pv_pmap);
pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
if (pte == NULL) {
pmap_unlock(pv->pv_pmap);
continue;
}
if ((int) *pte & PG_A) {
pmap_unlock(pv->pv_pmap);
splx(s);
return TRUE;
}
pmap_unlock(pv->pv_pmap);
}
splx(s);
return (FALSE);
@ -2712,17 +2641,14 @@ pmap_ts_referenced(vm_offset_t pa)
if (!pmap_track_modified(pv->pv_va))
continue;
pmap_lock(pv->pv_pmap);
pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
if (pte == NULL) {
pmap_unlock(pv->pv_pmap);
continue;
}
if (*pte & PG_A) {
rtval++;
*pte &= ~PG_A;
}
pmap_unlock(pv->pv_pmap);
}
splx(s);
if (rtval) {
@ -2841,10 +2767,8 @@ pmap_mincore(pmap, addr)
unsigned *ptep, pte;
int val = 0;
pmap_lock(pmap);
ptep = pmap_pte(pmap, addr);
if (ptep == 0) {
pmap_unlock(pmap);
return 0;
}
@ -2877,7 +2801,6 @@ pmap_mincore(pmap, addr)
pmap_is_referenced(pa))
val |= MINCORE_REFERENCED_OTHER;
}
pmap_unlock(pmap);
return val;
}

View File

@ -42,7 +42,7 @@
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
* $Id: pmap.h,v 1.43 1996/09/08 16:57:45 dyson Exp $
* $Id: pmap.h,v 1.44 1996/10/12 20:36:04 bde Exp $
*/
#ifndef _MACHINE_PMAP_H_
@ -198,7 +198,9 @@ typedef struct pv_entry {
pmap_t pv_pmap; /* pmap where mapping lies */
vm_offset_t pv_va; /* virtual address for mapping */
TAILQ_ENTRY(pv_entry) pv_list;
#if PMAP_PVLIST
TAILQ_ENTRY(pv_entry) pv_plist;
#endif
vm_page_t pv_ptem; /* VM page for pte */
} *pv_entry_t;

View File

@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.123 1996/10/09 19:47:19 bde Exp $
* $Id: pmap.c,v 1.124 1996/10/12 20:36:15 bde Exp $
*/
/*
@ -70,6 +70,9 @@
#include "opt_cpu.h"
#define PMAP_LOCK 1
#define PMAP_PVLIST 1
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
@ -204,9 +207,6 @@ static vm_page_t _pmap_allocpte __P((pmap_t pmap, unsigned ptepindex));
static unsigned * pmap_pte_quick __P((pmap_t pmap, vm_offset_t va));
static vm_page_t pmap_page_alloc __P((vm_object_t object, vm_pindex_t pindex));
static vm_page_t pmap_page_lookup __P((vm_object_t object, vm_pindex_t pindex));
static PMAP_INLINE void pmap_lock __P((pmap_t pmap));
static PMAP_INLINE void pmap_unlock __P((pmap_t pmap));
static void pmap_lock2 __P((pmap_t pmap1, pmap_t pmap2));
static int pmap_unuse_pt __P((pmap_t, vm_offset_t, vm_page_t));
#define PDSTACKMAX 6
@ -259,7 +259,9 @@ pmap_bootstrap(firstaddr, loadaddr)
kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + IdlePTD);
kernel_pmap->pm_count = 1;
#if PMAP_PVLIST
TAILQ_INIT(&kernel_pmap->pm_pvlist);
#endif
nkpt = NKPT;
/*
@ -434,58 +436,6 @@ invltlb_2pg( vm_offset_t va1, vm_offset_t va2) {
}
}
/*
 * Acquire the sleep lock on a pmap.  The kernel pmap is never locked
 * (it is protected by spl alone), so return immediately for it.
 * Sleeps at priority PVM-1 until any current holder releases the lock.
 */
static PMAP_INLINE void
pmap_lock(pmap)
	pmap_t pmap;
{
	int s;

	if (pmap == kernel_pmap)
		return;
	s = splhigh();
	while (pmap->pm_flags & PM_FLAG_LOCKED) {
		/* Ask the holder to wake us on unlock, then sleep. */
		pmap->pm_flags |= PM_FLAG_WANTED;
		tsleep(pmap, PVM - 1, "pmaplk", 0);
	}
	/*
	 * Fix: mark the pmap as locked before lowering the spl.  Without
	 * this, the wait loop above guards nothing and two sleepers could
	 * both "acquire" the lock.
	 */
	pmap->pm_flags |= PM_FLAG_LOCKED;
	splx(s);
}
/*
 * Release the sleep lock on a pmap and wake any waiters.  No-op for
 * the kernel pmap, which is never locked.
 */
static PMAP_INLINE void
pmap_unlock(pmap)
	pmap_t pmap;
{
	int s;

	if (pmap == kernel_pmap)
		return;
	s = splhigh();
	pmap->pm_flags &= ~PM_FLAG_LOCKED;
	if (pmap->pm_flags & PM_FLAG_WANTED) {
		pmap->pm_flags &= ~PM_FLAG_WANTED;
		wakeup(pmap);
	}
	/*
	 * Fix: restore the previous interrupt priority level.  The
	 * original body raised the spl with splhigh() but never called
	 * splx(), leaving interrupts masked after every unlock.
	 */
	splx(s);
}
/*
 * Atomically acquire the sleep locks on two pmaps.  If either pmap is
 * the kernel pmap, no locking is performed (the kernel pmap is never
 * locked).  The outer loop restarts until an instant is observed at
 * which neither pmap is locked, avoiding a deadlock that acquiring the
 * two locks one at a time could cause.
 */
static void
pmap_lock2(pmap1, pmap2)
	pmap_t pmap1, pmap2;
{
	int s;

	if (pmap1 == kernel_pmap || pmap2 == kernel_pmap)
		return;
	s = splhigh();
	while ((pmap1->pm_flags | pmap2->pm_flags) & PM_FLAG_LOCKED) {
		while (pmap1->pm_flags & PM_FLAG_LOCKED) {
			pmap1->pm_flags |= PM_FLAG_WANTED;
			tsleep(pmap1, PVM - 1, "pmapl1", 0);
		}
		while (pmap2->pm_flags & PM_FLAG_LOCKED) {
			pmap2->pm_flags |= PM_FLAG_WANTED;
			tsleep(pmap2, PVM - 1, "pmapl2", 0);
		}
	}
	/*
	 * Fix: both pmaps are unlocked at this point; mark them locked
	 * before lowering the spl.  The original body waited but never
	 * set PM_FLAG_LOCKED, so the locks were never actually held.
	 */
	pmap1->pm_flags |= PM_FLAG_LOCKED;
	pmap2->pm_flags |= PM_FLAG_LOCKED;
	splx(s);
}
static unsigned *
get_ptbase(pmap)
pmap_t pmap;
@ -566,15 +516,12 @@ pmap_extract(pmap, va)
vm_offset_t va;
{
vm_offset_t rtval;
pmap_lock(pmap);
if (pmap && *pmap_pde(pmap, va)) {
unsigned *pte;
pte = get_ptbase(pmap) + i386_btop(va);
rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK));
pmap_unlock(pmap);
return rtval;
}
pmap_unlock(pmap);
return 0;
}
@ -871,7 +818,9 @@ pmap_pinit(pmap)
pmap->pm_flags = 0;
pmap->pm_count = 1;
pmap->pm_ptphint = NULL;
#if PMAP_PVLIST
TAILQ_INIT(&pmap->pm_pvlist);
#endif
}
static int
@ -1081,10 +1030,11 @@ pmap_release(pmap)
vm_page_t p,n,ptdpg;
vm_object_t object = pmap->pm_pteobj;
#if defined(DIAGNOSTIC)
if (object->ref_count != 1)
panic("pmap_release: pteobj reference count != 1");
#endif
pmap_lock(pmap);
ptdpg = NULL;
retry:
for (p = TAILQ_FIRST(&object->memq); p != NULL; p = n) {
@ -1336,13 +1286,16 @@ pmap_remove_entry(pmap, ppv, va)
int s;
s = splvm();
#if PMAP_PVLIST
if (ppv->pv_list_count < pmap->pm_stats.resident_count) {
#endif
for (pv = TAILQ_FIRST(&ppv->pv_list);
pv;
pv = TAILQ_NEXT(pv, pv_list)) {
if (pmap == pv->pv_pmap && va == pv->pv_va)
break;
}
#if PMAP_PVLIST
} else {
for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
pv;
@ -1351,13 +1304,20 @@ pmap_remove_entry(pmap, ppv, va)
break;
}
}
#endif
rtval = 0;
if (pv) {
rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem);
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
--ppv->pv_list_count;
if (TAILQ_FIRST(&ppv->pv_list) == NULL) {
ppv->pv_vm_page->flags &= ~(PG_MAPPED|PG_WRITEABLE);
}
#if PMAP_PVLIST
TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
#endif
free_pv_entry(pv);
}
@ -1387,7 +1347,9 @@ pmap_insert_entry(pmap, va, mpte, pa)
pv->pv_pmap = pmap;
pv->pv_ptem = mpte;
#if PMAP_PVLIST
TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
#endif
ppv = pa_to_pvh(pa);
TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
@ -1481,7 +1443,6 @@ pmap_remove(pmap, sva, eva)
if (pmap == NULL)
return;
pmap_lock(pmap);
/*
* special handling of removing one page. a very
* common operation and easy to short circuit some
@ -1489,7 +1450,6 @@ pmap_remove(pmap, sva, eva)
*/
if ((sva + PAGE_SIZE) == eva) {
pmap_remove_page(pmap, sva);
pmap_unlock(pmap);
return;
}
@ -1545,7 +1505,6 @@ pmap_remove(pmap, sva, eva)
if (anyvalid) {
invltlb();
}
pmap_unlock(pmap);
}
/*
@ -1587,7 +1546,6 @@ pmap_remove_all(pa)
s = splvm();
ppv = pa_to_pvh(pa);
while ((pv = TAILQ_FIRST(&ppv->pv_list)) != NULL) {
pmap_lock(pv->pv_pmap);
pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
pv->pv_pmap->pm_stats.resident_count--;
@ -1614,13 +1572,16 @@ pmap_remove_all(pa)
update_needed = 1;
}
#if PMAP_PVLIST
TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
#endif
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
--ppv->pv_list_count;
pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
pmap_unlock(pv->pv_pmap);
free_pv_entry(pv);
}
ppv->pv_vm_page->flags &= ~(PG_MAPPED|PG_WRITEABLE);
if (update_needed)
invltlb();
@ -1656,7 +1617,6 @@ pmap_protect(pmap, sva, eva, prot)
return;
}
pmap_lock(pmap);
anychanged = 0;
ptbase = get_ptbase(pmap);
@ -1697,7 +1657,6 @@ pmap_protect(pmap, sva, eva, prot)
}
}
}
pmap_unlock(pmap);
if (anychanged)
invltlb();
}
@ -1730,7 +1689,6 @@ pmap_enter(pmap, va, pa, prot, wired)
if (pmap == NULL)
return;
pmap_lock(pmap);
va &= PG_FRAME;
#ifdef PMAP_DIAGNOSTIC
if (va > VM_MAX_KERNEL_ADDRESS)
@ -1849,7 +1807,6 @@ pmap_enter(pmap, va, pa, prot, wired)
if (origpte)
invltlb_1pg(va);
}
pmap_unlock(pmap);
}
/*
@ -1981,7 +1938,6 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
return;
}
pmap_lock(pmap);
if (psize + pindex > object->size)
psize = object->size - pindex;
@ -2040,7 +1996,6 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
}
}
}
pmap_unlock(pmap);
return;
}
@ -2077,7 +2032,6 @@ pmap_prefault(pmap, addra, entry, object)
if (!curproc || (pmap != &curproc->p_vmspace->vm_pmap))
return;
pmap_lock(pmap);
starta = addra - PFBAK * PAGE_SIZE;
if (starta < entry->start) {
starta = entry->start;
@ -2132,7 +2086,6 @@ pmap_prefault(pmap, addra, entry, object)
PAGE_WAKEUP(m);
}
}
pmap_unlock(pmap);
}
/*
@ -2153,7 +2106,6 @@ pmap_change_wiring(pmap, va, wired)
if (pmap == NULL)
return;
pmap_lock(pmap);
pte = pmap_pte(pmap, va);
if (wired && !pmap_pte_w(pte))
@ -2166,7 +2118,6 @@ pmap_change_wiring(pmap, va, wired)
* invalidate TLB.
*/
pmap_pte_set_w(pte, wired);
pmap_unlock(pmap);
}
@ -2194,11 +2145,8 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
if (dst_addr != src_addr)
return;
pmap_lock2(src_pmap, dst_pmap);
src_frame = ((unsigned) src_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
if (src_frame != (((unsigned) PTDpde) & PG_FRAME)) {
pmap_unlock(src_pmap);
pmap_unlock(dst_pmap);
return;
}
@ -2268,8 +2216,6 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
++dst_pte;
}
}
pmap_unlock(src_pmap);
pmap_unlock(dst_pmap);
}
/*
@ -2402,6 +2348,8 @@ pmap_remove_pages(pmap, sva, eva)
pv_entry_t pv, npv;
int s;
#if PMAP_PVLIST
#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
if (!curproc || (pmap != &curproc->p_vmspace->vm_pmap)) {
printf("warning: pmap_remove_pages called with non-current pmap\n");
@ -2409,9 +2357,7 @@ pmap_remove_pages(pmap, sva, eva)
}
#endif
pmap_lock(pmap);
s = splhigh();
s = splvm();
for(pv = TAILQ_FIRST(&pmap->pm_pvlist);
pv;
pv = npv) {
@ -2431,30 +2377,32 @@ pmap_remove_pages(pmap, sva, eva)
ppv = pa_to_pvh(tpte);
if (tpte) {
pv->pv_pmap->pm_stats.resident_count--;
if (tpte & PG_W)
pv->pv_pmap->pm_stats.wired_count--;
/*
* Update the vm_page_t clean and reference bits.
*/
if (tpte & PG_M) {
ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
}
pv->pv_pmap->pm_stats.resident_count--;
if (tpte & PG_W)
pv->pv_pmap->pm_stats.wired_count--;
/*
* Update the vm_page_t clean and reference bits.
*/
if (tpte & PG_M) {
ppv->pv_vm_page->dirty = VM_PAGE_BITS_ALL;
}
npv = TAILQ_NEXT(pv, pv_plist);
TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
--ppv->pv_list_count;
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
if (TAILQ_FIRST(&ppv->pv_list) == NULL) {
ppv->pv_vm_page->flags &= ~(PG_MAPPED|PG_WRITEABLE);
}
pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
free_pv_entry(pv);
}
splx(s);
invltlb();
pmap_unlock(pmap);
#endif
}
/*
@ -2501,18 +2449,11 @@ pmap_testbit(pa, bit)
continue;
}
#endif
pmap_lock(pv->pv_pmap);
pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
if (pte == NULL) {
pmap_unlock(pv->pv_pmap);
continue;
}
if (*pte & bit) {
pmap_unlock(pv->pv_pmap);
splx(s);
return TRUE;
}
pmap_unlock(pv->pv_pmap);
}
splx(s);
return (FALSE);
@ -2563,12 +2504,8 @@ pmap_changebit(pa, bit, setem)
}
#endif
pmap_lock(pv->pv_pmap);
pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
if (pte == NULL) {
pmap_unlock(pv->pv_pmap);
continue;
}
if (setem) {
*(int *)pte |= bit;
changed = 1;
@ -2586,7 +2523,6 @@ pmap_changebit(pa, bit, setem)
}
}
}
pmap_unlock(pv->pv_pmap);
}
splx(s);
if (changed)
@ -2654,18 +2590,11 @@ pmap_is_referenced(vm_offset_t pa)
if (!pmap_track_modified(pv->pv_va))
continue;
pmap_lock(pv->pv_pmap);
pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
if (pte == NULL) {
pmap_unlock(pv->pv_pmap);
continue;
}
if ((int) *pte & PG_A) {
pmap_unlock(pv->pv_pmap);
splx(s);
return TRUE;
}
pmap_unlock(pv->pv_pmap);
}
splx(s);
return (FALSE);
@ -2712,17 +2641,14 @@ pmap_ts_referenced(vm_offset_t pa)
if (!pmap_track_modified(pv->pv_va))
continue;
pmap_lock(pv->pv_pmap);
pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
if (pte == NULL) {
pmap_unlock(pv->pv_pmap);
continue;
}
if (*pte & PG_A) {
rtval++;
*pte &= ~PG_A;
}
pmap_unlock(pv->pv_pmap);
}
splx(s);
if (rtval) {
@ -2841,10 +2767,8 @@ pmap_mincore(pmap, addr)
unsigned *ptep, pte;
int val = 0;
pmap_lock(pmap);
ptep = pmap_pte(pmap, addr);
if (ptep == 0) {
pmap_unlock(pmap);
return 0;
}
@ -2877,7 +2801,6 @@ pmap_mincore(pmap, addr)
pmap_is_referenced(pa))
val |= MINCORE_REFERENCED_OTHER;
}
pmap_unlock(pmap);
return val;
}

View File

@ -42,7 +42,7 @@
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
* $Id: pmap.h,v 1.43 1996/09/08 16:57:45 dyson Exp $
* $Id: pmap.h,v 1.44 1996/10/12 20:36:04 bde Exp $
*/
#ifndef _MACHINE_PMAP_H_
@ -198,7 +198,9 @@ typedef struct pv_entry {
pmap_t pv_pmap; /* pmap where mapping lies */
vm_offset_t pv_va; /* virtual address for mapping */
TAILQ_ENTRY(pv_entry) pv_list;
#if PMAP_PVLIST
TAILQ_ENTRY(pv_entry) pv_plist;
#endif
vm_page_t pv_ptem; /* VM page for pte */
} *pv_entry_t;

View File

@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: kern_exec.c,v 1.45 1996/07/27 03:23:41 dyson Exp $
* $Id: kern_exec.c,v 1.46 1996/07/30 03:08:34 dyson Exp $
*/
#include <sys/param.h>
@ -368,6 +368,7 @@ exec_new_vmspace(imgp)
if (vmspace->vm_shm)
shmexit(imgp->proc);
vm_map_remove(&vmspace->vm_map, 0, USRSTACK);
pmap_remove_pages(&vmspace->vm_pmap, 0, USRSTACK);
/* Allocate a new stack */
error = vm_map_find(&vmspace->vm_map, NULL, 0, (vm_offset_t *)&stack_addr,

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)kern_exit.c 8.7 (Berkeley) 2/12/94
* $Id: kern_exit.c,v 1.39 1996/09/13 09:17:03 bde Exp $
* $Id: kern_exit.c,v 1.40 1996/10/04 23:43:12 julian Exp $
*/
#include "opt_ktrace.h"
@ -182,9 +182,12 @@ exit1(p, rv)
* Can't free the entire vmspace as the kernel stack
* may be mapped within that space also.
*/
if (vm->vm_refcnt == 1)
if (vm->vm_refcnt == 1) {
pmap_remove_pages(&vm->vm_pmap, VM_MIN_ADDRESS,
VM_MAXUSER_ADDRESS);
(void) vm_map_remove(&vm->vm_map, VM_MIN_ADDRESS,
VM_MAXUSER_ADDRESS);
}
if (SESS_LEADER(p)) {
register struct session *sp = p->p_session;

View File

@ -54,7 +54,7 @@
* functioning of this software, nor does the author assume any responsibility
* for damages incurred with its use.
*
* $Id: subr_rlist.c,v 1.16 1996/03/02 22:57:45 dyson Exp $
* $Id: subr_rlist.c,v 1.17 1996/03/03 21:10:52 dyson Exp $
*/
#include <sys/param.h>
@ -83,7 +83,7 @@ rlist_malloc()
struct rlist *rl;
int i;
while( rlist_count < RLIST_MIN) {
int s = splhigh();
int s = splvm();
rl = (struct rlist *)kmem_alloc(kernel_map, PAGE_SIZE);
splx(s);
if( !rl)
@ -122,7 +122,7 @@ rlist_free(rlh, start, end)
struct rlist *prev_rlp = NULL, *cur_rlp = *rlp, *next_rlp = NULL;
int s;
s = splhigh();
s = splvm();
while (rlh->rlh_lock & RLH_LOCKED) {
rlh->rlh_lock |= RLH_DESIRED;
tsleep(rlh, PSWP, "rlistf", 0);
@ -241,7 +241,7 @@ rlist_alloc (rlh, size, loc)
int s;
register struct rlist *olp = 0;
s = splhigh();
s = splvm();
while (rlh->rlh_lock & RLH_LOCKED) {
rlh->rlh_lock |= RLH_DESIRED;
tsleep(rlh, PSWP, "rlistf", 0);