Implement an optimization of the VM<->pmap API.  Pass vm_page_t's directly
to various pmap_*() functions instead of looking up the physical address
and passing that.  In many cases, the first thing the pmap code was doing
was going to a lot of trouble to get back the original vm_page_t, or
its shadow pv_table entry.
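
To make the shape of the change concrete, here is a sketch of a few of the
affected prototypes, abbreviated from the alpha/i386 hunks below (the full
set of converted interfaces is larger):

/* Before: callers passed a physical address, and the pmap code usually
 * translated it straight back into a vm_page_t or pv_table entry. */
void		pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
		    vm_prot_t prot, boolean_t wired);
void		pmap_page_protect(vm_offset_t pa, vm_prot_t prot);
boolean_t	pmap_is_modified(vm_offset_t pa);
int		pmap_ts_referenced(vm_offset_t pa);

/* After: callers hand over the vm_page_t they already hold. */
void		pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
		    vm_prot_t prot, boolean_t wired);
void		pmap_page_protect(vm_page_t m, vm_prot_t prot);
boolean_t	pmap_is_modified(vm_page_t m);
int		pmap_ts_referenced(vm_page_t m);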

Inspired by: John Dyson's 1998 patches.

Also:
Eliminate pv_table as a separate thing and build it into a machine-
dependent part of vm_page_t.  This eliminates having a separate set of
structures that shadow each other in a 1:1 fashion and that we often went
to a lot of trouble to translate from one to the other. (see above)
This happens to save 4 bytes of physical memory for each page in the
system.  (8 bytes on the Alpha).
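
For reference, the machine-dependent per-page data now hangs off each
vm_page_t as an "md" member.  The new struct, copied from the alpha
pmap.h hunk below (this variant also carries pv_flags; the i386 one does
not), looks like:

struct md_page {
	int		pv_list_count;
	int		pv_flags;
	TAILQ_HEAD(,pv_entry) pv_list;
};

/* Presumably the MI struct vm_page (not part of the hunks shown here)
 * gains a member such as:
 *	struct md_page md;
 * so that code like TAILQ_FIRST(&m->md.pv_list) replaces the old
 * TAILQ_FIRST(&pa_to_pvh(pa)->pv_list). */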

Eliminate the use of the phys_avail[] array to determine if a page is
managed (i.e., it has pv_entries, etc.).  Store this information in a flag
on the vm_page_t instead (the new checks below test PG_FICTITIOUS).
Things like device_pager set it because they create vm_page_t's on the
fly that do not have pv_entries.  This makes it easier to "unmanage" a
page of physical memory (this will be taken advantage of in subsequent
commits).
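
The practical effect on the per-call check, distilled from the hunks below:
the old pmap_is_managed() walked the phys_avail[] segment list on every
call, while the new code simply tests the page's flags.

/* Old: linear scan of the phys_avail[] ranges. */
static PMAP_INLINE int
pmap_is_managed(vm_offset_t pa)
{
	int i;

	if (!pmap_initialized)
		return 0;
	for (i = 0; phys_avail[i + 1]; i += 2) {
		if (pa < phys_avail[i + 1] && pa >= phys_avail[i])
			return 1;
	}
	return 0;
}

/* New: a constant-time flag test on the vm_page_t itself. */
if (pmap_initialized && (m->flags & PG_FICTITIOUS) == 0) {
	/* the page is managed; maintain its m->md.pv_list */
}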

Add a function to add a new page to the freelist.  This could be used
for reclaiming the previously wasted pages left over from preloaded
loader(8) files.
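
The new freelist helper itself does not appear in the hunks shown here, so
the following is only a minimal sketch of the idea, with a hypothetical
name and details assumed rather than taken from the commit: initialize the
vm_page_t for a physical page that was never handed to the VM system at
boot, then free it so it lands on the free queues like any other page.

/*
 * HYPOTHETICAL sketch -- the name, exact fields touched, and locking are
 * assumptions, not the actual function added by this commit.
 */
static void
example_add_new_page(vm_offset_t pa)
{
	vm_page_t m;

	m = PHYS_TO_VM_PAGE(pa);	/* vm_page_array slot for this pa */
	m->phys_addr = pa;
	m->flags = 0;			/* plain, managed memory */
	cnt.v_page_count++;
	vm_page_free(m);		/* put it on the free queue */
}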

Reviewed by:	dillon
This commit is contained in:
Peter Wemm 2000-05-21 12:50:18 +00:00
parent 4f91f96d90
commit 0385347c1a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=60755
21 changed files with 542 additions and 740 deletions

View File

@ -238,9 +238,6 @@ pmap_break(void)
#define pte_prot(m, p) (protection_codes[m == pmap_kernel() ? 0 : 1][p])
int protection_codes[2][8];
#define pa_index(pa) atop((pa) - vm_first_phys)
#define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
/*
* Return non-zero if this pmap is currently active
*/
@ -320,8 +317,6 @@ vm_offset_t avail_end; /* PA of last available physical page */
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
static vm_offset_t vm_first_phys;
static int pv_npg;
static vm_object_t kptobj;
@ -351,25 +346,23 @@ static struct pv_entry *pvinit;
*/
pt_entry_t *CMAP1 = 0;
static pt_entry_t *CMAP2;
static pv_table_t *pv_table;
caddr_t CADDR1;
static caddr_t CADDR2;
static PMAP_INLINE void free_pv_entry __P((pv_entry_t pv));
static pv_entry_t get_pv_entry __P((void));
static void alpha_protection_init __P((void));
static void pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem));
static void pmap_changebit __P((vm_page_t m, int bit, boolean_t setem));
static PMAP_INLINE int pmap_is_managed __P((vm_offset_t pa));
static void pmap_remove_all __P((vm_offset_t pa));
static void pmap_remove_all __P((vm_page_t m));
static vm_page_t pmap_enter_quick __P((pmap_t pmap, vm_offset_t va,
vm_offset_t pa, vm_page_t mpte));
vm_page_t m, vm_page_t mpte));
static int pmap_remove_pte __P((pmap_t pmap, pt_entry_t* ptq, vm_offset_t sva));
static void pmap_remove_page __P((struct pmap *pmap, vm_offset_t va));
static int pmap_remove_entry __P((struct pmap *pmap, pv_table_t *pv,
static int pmap_remove_entry __P((struct pmap *pmap, vm_page_t m,
vm_offset_t va));
static void pmap_insert_entry __P((pmap_t pmap, vm_offset_t va,
vm_page_t mpte, vm_offset_t pa));
vm_page_t mpte, vm_page_t m));
static vm_page_t pmap_allocpte __P((pmap_t pmap, vm_offset_t va));
@ -628,45 +621,34 @@ void
pmap_init(phys_start, phys_end)
vm_offset_t phys_start, phys_end;
{
vm_offset_t addr;
vm_size_t s;
int i;
int initial_pvs;
/*
* calculate the number of pv_entries needed
*/
vm_first_phys = phys_avail[0];
for (i = 0; phys_avail[i + 1]; i += 2);
pv_npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;
/*
* Allocate memory for random pmap data structures. Includes the
* pv_head_table.
*/
s = (vm_size_t) (sizeof(pv_table_t) * pv_npg);
s = round_page(s);
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
pv_table = (pv_table_t *) addr;
for(i = 0; i < pv_npg; i++) {
vm_offset_t pa;
TAILQ_INIT(&pv_table[i].pv_list);
pv_table[i].pv_list_count = 0;
pa = vm_first_phys + i * PAGE_SIZE;
pv_table[i].pv_vm_page = PHYS_TO_VM_PAGE(pa);
}
for(i = 0; i < vm_page_array_size; i++) {
vm_page_t m;
m = &vm_page_array[i];
TAILQ_INIT(&m->md.pv_list);
m->md.pv_list_count = 0;
m->md.pv_flags = 0;
}
/*
* init the pv free list
*/
initial_pvs = pv_npg;
initial_pvs = vm_page_array_size;
if (initial_pvs < MINPV)
initial_pvs = MINPV;
pvzone = &pvzone_store;
pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
initial_pvs * sizeof (struct pv_entry));
zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit, pv_npg);
zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit,
vm_page_array_size);
/*
* object for kernel page table pages
*/
@ -686,29 +668,11 @@ pmap_init(phys_start, phys_end)
void
pmap_init2()
{
pv_entry_max = PMAP_SHPGPERPROC * maxproc + pv_npg;
pv_entry_max = PMAP_SHPGPERPROC * maxproc + vm_page_array_size;
pv_entry_high_water = 9 * (pv_entry_max / 10);
zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
}
/*
* Used to map a range of physical addresses into kernel
* virtual address space.
*
* For now, VM is already on, we only need to map the
* specified memory.
*/
vm_offset_t
pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, int prot)
{
while (start < end) {
pmap_enter(kernel_pmap, virt, start, prot, FALSE);
virt += PAGE_SIZE;
start += PAGE_SIZE;
}
return (virt);
}
/***************************************************
* Manipulate TLBs for a pmap
@ -825,25 +789,6 @@ pmap_extract(pmap, va)
return 0;
}
/*
* determine if a page is managed (memory vs. device)
*/
static PMAP_INLINE int
pmap_is_managed(pa)
vm_offset_t pa;
{
int i;
if (!pmap_initialized)
return 0;
for (i = 0; phys_avail[i + 1]; i += 2) {
if (pa < phys_avail[i + 1] && pa >= phys_avail[i])
return 1;
}
return 0;
}
/***************************************************
* Low level mapping routines.....
@ -932,6 +877,25 @@ pmap_kremove(vm_offset_t va)
pmap_invalidate_page(kernel_pmap, va);
}
/*
* Used to map a range of physical addresses into kernel
* virtual address space.
*
* For now, VM is already on, we only need to map the
* specified memory.
*/
vm_offset_t
pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, int prot)
{
while (start < end) {
pmap_kenter(virt, start);
virt += PAGE_SIZE;
start += PAGE_SIZE;
}
return (virt);
}
static vm_page_t
pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
@ -1700,9 +1664,7 @@ get_pv_entry(void)
void
pmap_collect()
{
pv_table_t *ppv;
int i;
vm_offset_t pa;
vm_page_t m;
static int warningdone=0;
@ -1714,16 +1676,12 @@ pmap_collect()
warningdone++;
}
for(i = 0; i < pv_npg; i++) {
if ((ppv = &pv_table[i]) == 0)
continue;
m = ppv->pv_vm_page;
if ((pa = VM_PAGE_TO_PHYS(m)) == 0)
continue;
for(i = 0; i < vm_page_array_size; i++) {
m = &vm_page_array[i];
if (m->wire_count || m->hold_count || m->busy ||
(m->flags & PG_BUSY))
(m->flags & PG_BUSY))
continue;
pmap_remove_all(pa);
pmap_remove_all(m);
}
pmap_pagedaemon_waken = 0;
}
@ -1737,15 +1695,15 @@ pmap_collect()
*/
static int
pmap_remove_entry(pmap_t pmap, pv_table_t* ppv, vm_offset_t va)
pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
{
pv_entry_t pv;
int rtval;
int s;
s = splvm();
if (ppv->pv_list_count < pmap->pm_stats.resident_count) {
for (pv = TAILQ_FIRST(&ppv->pv_list);
if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
for (pv = TAILQ_FIRST(&m->md.pv_list);
pv;
pv = TAILQ_NEXT(pv, pv_list)) {
if (pmap == pv->pv_pmap && va == pv->pv_va)
@ -1763,10 +1721,10 @@ pmap_remove_entry(pmap_t pmap, pv_table_t* ppv, vm_offset_t va)
rtval = 0;
if (pv) {
rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem);
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
ppv->pv_list_count--;
if (TAILQ_FIRST(&ppv->pv_list) == NULL)
vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count--;
if (TAILQ_FIRST(&m->md.pv_list) == NULL)
vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
free_pv_entry(pv);
@ -1781,12 +1739,11 @@ pmap_remove_entry(pmap_t pmap, pv_table_t* ppv, vm_offset_t va)
* (pmap, va).
*/
static void
pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_offset_t pa)
pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m)
{
int s;
pv_entry_t pv;
pv_table_t *ppv;
s = splvm();
pv = get_pv_entry();
@ -1795,10 +1752,8 @@ pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_offset_t pa)
pv->pv_ptem = mpte;
TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
ppv = pa_to_pvh(pa);
TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
ppv->pv_list_count++;
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count++;
splx(s);
}
@ -1810,7 +1765,7 @@ static int
pmap_remove_pte(pmap_t pmap, pt_entry_t* ptq, vm_offset_t va)
{
pt_entry_t oldpte;
pv_table_t *ppv;
vm_page_t m;
oldpte = *ptq;
PMAP_DEBUG_VA(va);
@ -1820,8 +1775,8 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t* ptq, vm_offset_t va)
pmap->pm_stats.resident_count -= 1;
if (oldpte & PG_MANAGED) {
ppv = pa_to_pvh(pmap_pte_pa(&oldpte));
return pmap_remove_entry(pmap, ppv, va);
m = PHYS_TO_VM_PAGE(pmap_pte_pa(&oldpte));
return pmap_remove_entry(pmap, m, va);
} else {
return pmap_unuse_pt(pmap, va, NULL);
}
@ -1911,10 +1866,9 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
*/
static void
pmap_remove_all(vm_offset_t pa)
pmap_remove_all(vm_page_t m)
{
register pv_entry_t pv;
pv_table_t *ppv;
pt_entry_t *pte, tpte;
int nmodify;
int s;
@ -1925,20 +1879,19 @@ pmap_remove_all(vm_offset_t pa)
* XXX this makes pmap_page_protect(NONE) illegal for non-managed
* pages!
*/
if (!pmap_is_managed(pa)) {
panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", pa);
if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", VM_PAGE_TO_PHSY(m));
}
#endif
s = splvm();
ppv = pa_to_pvh(pa);
while ((pv = TAILQ_FIRST(&ppv->pv_list)) != NULL) {
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va);
pv->pv_pmap->pm_stats.resident_count--;
if (pmap_pte_pa(pte) != pa)
panic("pmap_remove_all: pv_table for %lx is inconsistent", pa);
if (pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(m))
panic("pmap_remove_all: pv_table for %lx is inconsistent", VM_PAGE_TO_PHYS(m));
tpte = *pte;
@ -1950,13 +1903,13 @@ pmap_remove_all(vm_offset_t pa)
pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
ppv->pv_list_count--;
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count--;
pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
free_pv_entry(pv);
}
vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
splx(s);
return;
@ -2039,9 +1992,10 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
* insert this page into the given map NOW.
*/
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
boolean_t wired)
{
vm_offset_t pa;
pt_entry_t *pte;
vm_offset_t opa;
pt_entry_t origpte, newpte;
@ -2076,7 +2030,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
}
origpte = *pte;
pa &= ~PAGE_MASK;
pa = VM_PAGE_TO_PHYS(m) & ~PAGE_MASK;
managed = 0;
opa = pmap_pte_pa(pte);
@ -2116,12 +2070,12 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
}
/*
* Enter on the PV list if part of our managed memory Note that we
* Enter on the PV list if part of our managed memory. Note that we
* raise IPL while manipulating pv_table since pmap_enter can be
* called at interrupt time.
*/
if (pmap_is_managed(pa)) {
pmap_insert_entry(pmap, va, mpte, pa);
if (pmap_initialized && (m->flags & PG_FICTITIOUS) == 0) {
pmap_insert_entry(pmap, va, mpte, m);
managed |= PG_MANAGED;
}
@ -2139,15 +2093,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
newpte = pmap_phys_to_pte(pa) | pte_prot(pmap, prot) | PG_V | managed;
if (managed) {
pv_table_t* ppv;
vm_page_t om;
/*
* Set up referenced/modified emulation for the new mapping
*/
ppv = pa_to_pvh(pa);
if ((ppv->pv_flags & PV_TABLE_REF) == 0)
om = PHYS_TO_VM_PAGE(pa);
if ((om->md.pv_flags & PV_TABLE_REF) == 0)
newpte |= PG_FOR | PG_FOW | PG_FOE;
else if ((ppv->pv_flags & PV_TABLE_MOD) == 0)
else if ((om->md.pv_flags & PV_TABLE_MOD) == 0)
newpte |= PG_FOW;
}
@ -2180,7 +2134,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
*/
static vm_page_t
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_page_t mpte)
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
{
register pt_entry_t *pte;
@ -2244,12 +2198,12 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_page_t mpte)
}
/*
* Enter on the PV list if part of our managed memory Note that we
* Enter on the PV list if part of our managed memory. Note that we
* raise IPL while manipulating pv_table since pmap_enter can be
* called at interrupt time.
*/
PMAP_DEBUG_VA(va);
pmap_insert_entry(pmap, va, mpte, pa);
pmap_insert_entry(pmap, va, mpte, m);
/*
* Increment counters
@ -2259,7 +2213,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_page_t mpte)
/*
* Now validate mapping with RO protection
*/
*pte = pmap_phys_to_pte(pa) | PG_V | PG_KRE | PG_URE | PG_MANAGED;
*pte = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m)) | PG_V | PG_KRE | PG_URE | PG_MANAGED;
alpha_pal_imb(); /* XXX overkill? */
return mpte;
@ -2321,8 +2275,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_page_deactivate(p);
vm_page_busy(p);
mpte = pmap_enter_quick(pmap,
addr + alpha_ptob(tmpidx),
VM_PAGE_TO_PHYS(p), mpte);
addr + alpha_ptob(tmpidx), p, mpte);
vm_page_flag_set(p, PG_MAPPED);
vm_page_wakeup(p);
}
@ -2341,8 +2294,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_page_deactivate(p);
vm_page_busy(p);
mpte = pmap_enter_quick(pmap,
addr + alpha_ptob(tmpidx),
VM_PAGE_TO_PHYS(p), mpte);
addr + alpha_ptob(tmpidx), p, mpte);
vm_page_flag_set(p, PG_MAPPED);
vm_page_wakeup(p);
}
@ -2437,8 +2389,7 @@ pmap_prefault(pmap, addra, entry)
vm_page_deactivate(m);
}
vm_page_busy(m);
mpte = pmap_enter_quick(pmap, addr,
VM_PAGE_TO_PHYS(m), mpte);
mpte = pmap_enter_quick(pmap, addr, m, mpte);
vm_page_flag_set(m, PG_MAPPED);
vm_page_wakeup(m);
}
@ -2575,24 +2526,22 @@ pmap_pageable(pmap, sva, eva, pageable)
* in the given pmap.
*/
boolean_t
pmap_page_exists(pmap, pa)
pmap_page_exists(pmap, m)
pmap_t pmap;
vm_offset_t pa;
vm_page_t m;
{
register pv_entry_t pv;
pv_table_t *ppv;
int s;
if (!pmap_is_managed(pa))
if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
return FALSE;
s = splvm();
ppv = pa_to_pvh(pa);
/*
* Not found, check current mappings returning immediately if found.
*/
for (pv = TAILQ_FIRST(&ppv->pv_list);
for (pv = TAILQ_FIRST(&m->md.pv_list);
pv;
pv = TAILQ_NEXT(pv, pv_list)) {
if (pv->pv_pmap == pmap) {
@ -2619,7 +2568,7 @@ pmap_remove_pages(pmap, sva, eva)
vm_offset_t sva, eva;
{
pt_entry_t *pte, tpte;
pv_table_t *ppv;
vm_page_t m;
pv_entry_t pv, npv;
int s;
@ -2660,17 +2609,17 @@ pmap_remove_pages(pmap, sva, eva)
PMAP_DEBUG_VA(pv->pv_va);
*pte = 0;
ppv = pa_to_pvh(pmap_pte_pa(&tpte));
m = PHYS_TO_VM_PAGE(pmap_pte_pa(&tpte));
pv->pv_pmap->pm_stats.resident_count--;
npv = TAILQ_NEXT(pv, pv_plist);
TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
ppv->pv_list_count--;
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
if (TAILQ_FIRST(&ppv->pv_list) == NULL) {
vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
m->md.pv_list_count--;
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
}
pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
@ -2684,26 +2633,24 @@ pmap_remove_pages(pmap, sva, eva)
* this routine is used to modify bits in ptes
*/
static void
pmap_changebit(vm_offset_t pa, int bit, boolean_t setem)
pmap_changebit(vm_page_t m, int bit, boolean_t setem)
{
pv_entry_t pv;
pv_table_t *ppv;
pt_entry_t *pte;
int changed;
int s;
if (!pmap_is_managed(pa))
if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
return;
s = splvm();
changed = 0;
ppv = pa_to_pvh(pa);
/*
* Loop over all current mappings setting/clearing as appropos If
* setting RO do we need to clear the VAC?
*/
for (pv = TAILQ_FIRST(&ppv->pv_list);
for (pv = TAILQ_FIRST(&m->md.pv_list);
pv;
pv = TAILQ_NEXT(pv, pv_list)) {
@ -2747,13 +2694,13 @@ pmap_changebit(vm_offset_t pa, int bit, boolean_t setem)
* Lower the permission for all mappings to a given page.
*/
void
pmap_page_protect(vm_offset_t phys, vm_prot_t prot)
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
if ((prot & VM_PROT_WRITE) == 0) {
if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
pmap_changebit(phys, PG_KWE|PG_UWE, FALSE);
pmap_changebit(m, PG_KWE|PG_UWE, FALSE);
} else {
pmap_remove_all(phys);
pmap_remove_all(m);
}
}
}
@ -2772,18 +2719,14 @@ pmap_phys_address(ppn)
*
*/
int
pmap_ts_referenced(vm_offset_t pa)
pmap_ts_referenced(vm_page_t m)
{
pv_table_t *ppv;
if (!pmap_is_managed(pa))
if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
return 0;
ppv = pa_to_pvh(pa);
if (ppv->pv_flags & PV_TABLE_REF) {
pmap_changebit(pa, PG_FOR|PG_FOE|PG_FOW, TRUE);
ppv->pv_flags &= ~PV_TABLE_REF;
if (m->md.pv_flags & PV_TABLE_REF) {
pmap_changebit(m, PG_FOR|PG_FOE|PG_FOW, TRUE);
m->md.pv_flags &= ~PV_TABLE_REF;
return 1;
}
@ -2797,34 +2740,27 @@ pmap_ts_referenced(vm_offset_t pa)
* in any physical maps.
*/
boolean_t
pmap_is_modified(vm_offset_t pa)
pmap_is_modified(vm_page_t m)
{
pv_table_t *ppv;
if (!pmap_is_managed(pa))
if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
return FALSE;
ppv = pa_to_pvh(pa);
return (ppv->pv_flags & PV_TABLE_MOD) != 0;
return (m->md.pv_flags & PV_TABLE_MOD) != 0;
}
/*
* Clear the modify bits on the specified physical page.
*/
void
pmap_clear_modify(vm_offset_t pa)
pmap_clear_modify(vm_page_t m)
{
pv_table_t *ppv;
if (!pmap_is_managed(pa))
if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
return;
ppv = pa_to_pvh(pa);
if (ppv->pv_flags & PV_TABLE_MOD) {
pmap_changebit(pa, PG_FOW, TRUE);
ppv->pv_flags &= ~PV_TABLE_MOD;
if (m->md.pv_flags & PV_TABLE_MOD) {
pmap_changebit(m, PG_FOW, TRUE);
m->md.pv_flags &= ~PV_TABLE_MOD;
}
}
@ -2838,10 +2774,7 @@ pmap_clear_modify(vm_offset_t pa)
void
pmap_page_is_free(vm_page_t m)
{
pv_table_t *ppv;
ppv = pa_to_pvh(VM_PAGE_TO_PHYS(m));
ppv->pv_flags = 0;
m->md.pv_flags = 0;
}
/*
@ -2850,18 +2783,14 @@ pmap_page_is_free(vm_page_t m)
* Clear the reference bit on the specified physical page.
*/
void
pmap_clear_reference(vm_offset_t pa)
pmap_clear_reference(vm_page_t m)
{
pv_table_t *ppv;
if (!pmap_is_managed(pa))
if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
return;
ppv = pa_to_pvh(pa);
if (ppv->pv_flags & PV_TABLE_REF) {
pmap_changebit(pa, PG_FOR|PG_FOE|PG_FOW, TRUE);
ppv->pv_flags &= ~PV_TABLE_REF;
if (m->md.pv_flags & PV_TABLE_REF) {
pmap_changebit(m, PG_FOR|PG_FOE|PG_FOW, TRUE);
m->md.pv_flags &= ~PV_TABLE_REF;
}
}
@ -2876,7 +2805,7 @@ pmap_emulate_reference(struct proc *p, vm_offset_t v, int user, int write)
{
pt_entry_t faultoff, *pte;
vm_offset_t pa;
pv_table_t *ppv;
vm_page_t m;
/*
* Convert process and virtual address to physical address.
@ -2932,16 +2861,16 @@ pmap_emulate_reference(struct proc *p, vm_offset_t v, int user, int write)
* (1) always mark page as used, and
* (2) if it was a write fault, mark page as modified.
*/
ppv = pa_to_pvh(pa);
ppv->pv_flags |= PV_TABLE_REF;
m = PHYS_TO_VM_PAGE(pa);
m->md.pv_flags |= PV_TABLE_REF;
faultoff = PG_FOR | PG_FOE;
vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
vm_page_flag_set(m, PG_REFERENCED);
if (write) {
ppv->pv_flags |= PV_TABLE_MOD;
vm_page_dirty(ppv->pv_vm_page);
m->md.pv_flags |= PV_TABLE_MOD;
vm_page_dirty(m);
faultoff |= PG_FOW;
}
pmap_changebit(pa, faultoff, FALSE);
pmap_changebit(m, faultoff, FALSE);
if ((*pte & faultoff) != 0) {
#if 1
/*
@ -3020,7 +2949,6 @@ pmap_mincore(pmap, addr)
{
pt_entry_t *pte;
vm_page_t m;
int val = 0;
pte = pmap_lev3pte(pmap, addr);
@ -3029,7 +2957,7 @@ pmap_mincore(pmap, addr)
}
if (pmap_pte_v(pte)) {
pv_table_t *ppv;
vm_page_t m;
vm_offset_t pa;
val = MINCORE_INCORE;
@ -3038,29 +2966,28 @@ pmap_mincore(pmap, addr)
pa = pmap_pte_pa(pte);
ppv = pa_to_pvh(pa);
m = ppv->pv_vm_page;
m = PHYS_TO_VM_PAGE(pa);
/*
* Modified by us
*/
if (ppv->pv_flags & PV_TABLE_MOD)
if (m->md.pv_flags & PV_TABLE_MOD)
val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
/*
* Modified by someone
*/
else if (m->dirty || pmap_is_modified(pa))
else if (m->dirty || pmap_is_modified(m))
val |= MINCORE_MODIFIED_OTHER;
/*
* Referenced by us
*/
if (ppv->pv_flags & PV_TABLE_REF)
if (m->md.pv_flags & PV_TABLE_REF)
val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
/*
* Referenced by someone
*/
else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(pa)) {
else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
val |= MINCORE_REFERENCED_OTHER;
vm_page_flag_set(m, PG_REFERENCED);
}
@ -3106,14 +3033,16 @@ pmap_deactivate(struct proc *p)
}
vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) {
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{
return addr;
}
#if 0
#if defined(PMAP_DEBUG)
pmap_pid_dump(int pid) {
pmap_pid_dump(int pid)
{
pmap_t pmap;
struct proc *p;
int npte = 0;
@ -3147,7 +3076,7 @@ pmap_pid_dump(int pid) {
vm_offset_t pa;
vm_page_t m;
pa = *(int *)pte;
m = PHYS_TO_VM_PAGE((pa & PG_FRAME));
m = PHYS_TO_VM_PAGE(pa);
printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
va, pa, m->hold_count, m->wire_count, m->flags);
npte++;
@ -3171,7 +3100,7 @@ pmap_pid_dump(int pid) {
#if defined(DEBUG)
static void pads __P((pmap_t pm));
static void pmap_pvdump __P((vm_offset_t pa));
static void pmap_pvdump __P((vm_page_t m));
/* print address space of pmap*/
static void
@ -3203,12 +3132,11 @@ static void
pmap_pvdump(pa)
vm_offset_t pa;
{
pv_table_t *ppv;
register pv_entry_t pv;
pv_entry_t pv;
vm_page_t m;
printf("pa %x", pa);
ppv = pa_to_pvh(pa);
for (pv = TAILQ_FIRST(&ppv->pv_list);
m = PHYS_TO_VM_PAGE(pa);
for (pv = TAILQ_FIRST(&m->md.pv_list);
pv;
pv = TAILQ_NEXT(pv, pv_list)) {
#ifdef used_to_be

View File

@ -158,12 +158,12 @@ alpha_XXX_dmamap(vm_offset_t va)
* Pmap stuff
*/
struct pv_entry;
typedef struct {
struct md_page {
int pv_list_count;
struct vm_page *pv_vm_page;
int pv_flags;
TAILQ_HEAD(,pv_entry) pv_list;
} pv_table_t;
};
#define PV_TABLE_MOD 0x01 /* modified */
#define PV_TABLE_REF 0x02 /* referenced */

View File

@ -176,16 +176,13 @@ mmrw(dev, uio, flags)
/* minor device 0 is physical memory */
case 0:
v = uio->uio_offset;
pmap_enter(kernel_pmap, (vm_offset_t)ptvmmap, v,
uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
TRUE);
pmap_kenter((vm_offset_t)ptvmmap, v);
o = (int)uio->uio_offset & PAGE_MASK;
c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
c = min(c, (u_int)(PAGE_SIZE - o));
c = min(c, (u_int)iov->iov_len);
error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
pmap_remove(kernel_pmap, (vm_offset_t)ptvmmap,
(vm_offset_t)&ptvmmap[PAGE_SIZE]);
pmap_kremove((vm_offset_t)ptvmmap);
continue;
/* minor device 1 is kernel memory */

View File

@ -144,9 +144,6 @@
#define pte_prot(m, p) (protection_codes[p])
static int protection_codes[8];
#define pa_index(pa) atop((pa) - vm_first_phys)
#define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
static struct pmap kernel_pmap_store;
pmap_t kernel_pmap;
@ -155,10 +152,8 @@ vm_offset_t avail_end; /* PA of last available physical page */
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
static vm_offset_t vm_first_phys;
static int pgeflag; /* PG_G or-in */
static int pseflag; /* PG_PS or-in */
static int pv_npg;
static vm_object_t kptobj;
@ -180,7 +175,6 @@ static struct pv_entry *pvinit;
*/
pt_entry_t *CMAP1 = 0;
static pt_entry_t *CMAP2, *ptmmap;
static pv_table_t *pv_table;
caddr_t CADDR1 = 0, ptvmmap = 0;
static caddr_t CADDR2;
static pt_entry_t *msgbufmap;
@ -197,21 +191,19 @@ static PMAP_INLINE void free_pv_entry __P((pv_entry_t pv));
static unsigned * get_ptbase __P((pmap_t pmap));
static pv_entry_t get_pv_entry __P((void));
static void i386_protection_init __P((void));
static __inline void pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem));
static void pmap_clearbit __P((vm_offset_t pa, int bit));
static __inline void pmap_changebit __P((vm_page_t m, int bit, boolean_t setem));
static PMAP_INLINE int pmap_is_managed __P((vm_offset_t pa));
static void pmap_remove_all __P((vm_offset_t pa));
static void pmap_remove_all __P((vm_page_t m));
static vm_page_t pmap_enter_quick __P((pmap_t pmap, vm_offset_t va,
vm_offset_t pa, vm_page_t mpte));
vm_page_t m, vm_page_t mpte));
static int pmap_remove_pte __P((struct pmap *pmap, unsigned *ptq,
vm_offset_t sva));
static void pmap_remove_page __P((struct pmap *pmap, vm_offset_t va));
static int pmap_remove_entry __P((struct pmap *pmap, pv_table_t *pv,
static int pmap_remove_entry __P((struct pmap *pmap, vm_page_t m,
vm_offset_t va));
static boolean_t pmap_testbit __P((vm_offset_t pa, int bit));
static boolean_t pmap_testbit __P((vm_page_t m, int bit));
static void pmap_insert_entry __P((pmap_t pmap, vm_offset_t va,
vm_page_t mpte, vm_offset_t pa));
vm_page_t mpte, vm_page_t m));
static vm_page_t pmap_allocpte __P((pmap_t pmap, vm_offset_t va));
@ -256,7 +248,8 @@ pmap_pte(pmap, va)
* (.text, .data, .bss)
*/
static vm_offset_t
pmap_kmem_choose(vm_offset_t addr) {
pmap_kmem_choose(vm_offset_t addr)
{
vm_offset_t newaddr = addr;
#ifndef DISABLE_PSE
if (cpu_feature & CPUID_PSE) {
@ -488,8 +481,6 @@ void
pmap_init(phys_start, phys_end)
vm_offset_t phys_start, phys_end;
{
vm_offset_t addr;
vm_size_t s;
int i;
int initial_pvs;
@ -498,40 +489,30 @@ pmap_init(phys_start, phys_end)
*/
kptobj = vm_object_allocate(OBJT_DEFAULT, NKPDE);
/*
* calculate the number of pv_entries needed
*/
vm_first_phys = phys_avail[0];
for (i = 0; phys_avail[i + 1]; i += 2);
pv_npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;
/*
* Allocate memory for random pmap data structures. Includes the
* pv_head_table.
*/
s = (vm_size_t) (sizeof(pv_table_t) * pv_npg);
s = round_page(s);
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
pv_table = (pv_table_t *) addr;
for(i = 0; i < pv_npg; i++) {
vm_offset_t pa;
TAILQ_INIT(&pv_table[i].pv_list);
pv_table[i].pv_list_count = 0;
pa = vm_first_phys + i * PAGE_SIZE;
pv_table[i].pv_vm_page = PHYS_TO_VM_PAGE(pa);
for(i = 0; i < vm_page_array_size; i++) {
vm_page_t m;
m = &vm_page_array[i];
TAILQ_INIT(&m->md.pv_list);
m->md.pv_list_count = 0;
}
/*
* init the pv free list
*/
initial_pvs = pv_npg;
initial_pvs = vm_page_array_size;
if (initial_pvs < MINPV)
initial_pvs = MINPV;
pvzone = &pvzone_store;
pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
initial_pvs * sizeof (struct pv_entry));
zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit, pv_npg);
zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit,
vm_page_array_size);
/*
* Now it is safe to enable pv_table recording.
@ -545,34 +526,13 @@ pmap_init(phys_start, phys_end)
* numbers of pv entries.
*/
void
pmap_init2() {
pv_entry_max = PMAP_SHPGPERPROC * maxproc + pv_npg;
pmap_init2()
{
pv_entry_max = PMAP_SHPGPERPROC * maxproc + vm_page_array_size;
pv_entry_high_water = 9 * (pv_entry_max / 10);
zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
}
/*
* Used to map a range of physical addresses into kernel
* virtual address space.
*
* For now, VM is already on, we only need to map the
* specified memory.
*/
vm_offset_t
pmap_map(virt, start, end, prot)
vm_offset_t virt;
vm_offset_t start;
vm_offset_t end;
int prot;
{
while (start < end) {
pmap_enter(kernel_pmap, virt, start, prot, FALSE);
virt += PAGE_SIZE;
start += PAGE_SIZE;
}
return (virt);
}
/***************************************************
* Low level helper routines.....
@ -585,7 +545,8 @@ pmap_map(virt, start, end, prot)
* This should be an invalid condition.
*/
static int
pmap_nw_modified(pt_entry_t ptea) {
pmap_nw_modified(pt_entry_t ptea)
{
int pte;
pte = (int) ptea;
@ -603,7 +564,8 @@ pmap_nw_modified(pt_entry_t ptea) {
* not be tested for the modified bit.
*/
static PMAP_INLINE int
pmap_track_modified( vm_offset_t va) {
pmap_track_modified(vm_offset_t va)
{
if ((va < clean_sva) || (va >= clean_eva))
return 1;
else
@ -611,7 +573,8 @@ pmap_track_modified( vm_offset_t va) {
}
static PMAP_INLINE void
invltlb_1pg( vm_offset_t va) {
invltlb_1pg(vm_offset_t va)
{
#if defined(I386_CPU)
if (cpu_class == CPUCLASS_386) {
invltlb();
@ -742,26 +705,6 @@ pmap_extract(pmap, va)
}
/*
* determine if a page is managed (memory vs. device)
*/
static PMAP_INLINE int
pmap_is_managed(pa)
vm_offset_t pa;
{
int i;
if (!pmap_initialized)
return 0;
for (i = 0; phys_avail[i + 1]; i += 2) {
if (pa < phys_avail[i + 1] && pa >= phys_avail[i])
return 1;
}
return 0;
}
/***************************************************
* Low level mapping routines.....
***************************************************/
@ -801,6 +744,29 @@ pmap_kremove(va)
invltlb_1pg(va); /* XXX what about SMP? */
}
/*
* Used to map a range of physical addresses into kernel
* virtual address space.
*
* For now, VM is already on, we only need to map the
* specified memory.
*/
vm_offset_t
pmap_map(virt, start, end, prot)
vm_offset_t virt;
vm_offset_t start;
vm_offset_t end;
int prot;
{
while (start < end) {
pmap_kenter(virt, start);
virt += PAGE_SIZE;
start += PAGE_SIZE;
}
return (virt);
}
/*
* Add a list of wired pages to the kva
* this routine is only used for temporary
@ -1078,7 +1044,8 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
}
static PMAP_INLINE int
pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
{
vm_page_unhold(m);
if (m->hold_count == 0)
return _pmap_unwire_pte_hold(pmap, m);
@ -1539,10 +1506,9 @@ get_pv_entry(void)
* in a pinch.
*/
void
pmap_collect() {
pv_table_t *ppv;
pmap_collect()
{
int i;
vm_offset_t pa;
vm_page_t m;
static int warningdone=0;
@ -1554,16 +1520,12 @@ pmap_collect() {
warningdone++;
}
for(i = 0; i < pv_npg; i++) {
if ((ppv = &pv_table[i]) == 0)
continue;
m = ppv->pv_vm_page;
if ((pa = VM_PAGE_TO_PHYS(m)) == 0)
continue;
for(i = 0; i < vm_page_array_size; i++) {
m = &vm_page_array[i];
if (m->wire_count || m->hold_count || m->busy ||
(m->flags & PG_BUSY))
(m->flags & PG_BUSY))
continue;
pmap_remove_all(pa);
pmap_remove_all(m);
}
pmap_pagedaemon_waken = 0;
}
@ -1577,9 +1539,9 @@ pmap_collect() {
*/
static int
pmap_remove_entry(pmap, ppv, va)
pmap_remove_entry(pmap, m, va)
struct pmap *pmap;
pv_table_t *ppv;
vm_page_t m;
vm_offset_t va;
{
pv_entry_t pv;
@ -1587,8 +1549,8 @@ pmap_remove_entry(pmap, ppv, va)
int s;
s = splvm();
if (ppv->pv_list_count < pmap->pm_stats.resident_count) {
for (pv = TAILQ_FIRST(&ppv->pv_list);
if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
for (pv = TAILQ_FIRST(&m->md.pv_list);
pv;
pv = TAILQ_NEXT(pv, pv_list)) {
if (pmap == pv->pv_pmap && va == pv->pv_va)
@ -1607,10 +1569,10 @@ pmap_remove_entry(pmap, ppv, va)
if (pv) {
rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem);
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
ppv->pv_list_count--;
if (TAILQ_FIRST(&ppv->pv_list) == NULL)
vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count--;
if (TAILQ_FIRST(&m->md.pv_list) == NULL)
vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
free_pv_entry(pv);
@ -1625,16 +1587,15 @@ pmap_remove_entry(pmap, ppv, va)
* (pmap, va).
*/
static void
pmap_insert_entry(pmap, va, mpte, pa)
pmap_insert_entry(pmap, va, mpte, m)
pmap_t pmap;
vm_offset_t va;
vm_page_t mpte;
vm_offset_t pa;
vm_page_t m;
{
int s;
pv_entry_t pv;
pv_table_t *ppv;
s = splvm();
pv = get_pv_entry();
@ -1643,10 +1604,8 @@ pmap_insert_entry(pmap, va, mpte, pa)
pv->pv_ptem = mpte;
TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
ppv = pa_to_pvh(pa);
TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
ppv->pv_list_count++;
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count++;
splx(s);
}
@ -1661,7 +1620,7 @@ pmap_remove_pte(pmap, ptq, va)
vm_offset_t va;
{
unsigned oldpte;
pv_table_t *ppv;
vm_page_t m;
oldpte = loadandclear(ptq);
if (oldpte & PG_W)
@ -1674,7 +1633,7 @@ pmap_remove_pte(pmap, ptq, va)
invlpg(va);
pmap->pm_stats.resident_count -= 1;
if (oldpte & PG_MANAGED) {
ppv = pa_to_pvh(oldpte);
m = PHYS_TO_VM_PAGE(oldpte);
if (oldpte & PG_M) {
#if defined(PMAP_DIAGNOSTIC)
if (pmap_nw_modified((pt_entry_t) oldpte)) {
@ -1684,11 +1643,11 @@ pmap_remove_pte(pmap, ptq, va)
}
#endif
if (pmap_track_modified(va))
vm_page_dirty(ppv->pv_vm_page);
vm_page_dirty(m);
}
if (oldpte & PG_A)
vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
return pmap_remove_entry(pmap, ppv, va);
vm_page_flag_set(m, PG_REFERENCED);
return pmap_remove_entry(pmap, m, va);
} else {
return pmap_unuse_pt(pmap, va, NULL);
}
@ -1836,11 +1795,10 @@ pmap_remove(pmap, sva, eva)
*/
static void
pmap_remove_all(pa)
vm_offset_t pa;
pmap_remove_all(m)
vm_page_t m;
{
register pv_entry_t pv;
pv_table_t *ppv;
register unsigned *pte, tpte;
int s;
@ -1849,14 +1807,13 @@ pmap_remove_all(pa)
* XXX this makes pmap_page_protect(NONE) illegal for non-managed
* pages!
*/
if (!pmap_is_managed(pa)) {
panic("pmap_page_protect: illegal for unmanaged page, va: 0x%x", pa);
if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
panic("pmap_page_protect: illegal for unmanaged page, va: 0x%x", VM_PAGE_TO_PHYS(m));
}
#endif
s = splvm();
ppv = pa_to_pvh(pa);
while ((pv = TAILQ_FIRST(&ppv->pv_list)) != NULL) {
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
pv->pv_pmap->pm_stats.resident_count--;
pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
@ -1866,7 +1823,7 @@ pmap_remove_all(pa)
pv->pv_pmap->pm_stats.wired_count--;
if (tpte & PG_A)
vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
vm_page_flag_set(m, PG_REFERENCED);
/*
* Update the vm_page_t clean and reference bits.
@ -1880,18 +1837,18 @@ pmap_remove_all(pa)
}
#endif
if (pmap_track_modified(pv->pv_va))
vm_page_dirty(ppv->pv_vm_page);
vm_page_dirty(m);
}
pmap_TLB_invalidate(pv->pv_pmap, pv->pv_va);
TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
ppv->pv_list_count--;
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count--;
pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
free_pv_entry(pv);
}
vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
splx(s);
}
@ -1908,7 +1865,6 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
vm_pindex_t sindex, eindex;
int anychanged;
if (pmap == NULL)
return;
@ -1955,22 +1911,22 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
for (; sindex != pdnxt; sindex++) {
unsigned pbits;
pv_table_t *ppv;
vm_page_t m;
pbits = ptbase[sindex];
if (pbits & PG_MANAGED) {
ppv = NULL;
m = NULL;
if (pbits & PG_A) {
ppv = pa_to_pvh(pbits);
vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
m = PHYS_TO_VM_PAGE(pbits);
vm_page_flag_set(m, PG_REFERENCED);
pbits &= ~PG_A;
}
if (pbits & PG_M) {
if (pmap_track_modified(i386_ptob(sindex))) {
if (ppv == NULL)
ppv = pa_to_pvh(pbits);
vm_page_dirty(ppv->pv_vm_page);
if (m == NULL)
m = PHYS_TO_VM_PAGE(pbits);
vm_page_dirty(m);
pbits &= ~PG_M;
}
}
@ -2001,9 +1957,10 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
* insert this page into the given map NOW.
*/
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
boolean_t wired)
{
vm_offset_t pa;
register unsigned *pte;
vm_offset_t opa;
vm_offset_t origpte, newpte;
@ -2058,8 +2015,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
(void *)pmap->pm_pdir[PTDPTDI], va);
}
pa = VM_PAGE_TO_PHYS(m) & PG_FRAME;
origpte = *(vm_offset_t *)pte;
pa &= PG_FRAME;
opa = origpte & PG_FRAME;
if (origpte & PG_PS)
@ -2114,9 +2071,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
*/
if (origpte & PG_MANAGED) {
if ((origpte & PG_M) && pmap_track_modified(va)) {
pv_table_t *ppv;
ppv = pa_to_pvh(opa);
vm_page_dirty(ppv->pv_vm_page);
vm_page_t om;
om = PHYS_TO_VM_PAGE(opa);
vm_page_dirty(om);
}
pa |= PG_MANAGED;
}
@ -2134,12 +2091,12 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
}
/*
* Enter on the PV list if part of our managed memory Note that we
* Enter on the PV list if part of our managed memory. Note that we
* raise IPL while manipulating pv_table since pmap_enter can be
* called at interrupt time.
*/
if (pmap_is_managed(pa)) {
pmap_insert_entry(pmap, va, mpte, pa);
if (pmap_initialized && (m->flags & PG_FICTITIOUS) == 0) {
pmap_insert_entry(pmap, va, mpte, m);
pa |= PG_MANAGED;
}
@ -2193,13 +2150,14 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
*/
static vm_page_t
pmap_enter_quick(pmap, va, pa, mpte)
pmap_enter_quick(pmap, va, m, mpte)
register pmap_t pmap;
vm_offset_t va;
register vm_offset_t pa;
vm_page_t m;
vm_page_t mpte;
{
register unsigned *pte;
unsigned *pte;
vm_offset_t pa;
/*
* In the case that a page table page is not
@ -2261,17 +2219,19 @@ pmap_enter_quick(pmap, va, pa, mpte)
}
/*
* Enter on the PV list if part of our managed memory Note that we
* Enter on the PV list if part of our managed memory. Note that we
* raise IPL while manipulating pv_table since pmap_enter can be
* called at interrupt time.
*/
pmap_insert_entry(pmap, va, mpte, pa);
pmap_insert_entry(pmap, va, mpte, m);
/*
* Increment counters
*/
pmap->pm_stats.resident_count++;
pa = VM_PAGE_TO_PHYS(m);
/*
* Now validate mapping with RO protection
*/
@ -2399,8 +2359,7 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
vm_page_deactivate(p);
vm_page_busy(p);
mpte = pmap_enter_quick(pmap,
addr + i386_ptob(tmpidx),
VM_PAGE_TO_PHYS(p), mpte);
addr + i386_ptob(tmpidx), p, mpte);
vm_page_flag_set(p, PG_MAPPED);
vm_page_wakeup(p);
}
@ -2420,8 +2379,7 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
vm_page_deactivate(p);
vm_page_busy(p);
mpte = pmap_enter_quick(pmap,
addr + i386_ptob(tmpidx),
VM_PAGE_TO_PHYS(p), mpte);
addr + i386_ptob(tmpidx), p, mpte);
vm_page_flag_set(p, PG_MAPPED);
vm_page_wakeup(p);
}
@ -2516,8 +2474,7 @@ pmap_prefault(pmap, addra, entry)
vm_page_deactivate(m);
}
vm_page_busy(m);
mpte = pmap_enter_quick(pmap, addr,
VM_PAGE_TO_PHYS(m), mpte);
mpte = pmap_enter_quick(pmap, addr, m, mpte);
vm_page_flag_set(m, PG_MAPPED);
vm_page_wakeup(m);
}
@ -2577,6 +2534,7 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
vm_offset_t end_addr = src_addr + len;
vm_offset_t pdnxt;
unsigned src_frame, dst_frame;
vm_page_t m;
if (dst_addr != src_addr)
return;
@ -2659,11 +2617,11 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
* accessed (referenced) bits
* during the copy.
*/
m = PHYS_TO_VM_PAGE(ptetemp);
*dst_pte = ptetemp & ~(PG_M | PG_A);
dst_pmap->pm_stats.resident_count++;
pmap_insert_entry(dst_pmap, addr,
dstmpte,
(ptetemp & PG_FRAME));
dstmpte, m);
} else {
pmap_unwire_pte_hold(dst_pmap, dstmpte);
}
@ -2850,24 +2808,22 @@ pmap_pageable(pmap, sva, eva, pageable)
* in the given pmap.
*/
boolean_t
pmap_page_exists(pmap, pa)
pmap_page_exists(pmap, m)
pmap_t pmap;
vm_offset_t pa;
vm_page_t m;
{
register pv_entry_t pv;
pv_table_t *ppv;
int s;
if (!pmap_is_managed(pa))
if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
return FALSE;
s = splvm();
ppv = pa_to_pvh(pa);
/*
* Not found, check current mappings returning immediately if found.
*/
for (pv = TAILQ_FIRST(&ppv->pv_list);
for (pv = TAILQ_FIRST(&m->md.pv_list);
pv;
pv = TAILQ_NEXT(pv, pv_list)) {
if (pv->pv_pmap == pmap) {
@ -2894,9 +2850,9 @@ pmap_remove_pages(pmap, sva, eva)
vm_offset_t sva, eva;
{
unsigned *pte, tpte;
pv_table_t *ppv;
pv_entry_t pv, npv;
int s;
vm_page_t m;
#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
if (!curproc || (pmap != vmspace_pmap(curproc->p_vmspace))) {
@ -2931,9 +2887,9 @@ pmap_remove_pages(pmap, sva, eva)
}
*pte = 0;
ppv = pa_to_pvh(tpte);
m = PHYS_TO_VM_PAGE(tpte);
KASSERT(ppv < &pv_table[pv_npg],
KASSERT(m < &vm_page_array[vm_page_array_size],
("pmap_remove_pages: bad tpte %x", tpte));
pv->pv_pmap->pm_stats.resident_count--;
@ -2942,17 +2898,17 @@ pmap_remove_pages(pmap, sva, eva)
* Update the vm_page_t clean and reference bits.
*/
if (tpte & PG_M) {
vm_page_dirty(ppv->pv_vm_page);
vm_page_dirty(m);
}
npv = TAILQ_NEXT(pv, pv_plist);
TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
ppv->pv_list_count--;
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
if (TAILQ_FIRST(&ppv->pv_list) == NULL) {
vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
m->md.pv_list_count--;
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
}
pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
@ -2968,25 +2924,23 @@ pmap_remove_pages(pmap, sva, eva)
* and a lot of things compile-time evaluate.
*/
static boolean_t
pmap_testbit(pa, bit)
register vm_offset_t pa;
pmap_testbit(m, bit)
vm_page_t m;
int bit;
{
register pv_entry_t pv;
pv_table_t *ppv;
pv_entry_t pv;
unsigned *pte;
int s;
if (!pmap_is_managed(pa))
if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
return FALSE;
ppv = pa_to_pvh(pa);
if (TAILQ_FIRST(&ppv->pv_list) == NULL)
if (TAILQ_FIRST(&m->md.pv_list) == NULL)
return FALSE;
s = splvm();
for (pv = TAILQ_FIRST(&ppv->pv_list);
for (pv = TAILQ_FIRST(&m->md.pv_list);
pv;
pv = TAILQ_NEXT(pv, pv_list)) {
@ -3020,27 +2974,25 @@ pmap_testbit(pa, bit)
* this routine is used to modify bits in ptes
*/
static __inline void
pmap_changebit(pa, bit, setem)
vm_offset_t pa;
pmap_changebit(m, bit, setem)
vm_page_t m;
int bit;
boolean_t setem;
{
register pv_entry_t pv;
pv_table_t *ppv;
register unsigned *pte;
int s;
if (!pmap_is_managed(pa))
if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
return;
s = splvm();
ppv = pa_to_pvh(pa);
/*
* Loop over all current mappings setting/clearing as appropos If
* setting RO do we need to clear the VAC?
*/
for (pv = TAILQ_FIRST(&ppv->pv_list);
for (pv = TAILQ_FIRST(&m->md.pv_list);
pv;
pv = TAILQ_NEXT(pv, pv_list)) {
@ -3069,7 +3021,7 @@ pmap_changebit(pa, bit, setem)
if (pbits & bit) {
if (bit == PG_RW) {
if (pbits & PG_M) {
vm_page_dirty(ppv->pv_vm_page);
vm_page_dirty(m);
}
*(int *)pte = pbits & ~(PG_M|PG_RW);
} else {
@ -3082,33 +3034,19 @@ pmap_changebit(pa, bit, setem)
splx(s);
}
/*
* pmap_clearbit:
*
* Clear a bit/bits in every pte mapping a given physical page. Making
* this inline allows the pmap_changebit inline to be well optimized.
*/
static __inline void
pmap_clearbit(
vm_offset_t pa,
int bit)
{
pmap_changebit(pa, bit, FALSE);
}
/*
* pmap_page_protect:
*
* Lower the permission for all mappings to a given page.
*/
void
pmap_page_protect(vm_offset_t phys, vm_prot_t prot)
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
if ((prot & VM_PROT_WRITE) == 0) {
if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
pmap_clearbit(phys, PG_RW);
pmap_changebit(m, PG_RW, FALSE);
} else {
pmap_remove_all(phys);
pmap_remove_all(m);
}
}
}
@ -3126,31 +3064,28 @@ pmap_phys_address(ppn)
* Return the count of reference bits for a page, clearing all of them.
*/
int
pmap_ts_referenced(vm_offset_t pa)
pmap_ts_referenced(vm_page_t m)
{
register pv_entry_t pv, pvf, pvn;
pv_table_t *ppv;
unsigned *pte;
int s;
int rtval = 0;
if (!pmap_is_managed(pa))
if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
return (rtval);
s = splvm();
ppv = pa_to_pvh(pa);
if ((pv = TAILQ_FIRST(&ppv->pv_list)) != NULL) {
if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
pvf = pv;
do {
pvn = TAILQ_NEXT(pv, pv_list);
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
if (!pmap_track_modified(pv->pv_va))
continue;
@ -3181,18 +3116,18 @@ pmap_ts_referenced(vm_offset_t pa)
* in any physical maps.
*/
boolean_t
pmap_is_modified(vm_offset_t pa)
pmap_is_modified(vm_page_t m)
{
return pmap_testbit((pa), PG_M);
return pmap_testbit(m, PG_M);
}
/*
* Clear the modify bits on the specified physical page.
*/
void
pmap_clear_modify(vm_offset_t pa)
pmap_clear_modify(vm_page_t m)
{
pmap_clearbit(pa, PG_M);
pmap_changebit(m, PG_M, FALSE);
}
/*
@ -3201,9 +3136,9 @@ pmap_clear_modify(vm_offset_t pa)
* Clear the reference bit on the specified physical page.
*/
void
pmap_clear_reference(vm_offset_t pa)
pmap_clear_reference(vm_page_t m)
{
pmap_clearbit(pa, PG_A);
pmap_changebit(m, PG_A, FALSE);
}
/*
@ -3304,7 +3239,6 @@ pmap_mincore(pmap, addr)
}
if ((pte = *ptep) != 0) {
pv_table_t *ppv;
vm_offset_t pa;
val = MINCORE_INCORE;
@ -3313,8 +3247,7 @@ pmap_mincore(pmap, addr)
pa = pte & PG_FRAME;
ppv = pa_to_pvh((pa & PG_FRAME));
m = ppv->pv_vm_page;
m = PHYS_TO_VM_PAGE(pa);
/*
* Modified by us
@ -3324,7 +3257,7 @@ pmap_mincore(pmap, addr)
/*
* Modified by someone
*/
else if (m->dirty || pmap_is_modified(pa))
else if (m->dirty || pmap_is_modified(m))
val |= MINCORE_MODIFIED_OTHER;
/*
* Referenced by us
@ -3335,7 +3268,7 @@ pmap_mincore(pmap, addr)
/*
* Referenced by someone
*/
else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(pa)) {
else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
val |= MINCORE_REFERENCED_OTHER;
vm_page_flag_set(m, PG_REFERENCED);
}
@ -3361,7 +3294,8 @@ pmap_activate(struct proc *p)
}
vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) {
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{
if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
return addr;
@ -3373,7 +3307,8 @@ pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) {
#if defined(PMAP_DEBUG)
pmap_pid_dump(int pid) {
pmap_pid_dump(int pid)
{
pmap_t pmap;
struct proc *p;
int npte = 0;
@ -3407,7 +3342,7 @@ pmap_pid_dump(int pid) {
vm_offset_t pa;
vm_page_t m;
pa = *(int *)pte;
m = PHYS_TO_VM_PAGE((pa & PG_FRAME));
m = PHYS_TO_VM_PAGE(pa);
printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
va, pa, m->hold_count, m->wire_count, m->flags);
npte++;
@ -3462,12 +3397,12 @@ void
pmap_pvdump(pa)
vm_offset_t pa;
{
pv_table_t *ppv;
register pv_entry_t pv;
vm_page_t m;
printf("pa %x", pa);
ppv = pa_to_pvh(pa);
for (pv = TAILQ_FIRST(&ppv->pv_list);
m = PHYS_TO_VM_PAGE(pa);
for (pv = TAILQ_FIRST(&m->md.pv_list);
pv;
pv = TAILQ_NEXT(pv, pv_list)) {
#ifdef used_to_be

View File

@ -188,11 +188,11 @@ pmap_kextract(vm_offset_t va)
* Pmap stuff
*/
struct pv_entry;
typedef struct {
struct md_page {
int pv_list_count;
struct vm_page *pv_vm_page;
TAILQ_HEAD(,pv_entry) pv_list;
} pv_table_t;
};
struct pmap {
pd_entry_t *pm_pdir; /* KVA of page directory */

View File

@ -617,11 +617,9 @@ dadump(dev_t dev)
while (num > 0) {
if (is_physical_memory(addr)) {
pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
trunc_page(addr), VM_PROT_READ, TRUE);
pmap_kenter((vm_offset_t)CADDR1, trunc_page(addr));
} else {
pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
trunc_page(0), VM_PROT_READ, TRUE);
pmap_kenter((vm_offset_t)CADDR1, trunc_page(0));
}
xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);

View File

@ -274,11 +274,9 @@ addump(dev_t dev)
while (count > 0) {
DELAY(1000);
if (is_physical_memory(addr))
pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
trunc_page(addr), VM_PROT_READ, TRUE);
pmap_kenter((vm_offset_t)CADDR1, trunc_page(addr));
else
pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
trunc_page(0), VM_PROT_READ, TRUE);
pmap_kenter((vm_offset_t)CADDR1, trunc_page(0));
bzero(&request, sizeof(struct ad_request));
request.device = adp;

View File

@ -176,16 +176,13 @@ mmrw(dev, uio, flags)
/* minor device 0 is physical memory */
case 0:
v = uio->uio_offset;
pmap_enter(kernel_pmap, (vm_offset_t)ptvmmap, v,
uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
TRUE);
pmap_kenter((vm_offset_t)ptvmmap, v);
o = (int)uio->uio_offset & PAGE_MASK;
c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
c = min(c, (u_int)(PAGE_SIZE - o));
c = min(c, (u_int)iov->iov_len);
error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
pmap_remove(kernel_pmap, (vm_offset_t)ptvmmap,
(vm_offset_t)&ptvmmap[PAGE_SIZE]);
pmap_kremove((vm_offset_t)ptvmmap);
continue;
/* minor device 1 is kernel memory */

vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
m = PHYS_TO_VM_PAGE(pbits);
vm_page_flag_set(m, PG_REFERENCED);
pbits &= ~PG_A;
}
if (pbits & PG_M) {
if (pmap_track_modified(i386_ptob(sindex))) {
if (ppv == NULL)
ppv = pa_to_pvh(pbits);
vm_page_dirty(ppv->pv_vm_page);
if (m == NULL)
m = PHYS_TO_VM_PAGE(pbits);
vm_page_dirty(m);
pbits &= ~PG_M;
}
}
@ -2001,9 +1957,10 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
* insert this page into the given map NOW.
*/
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
boolean_t wired)
{
vm_offset_t pa;
register unsigned *pte;
vm_offset_t opa;
vm_offset_t origpte, newpte;
@ -2058,8 +2015,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
(void *)pmap->pm_pdir[PTDPTDI], va);
}
pa = VM_PAGE_TO_PHYS(m) & PG_FRAME;
origpte = *(vm_offset_t *)pte;
pa &= PG_FRAME;
opa = origpte & PG_FRAME;
if (origpte & PG_PS)
@ -2114,9 +2071,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
*/
if (origpte & PG_MANAGED) {
if ((origpte & PG_M) && pmap_track_modified(va)) {
pv_table_t *ppv;
ppv = pa_to_pvh(opa);
vm_page_dirty(ppv->pv_vm_page);
vm_page_t om;
om = PHYS_TO_VM_PAGE(opa);
vm_page_dirty(om);
}
pa |= PG_MANAGED;
}
@ -2134,12 +2091,12 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
}
/*
* Enter on the PV list if part of our managed memory Note that we
* Enter on the PV list if part of our managed memory. Note that we
* raise IPL while manipulating pv_table since pmap_enter can be
* called at interrupt time.
*/
if (pmap_is_managed(pa)) {
pmap_insert_entry(pmap, va, mpte, pa);
if (pmap_initialized && (m->flags & PG_FICTITIOUS) == 0) {
pmap_insert_entry(pmap, va, mpte, m);
pa |= PG_MANAGED;
}
@ -2193,13 +2150,14 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
*/
static vm_page_t
pmap_enter_quick(pmap, va, pa, mpte)
pmap_enter_quick(pmap, va, m, mpte)
register pmap_t pmap;
vm_offset_t va;
register vm_offset_t pa;
vm_page_t m;
vm_page_t mpte;
{
register unsigned *pte;
unsigned *pte;
vm_offset_t pa;
/*
* In the case that a page table page is not
@ -2261,17 +2219,19 @@ pmap_enter_quick(pmap, va, pa, mpte)
}
/*
* Enter on the PV list if part of our managed memory Note that we
* Enter on the PV list if part of our managed memory. Note that we
* raise IPL while manipulating pv_table since pmap_enter can be
* called at interrupt time.
*/
pmap_insert_entry(pmap, va, mpte, pa);
pmap_insert_entry(pmap, va, mpte, m);
/*
* Increment counters
*/
pmap->pm_stats.resident_count++;
pa = VM_PAGE_TO_PHYS(m);
/*
* Now validate mapping with RO protection
*/
@ -2399,8 +2359,7 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
vm_page_deactivate(p);
vm_page_busy(p);
mpte = pmap_enter_quick(pmap,
addr + i386_ptob(tmpidx),
VM_PAGE_TO_PHYS(p), mpte);
addr + i386_ptob(tmpidx), p, mpte);
vm_page_flag_set(p, PG_MAPPED);
vm_page_wakeup(p);
}
@ -2420,8 +2379,7 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
vm_page_deactivate(p);
vm_page_busy(p);
mpte = pmap_enter_quick(pmap,
addr + i386_ptob(tmpidx),
VM_PAGE_TO_PHYS(p), mpte);
addr + i386_ptob(tmpidx), p, mpte);
vm_page_flag_set(p, PG_MAPPED);
vm_page_wakeup(p);
}
@ -2516,8 +2474,7 @@ pmap_prefault(pmap, addra, entry)
vm_page_deactivate(m);
}
vm_page_busy(m);
mpte = pmap_enter_quick(pmap, addr,
VM_PAGE_TO_PHYS(m), mpte);
mpte = pmap_enter_quick(pmap, addr, m, mpte);
vm_page_flag_set(m, PG_MAPPED);
vm_page_wakeup(m);
}
@ -2577,6 +2534,7 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
vm_offset_t end_addr = src_addr + len;
vm_offset_t pdnxt;
unsigned src_frame, dst_frame;
vm_page_t m;
if (dst_addr != src_addr)
return;
@ -2659,11 +2617,11 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
* accessed (referenced) bits
* during the copy.
*/
m = PHYS_TO_VM_PAGE(ptetemp);
*dst_pte = ptetemp & ~(PG_M | PG_A);
dst_pmap->pm_stats.resident_count++;
pmap_insert_entry(dst_pmap, addr,
dstmpte,
(ptetemp & PG_FRAME));
dstmpte, m);
} else {
pmap_unwire_pte_hold(dst_pmap, dstmpte);
}
@ -2850,24 +2808,22 @@ pmap_pageable(pmap, sva, eva, pageable)
* in the given pmap.
*/
boolean_t
pmap_page_exists(pmap, pa)
pmap_page_exists(pmap, m)
pmap_t pmap;
vm_offset_t pa;
vm_page_t m;
{
register pv_entry_t pv;
pv_table_t *ppv;
int s;
if (!pmap_is_managed(pa))
if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
return FALSE;
s = splvm();
ppv = pa_to_pvh(pa);
/*
* Not found, check current mappings returning immediately if found.
*/
for (pv = TAILQ_FIRST(&ppv->pv_list);
for (pv = TAILQ_FIRST(&m->md.pv_list);
pv;
pv = TAILQ_NEXT(pv, pv_list)) {
if (pv->pv_pmap == pmap) {
@ -2894,9 +2850,9 @@ pmap_remove_pages(pmap, sva, eva)
vm_offset_t sva, eva;
{
unsigned *pte, tpte;
pv_table_t *ppv;
pv_entry_t pv, npv;
int s;
vm_page_t m;
#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
if (!curproc || (pmap != vmspace_pmap(curproc->p_vmspace))) {
@ -2931,9 +2887,9 @@ pmap_remove_pages(pmap, sva, eva)
}
*pte = 0;
ppv = pa_to_pvh(tpte);
m = PHYS_TO_VM_PAGE(tpte);
KASSERT(ppv < &pv_table[pv_npg],
KASSERT(m < &vm_page_array[vm_page_array_size],
("pmap_remove_pages: bad tpte %x", tpte));
pv->pv_pmap->pm_stats.resident_count--;
@ -2942,17 +2898,17 @@ pmap_remove_pages(pmap, sva, eva)
* Update the vm_page_t clean and reference bits.
*/
if (tpte & PG_M) {
vm_page_dirty(ppv->pv_vm_page);
vm_page_dirty(m);
}
npv = TAILQ_NEXT(pv, pv_plist);
TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
ppv->pv_list_count--;
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
if (TAILQ_FIRST(&ppv->pv_list) == NULL) {
vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
m->md.pv_list_count--;
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
}
pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
@ -2968,25 +2924,23 @@ pmap_remove_pages(pmap, sva, eva)
* and a lot of things compile-time evaluate.
*/
static boolean_t
pmap_testbit(pa, bit)
register vm_offset_t pa;
pmap_testbit(m, bit)
vm_page_t m;
int bit;
{
register pv_entry_t pv;
pv_table_t *ppv;
pv_entry_t pv;
unsigned *pte;
int s;
if (!pmap_is_managed(pa))
if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
return FALSE;
ppv = pa_to_pvh(pa);
if (TAILQ_FIRST(&ppv->pv_list) == NULL)
if (TAILQ_FIRST(&m->md.pv_list) == NULL)
return FALSE;
s = splvm();
for (pv = TAILQ_FIRST(&ppv->pv_list);
for (pv = TAILQ_FIRST(&m->md.pv_list);
pv;
pv = TAILQ_NEXT(pv, pv_list)) {
@ -3020,27 +2974,25 @@ pmap_testbit(pa, bit)
* this routine is used to modify bits in ptes
*/
static __inline void
pmap_changebit(pa, bit, setem)
vm_offset_t pa;
pmap_changebit(m, bit, setem)
vm_page_t m;
int bit;
boolean_t setem;
{
register pv_entry_t pv;
pv_table_t *ppv;
register unsigned *pte;
int s;
if (!pmap_is_managed(pa))
if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
return;
s = splvm();
ppv = pa_to_pvh(pa);
/*
* Loop over all current mappings setting/clearing as apropos.  If
* setting RO do we need to clear the VAC?
*/
for (pv = TAILQ_FIRST(&ppv->pv_list);
for (pv = TAILQ_FIRST(&m->md.pv_list);
pv;
pv = TAILQ_NEXT(pv, pv_list)) {
@ -3069,7 +3021,7 @@ pmap_changebit(pa, bit, setem)
if (pbits & bit) {
if (bit == PG_RW) {
if (pbits & PG_M) {
vm_page_dirty(ppv->pv_vm_page);
vm_page_dirty(m);
}
*(int *)pte = pbits & ~(PG_M|PG_RW);
} else {
@ -3082,33 +3034,19 @@ pmap_changebit(pa, bit, setem)
splx(s);
}
/*
* pmap_clearbit:
*
* Clear a bit/bits in every pte mapping a given physical page. Making
* this inline allows the pmap_changebit inline to be well optimized.
*/
static __inline void
pmap_clearbit(
vm_offset_t pa,
int bit)
{
pmap_changebit(pa, bit, FALSE);
}
/*
* pmap_page_protect:
*
* Lower the permission for all mappings to a given page.
*/
void
pmap_page_protect(vm_offset_t phys, vm_prot_t prot)
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
if ((prot & VM_PROT_WRITE) == 0) {
if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
pmap_clearbit(phys, PG_RW);
pmap_changebit(m, PG_RW, FALSE);
} else {
pmap_remove_all(phys);
pmap_remove_all(m);
}
}
}
@ -3126,31 +3064,28 @@ pmap_phys_address(ppn)
* Return the count of reference bits for a page, clearing all of them.
*/
int
pmap_ts_referenced(vm_offset_t pa)
pmap_ts_referenced(vm_page_t m)
{
register pv_entry_t pv, pvf, pvn;
pv_table_t *ppv;
unsigned *pte;
int s;
int rtval = 0;
if (!pmap_is_managed(pa))
if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
return (rtval);
s = splvm();
ppv = pa_to_pvh(pa);
if ((pv = TAILQ_FIRST(&ppv->pv_list)) != NULL) {
if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
pvf = pv;
do {
pvn = TAILQ_NEXT(pv, pv_list);
TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
if (!pmap_track_modified(pv->pv_va))
continue;
@ -3181,18 +3116,18 @@ pmap_ts_referenced(vm_offset_t pa)
* in any physical maps.
*/
boolean_t
pmap_is_modified(vm_offset_t pa)
pmap_is_modified(vm_page_t m)
{
return pmap_testbit((pa), PG_M);
return pmap_testbit(m, PG_M);
}
/*
* Clear the modify bits on the specified physical page.
*/
void
pmap_clear_modify(vm_offset_t pa)
pmap_clear_modify(vm_page_t m)
{
pmap_clearbit(pa, PG_M);
pmap_changebit(m, PG_M, FALSE);
}
/*
@ -3201,9 +3136,9 @@ pmap_clear_modify(vm_offset_t pa)
* Clear the reference bit on the specified physical page.
*/
void
pmap_clear_reference(vm_offset_t pa)
pmap_clear_reference(vm_page_t m)
{
pmap_clearbit(pa, PG_A);
pmap_changebit(m, PG_A, FALSE);
}
/*
@ -3304,7 +3239,6 @@ pmap_mincore(pmap, addr)
}
if ((pte = *ptep) != 0) {
pv_table_t *ppv;
vm_offset_t pa;
val = MINCORE_INCORE;
@ -3313,8 +3247,7 @@ pmap_mincore(pmap, addr)
pa = pte & PG_FRAME;
ppv = pa_to_pvh((pa & PG_FRAME));
m = ppv->pv_vm_page;
m = PHYS_TO_VM_PAGE(pa);
/*
* Modified by us
@ -3324,7 +3257,7 @@ pmap_mincore(pmap, addr)
/*
* Modified by someone
*/
else if (m->dirty || pmap_is_modified(pa))
else if (m->dirty || pmap_is_modified(m))
val |= MINCORE_MODIFIED_OTHER;
/*
* Referenced by us
@ -3335,7 +3268,7 @@ pmap_mincore(pmap, addr)
/*
* Referenced by someone
*/
else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(pa)) {
else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
val |= MINCORE_REFERENCED_OTHER;
vm_page_flag_set(m, PG_REFERENCED);
}
@ -3361,7 +3294,8 @@ pmap_activate(struct proc *p)
}
vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) {
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{
if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
return addr;
@ -3373,7 +3307,8 @@ pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) {
#if defined(PMAP_DEBUG)
pmap_pid_dump(int pid) {
pmap_pid_dump(int pid)
{
pmap_t pmap;
struct proc *p;
int npte = 0;
@ -3407,7 +3342,7 @@ pmap_pid_dump(int pid) {
vm_offset_t pa;
vm_page_t m;
pa = *(int *)pte;
m = PHYS_TO_VM_PAGE((pa & PG_FRAME));
m = PHYS_TO_VM_PAGE(pa);
printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
va, pa, m->hold_count, m->wire_count, m->flags);
npte++;
@ -3462,12 +3397,12 @@ void
pmap_pvdump(pa)
vm_offset_t pa;
{
pv_table_t *ppv;
register pv_entry_t pv;
vm_page_t m;
printf("pa %x", pa);
ppv = pa_to_pvh(pa);
for (pv = TAILQ_FIRST(&ppv->pv_list);
m = PHYS_TO_VM_PAGE(pa);
for (pv = TAILQ_FIRST(&m->md.pv_list);
pv;
pv = TAILQ_NEXT(pv, pv_list)) {
#ifdef used_to_be


@ -188,11 +188,11 @@ pmap_kextract(vm_offset_t va)
* Pmap stuff
*/
struct pv_entry;
typedef struct {
struct md_page {
int pv_list_count;
struct vm_page *pv_vm_page;
TAILQ_HEAD(,pv_entry) pv_list;
} pv_table_t;
};
struct pmap {
pd_entry_t *pm_pdir; /* KVA of page directory */
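
With the old pv_table folded into the page as m->md, code that used to go through pa_to_pvh() now reaches the pv list directly from the vm_page_t.  A minimal traversal sketch, using only names that appear in the pmap code above:

	pv_entry_t pv;

	/* Walk every (pmap, va) mapping of a managed page m. */
	for (pv = TAILQ_FIRST(&m->md.pv_list);
	     pv;
	     pv = TAILQ_NEXT(pv, pv_list)) {
		/* pv->pv_pmap and pv->pv_va identify one mapping of m. */
	}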


@ -2167,11 +2167,10 @@ wddump(dev_t dev)
}
while (blkcnt != 0) {
if (is_physical_memory((vm_offset_t)addr))
pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
trunc_page((vm_offset_t)addr), VM_PROT_READ, TRUE);
pmap_kenter((vm_offset_t)CADDR1,
trunc_page((vm_offset_t)addr));
else
pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
trunc_page(0), VM_PROT_READ, TRUE);
pmap_kenter((vm_offset_t)CADDR1, trunc_page(0));
/* Ready to send data? */
DELAY(5); /* ATA spec */


@ -94,28 +94,28 @@ struct proc;
void pmap_page_is_free __P((vm_page_t m));
#endif
void pmap_change_wiring __P((pmap_t, vm_offset_t, boolean_t));
void pmap_clear_modify __P((vm_offset_t pa));
void pmap_clear_reference __P((vm_offset_t pa));
void pmap_clear_modify __P((vm_page_t m));
void pmap_clear_reference __P((vm_page_t m));
void pmap_collect __P((void));
void pmap_copy __P((pmap_t, pmap_t, vm_offset_t, vm_size_t,
vm_offset_t));
void pmap_copy_page __P((vm_offset_t, vm_offset_t));
void pmap_destroy __P((pmap_t));
void pmap_enter __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t,
void pmap_enter __P((pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
boolean_t));
vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
vm_offset_t pmap_extract __P((pmap_t pmap, vm_offset_t va));
void pmap_growkernel __P((vm_offset_t));
void pmap_init __P((vm_offset_t, vm_offset_t));
boolean_t pmap_is_modified __P((vm_offset_t pa));
boolean_t pmap_ts_referenced __P((vm_offset_t pa));
void pmap_kenter __P((vm_offset_t, vm_offset_t));
boolean_t pmap_is_modified __P((vm_page_t m));
boolean_t pmap_ts_referenced __P((vm_page_t m));
void pmap_kenter __P((vm_offset_t va, vm_offset_t pa));
void pmap_kremove __P((vm_offset_t));
vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
void pmap_object_init_pt __P((pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_offset_t size,
int pagelimit));
boolean_t pmap_page_exists __P((pmap_t, vm_offset_t));
void pmap_page_protect __P((vm_offset_t, vm_prot_t));
boolean_t pmap_page_exists __P((pmap_t pmap, vm_page_t m));
void pmap_page_protect __P((vm_page_t m, vm_prot_t prot));
void pmap_pageable __P((pmap_t, vm_offset_t, vm_offset_t,
boolean_t));
vm_offset_t pmap_phys_address __P((int));
@ -140,7 +140,7 @@ void pmap_swapout_proc __P((struct proc *p));
void pmap_swapin_proc __P((struct proc *p));
void pmap_activate __P((struct proc *p));
vm_offset_t pmap_addr_hint __P((vm_object_t obj, vm_offset_t addr, vm_size_t size));
void pmap_init2 __P((void));
void pmap_init2 __P((void));
#endif /* _KERNEL */
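
The prototype changes above shift the interface from physical addresses to vm_page_t pointers, and the call sites in the following hunks change accordingly.  For example (mirroring the swap_pager change just below):

	/* old interface: translate to a physical address first */
	pmap_clear_modify(VM_PAGE_TO_PHYS(m));

	/* new interface: pass the page itself */
	pmap_clear_modify(m);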


@ -1592,7 +1592,7 @@ swp_pager_async_iodone(bp)
* valid bits here, it is up to the caller.
*/
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
pmap_clear_modify(m);
m->valid = VM_PAGE_BITS_ALL;
vm_page_undirty(m);
vm_page_flag_clear(m, PG_ZERO);
@ -1618,7 +1618,7 @@ swp_pager_async_iodone(bp)
* busy count and possibly wakes waiter's up ).
*/
vm_page_protect(m, VM_PROT_READ);
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
pmap_clear_modify(m);
vm_page_undirty(m);
vm_page_io_finish(m);
}


@ -826,7 +826,7 @@ RetryFault:;
printf("Warning: page %p partially invalid on fault\n", fs.m);
}
pmap_enter(fs.map->pmap, vaddr, VM_PAGE_TO_PHYS(fs.m), prot, wired);
pmap_enter(fs.map->pmap, vaddr, fs.m, prot, wired);
if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
pmap_prefault(fs.map->pmap, vaddr, fs.entry);
@ -1075,8 +1075,7 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
*/
vm_page_flag_clear(dst_m, PG_ZERO);
pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
prot, FALSE);
pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE);
vm_page_flag_set(dst_m, PG_WRITEABLE|PG_MAPPED);
/*


@ -399,8 +399,7 @@ kmem_malloc(map, size, flags)
/*
* Because this is kernel_pmap, this call will not block.
*/
pmap_enter(kernel_pmap, addr + i, VM_PAGE_TO_PHYS(m),
VM_PROT_ALL, 1);
pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
}
vm_map_unlock(map);


@ -809,10 +809,10 @@ mincore(p, uap)
if (m) {
mincoreinfo = MINCORE_INCORE;
if (m->dirty ||
pmap_is_modified(VM_PAGE_TO_PHYS(m)))
pmap_is_modified(m))
mincoreinfo |= MINCORE_MODIFIED_OTHER;
if ((m->flags & PG_REFERENCED) ||
pmap_ts_referenced(VM_PAGE_TO_PHYS(m))) {
pmap_ts_referenced(m)) {
vm_page_flag_set(m, PG_REFERENCED);
mincoreinfo |= MINCORE_REFERENCED_OTHER;
}


@ -867,7 +867,7 @@ vm_object_madvise(object, pindex, count, advise)
* can without actually taking the step of unmapping
* it.
*/
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
pmap_clear_modify(m);
m->dirty = 0;
m->act_count = 0;
vm_page_dontneed(m);


@ -119,7 +119,7 @@ vm_page_queue_init(void) {
}
vm_page_t vm_page_array = 0;
static int vm_page_array_size = 0;
int vm_page_array_size = 0;
long first_page = 0;
int vm_page_zero_count = 0;
@ -142,6 +142,30 @@ vm_set_page_size()
panic("vm_set_page_size: page size not a power of two");
}
/*
* vm_add_new_page:
*
* Add a new page to the freelist for use by the system.
* Must be called at splhigh().
*/
vm_page_t
vm_add_new_page(pa)
vm_offset_t pa;
{
vm_page_t m;
++cnt.v_page_count;
++cnt.v_free_count;
m = PHYS_TO_VM_PAGE(pa);
m->phys_addr = pa;
m->flags = 0;
m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
m->queue = m->pc + PQ_FREE;
TAILQ_INSERT_HEAD(&vm_page_queues[m->queue].pl, m, pageq);
vm_page_queues[m->queue].lcnt++;
return (m);
}
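
A hypothetical caller (not part of this commit) that wants to hand a spare, page-aligned physical page back to the free queues would look like this; splhigh()/splx() bracket the call as required by the comment above:

	vm_offset_t pa;		/* hypothetical: a spare page-aligned physical address */
	int s;

	s = splhigh();
	(void) vm_add_new_page(pa);
	splx(s);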
/*
* vm_page_startup:
*
@ -159,7 +183,6 @@ vm_page_startup(starta, enda, vaddr)
register vm_offset_t vaddr;
{
register vm_offset_t mapped;
register vm_page_t m;
register struct vm_page **bucket;
vm_size_t npages, page_range;
register vm_offset_t new_start;
@ -296,15 +319,7 @@ vm_page_startup(starta, enda, vaddr)
else
pa = phys_avail[i];
while (pa < phys_avail[i + 1] && npages-- > 0) {
++cnt.v_page_count;
++cnt.v_free_count;
m = PHYS_TO_VM_PAGE(pa);
m->phys_addr = pa;
m->flags = 0;
m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
m->queue = m->pc + PQ_FREE;
TAILQ_INSERT_HEAD(&vm_page_queues[m->queue].pl, m, pageq);
vm_page_queues[m->queue].lcnt++;
vm_add_new_page(pa);
pa += PAGE_SIZE;
}
}
@ -1518,7 +1533,7 @@ vm_page_set_validclean(m, base, size)
m->valid |= pagebits;
m->dirty &= ~pagebits;
if (base == 0 && size == PAGE_SIZE) {
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
pmap_clear_modify(m);
vm_page_flag_clear(m, PG_NOSYNC);
}
}
@ -1649,8 +1664,7 @@ void
vm_page_test_dirty(m)
vm_page_t m;
{
if ((m->dirty != VM_PAGE_BITS_ALL) &&
pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
vm_page_dirty(m);
}
}


@ -117,6 +117,7 @@ struct vm_page {
vm_object_t object; /* which object am I in (O,P)*/
vm_pindex_t pindex; /* offset into object (O,P) */
vm_offset_t phys_addr; /* physical address of page */
struct md_page md; /* machine dependent stuff */
u_short queue; /* page queue index */
u_short flags, /* see below */
pc; /* page color */
@ -278,6 +279,7 @@ extern struct vpgqueues vm_page_queues[PQ_COUNT];
extern int vm_page_zero_count;
extern vm_page_t vm_page_array; /* First resident page in table */
extern int vm_page_array_size; /* number of vm_page_t's */
extern long first_page; /* first physical page number */
#define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr)
@ -396,6 +398,7 @@ vm_page_t vm_page_lookup __P((vm_object_t, vm_pindex_t));
void vm_page_remove __P((vm_page_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
vm_page_t vm_add_new_page __P((vm_offset_t pa));
void vm_page_unwire __P((vm_page_t, int));
void vm_page_wire __P((vm_page_t));
void vm_page_unqueue __P((vm_page_t));
@ -448,11 +451,11 @@ vm_page_protect(vm_page_t mem, int prot)
{
if (prot == VM_PROT_NONE) {
if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_NONE);
pmap_page_protect(mem, VM_PROT_NONE);
vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
}
} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_READ);
pmap_page_protect(mem, VM_PROT_READ);
vm_page_flag_clear(mem, PG_WRITEABLE);
}
}


@ -395,7 +395,7 @@ vm_pageout_flush(mc, count, flags)
* essentially lose the changes by pretending it
* worked.
*/
pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
pmap_clear_modify(mt);
vm_page_undirty(mt);
break;
case VM_PAGER_ERROR:
@ -475,12 +475,12 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
p->hold_count != 0 ||
p->busy != 0 ||
(p->flags & PG_BUSY) ||
!pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
!pmap_page_exists(vm_map_pmap(map), p)) {
p = next;
continue;
}
actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
actcount = pmap_ts_referenced(p);
if (actcount) {
vm_page_flag_set(p, PG_REFERENCED);
} else if (p->flags & PG_REFERENCED) {
@ -709,7 +709,7 @@ vm_pageout_scan()
*/
if (m->object->ref_count == 0) {
vm_page_flag_clear(m, PG_REFERENCED);
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
pmap_clear_reference(m);
/*
* Otherwise, if the page has been referenced while in the
@ -721,7 +721,7 @@ vm_pageout_scan()
* references.
*/
} else if (((m->flags & PG_REFERENCED) == 0) &&
(actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) {
(actcount = pmap_ts_referenced(m))) {
vm_page_activate(m);
m->act_count += (actcount + ACT_ADVANCE);
continue;
@ -735,7 +735,7 @@ vm_pageout_scan()
*/
if ((m->flags & PG_REFERENCED) != 0) {
vm_page_flag_clear(m, PG_REFERENCED);
actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
actcount = pmap_ts_referenced(m);
vm_page_activate(m);
m->act_count += (actcount + ACT_ADVANCE + 1);
continue;
@ -987,7 +987,7 @@ vm_pageout_scan()
if (m->flags & PG_REFERENCED) {
actcount += 1;
}
actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
actcount += pmap_ts_referenced(m);
if (actcount) {
m->act_count += ACT_ADVANCE + actcount;
if (m->act_count > ACT_MAX)
@ -1199,7 +1199,7 @@ vm_pageout_page_stats()
actcount += 1;
}
actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
actcount += pmap_ts_referenced(m);
if (actcount) {
m->act_count += ACT_ADVANCE + actcount;
if (m->act_count > ACT_MAX)


@ -452,7 +452,7 @@ vnode_pager_input_smlfs(object, m)
}
}
vm_pager_unmap_page(kva);
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
pmap_clear_modify(m);
vm_page_flag_clear(m, PG_ZERO);
if (error) {
return VM_PAGER_ERROR;
@ -515,7 +515,7 @@ vnode_pager_input_old(object, m)
}
vm_pager_unmap_page(kva);
}
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
pmap_clear_modify(m);
vm_page_undirty(m);
vm_page_flag_clear(m, PG_ZERO);
if (!error)
@ -782,7 +782,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
*/
mt->valid = VM_PAGE_BITS_ALL;
vm_page_undirty(mt); /* should be an assert? XXX */
pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
pmap_clear_modify(mt);
} else {
/*
* Read did not fill up entire page. Since this