Introduce a procedure, pmap_page_init(), that initializes the
vm_page's machine-dependent fields.  Use this function in
vm_pageq_add_new_page() so that the vm_page's machine-dependent and
machine-independent fields are initialized at the same time.
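
For reference, the new hook and its call site look like this when
consolidated from the hunks below (a sketch; the pv-list fields shown
are the alpha/amd64/i386 flavor, and each architecture initializes its
own machine-dependent fields):

	/*
	 * Initialize a vm_page's machine-dependent fields.
	 */
	void
	pmap_page_init(vm_page_t m)
	{
		TAILQ_INIT(&m->md.pv_list);
		m->md.pv_list_count = 0;
	}

	/* vm_pageq_add_new_page() now initializes MD and MI state together: */
	m->phys_addr = pa;
	m->flags = 0;
	m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
	pmap_page_init(m);
	vm_pageq_enqueue(m->pc + PQ_FREE, m);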

Remove code from pmap_init() for initializing the vm_page's
machine-dependent fields.

Remove stale comments from pmap_init().

Eliminate the Boolean variable pmap_initialized from the alpha, amd64,
i386, and ia64 pmap implementations.  Its use is no longer required
because of the above changes, together with earlier changes that ensure
that physical memory mapped at initialization time is mapped without pv
entries.

Tested by: cognet, kensmith, marcel
Alan Cox 2005-06-10 03:33:36 +00:00
parent 3ea6bbc59a
commit 1c245ae7d1
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=147217
11 changed files with 124 additions and 149 deletions

View File

@@ -302,7 +302,6 @@ struct pmap kernel_pmap_store;
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
- static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
static int nklev3, nklev2;
vm_offset_t kernel_vm_end;
@@ -553,30 +552,25 @@ pmap_uses_prom_console()
return (cputype == ST_DEC_21000 || ST_DEC_4100);
}
+ /*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+ void
+ pmap_page_init(vm_page_t m)
+ {
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pv_list_count = 0;
+ }
/*
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
* system needs to map virtual memory.
- * pmap_init has been enhanced to support in a fairly consistant
- * way, discontiguous physical memory.
*/
void
pmap_init(void)
{
- int i;
- /*
- * Allocate memory for random pmap data structures. Includes the
- * pv_head_table.
- */
- for(i = 0; i < vm_page_array_size; i++) {
- vm_page_t m;
- m = &vm_page_array[i];
- TAILQ_INIT(&m->md.pv_list);
- m->md.pv_list_count = 0;
- }
/*
* init the pv free list
@@ -584,11 +578,6 @@ pmap_init(void)
pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
uma_prealloc(pvzone, MINPV);
- /*
- * Now it is safe to enable pv_table recording.
- */
- pmap_initialized = TRUE;
}
/*
@@ -1516,7 +1505,7 @@ pmap_remove_all(vm_page_t m)
* XXX this makes pmap_page_protect(NONE) illegal for non-managed
* pages!
*/
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
+ if (m->flags & PG_FICTITIOUS) {
panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", VM_PAGE_TO_PHYS(m));
}
#endif
@@ -1745,8 +1734,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* raise IPL while manipulating pv_table since pmap_enter can be
* called at interrupt time.
*/
- if (pmap_initialized &&
- (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
pmap_insert_entry(pmap, va, mpte, m);
managed |= PG_MANAGED;
}
@@ -2056,7 +2044,7 @@ pmap_page_exists_quick(pmap, m)
pv_entry_t pv;
int loops = 0;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return FALSE;
/*
@@ -2162,7 +2150,7 @@ pmap_changebit(vm_page_t m, int bit, boolean_t setem)
pt_entry_t *pte;
int changed;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS) ||
+ if ((m->flags & PG_FICTITIOUS) ||
(!setem && bit == (PG_UWE|PG_KWE) &&
(m->flags & PG_WRITEABLE) == 0))
return;
@@ -2247,7 +2235,7 @@ pmap_ts_referenced(vm_page_t m)
pt_entry_t *pte;
int count;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return 0;
/*
@@ -2285,7 +2273,7 @@ pmap_is_modified(vm_page_t m)
boolean_t rv;
rv = FALSE;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return (rv);
/*
@@ -2335,7 +2323,7 @@ pmap_clear_modify(vm_page_t m)
pv_entry_t pv;
pt_entry_t *pte;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return;
/*
@@ -2364,7 +2352,7 @@ pmap_clear_reference(vm_page_t m)
pv_entry_t pv;
pt_entry_t *pte;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return;
/*

View File

@@ -165,7 +165,6 @@ vm_paddr_t avail_start; /* PA of first available physical page */
vm_paddr_t avail_end; /* PA of last available physical page */
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
- static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
static int nkpt;
static int ndmpdp;
@@ -530,30 +529,25 @@ pmap_bootstrap(firstaddr)
invltlb();
}
+ /*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+ void
+ pmap_page_init(vm_page_t m)
+ {
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pv_list_count = 0;
+ }
/*
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
* system needs to map virtual memory.
- * pmap_init has been enhanced to support in a fairly consistant
- * way, discontiguous physical memory.
*/
void
pmap_init(void)
{
- int i;
- /*
- * Allocate memory for random pmap data structures. Includes the
- * pv_head_table.
- */
- for(i = 0; i < vm_page_array_size; i++) {
- vm_page_t m;
- m = &vm_page_array[i];
- TAILQ_INIT(&m->md.pv_list);
- m->md.pv_list_count = 0;
- }
/*
* init the pv free list
@@ -561,11 +555,6 @@ pmap_init(void)
pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
uma_prealloc(pvzone, MINPV);
- /*
- * Now it is safe to enable pv_table recording.
- */
- pmap_initialized = TRUE;
}
/*
@@ -1675,7 +1664,7 @@ pmap_remove_all(vm_page_t m)
/*
* XXX This makes pmap_remove_all() illegal for non-managed pages!
*/
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
+ if (m->flags & PG_FICTITIOUS) {
panic("pmap_remove_all: illegal for unmanaged page, va: 0x%lx",
VM_PAGE_TO_PHYS(m));
}
@@ -1955,8 +1944,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* raise IPL while manipulating pv_table since pmap_enter can be
* called at interrupt time.
*/
- if (pmap_initialized &&
- (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
pmap_insert_entry(pmap, va, m);
pa |= PG_MANAGED;
}
@@ -2449,7 +2437,7 @@ pmap_page_exists_quick(pmap, m)
pv_entry_t pv;
int loops = 0;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return FALSE;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
@@ -2567,7 +2555,7 @@ pmap_is_modified(vm_page_t m)
boolean_t rv;
rv = FALSE;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return (rv);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
@@ -2628,7 +2616,7 @@ pmap_clear_ptes(vm_page_t m, long bit)
register pv_entry_t pv;
pt_entry_t pbits, *pte;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS) ||
+ if ((m->flags & PG_FICTITIOUS) ||
(bit == PG_RW && (m->flags & PG_WRITEABLE) == 0))
return;
@@ -2713,7 +2701,7 @@ pmap_ts_referenced(vm_page_t m)
pt_entry_t v;
int rtval = 0;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return (rtval);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);

View File

@@ -1927,32 +1927,27 @@ pmap_pinit0(struct pmap *pmap)
bcopy(kernel_pmap, pmap, sizeof(*pmap));
}
+ /*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+ void
+ pmap_page_init(vm_page_t m)
+ {
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pv_list_count = 0;
+ }
/*
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
* system needs to map virtual memory.
- * pmap_init has been enhanced to support in a fairly consistant
- * way, discontiguous physical memory.
*/
void
pmap_init(void)
{
- int i;
PDEBUG(1, printf("pmap_init: phys_start = %08x\n"));
- /*
- * Allocate memory for random pmap data structures. Includes the
- * pv_head_table.
- */
- for(i = 0; i < vm_page_array_size; i++) {
- vm_page_t m;
- m = &vm_page_array[i];
- TAILQ_INIT(&m->md.pv_list);
- m->md.pv_list_count = 0;
- }
/*
* init the pv free list
@@ -3156,7 +3151,7 @@ pmap_remove_all(vm_page_t m)
* XXX this makes pmap_page_protect(NONE) illegal for non-managed
* pages!
*/
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
+ if (m->flags & PG_FICTITIOUS) {
panic("pmap_page_protect: illegal for unmanaged page, va: 0x%x", VM_PAGE_TO_PHYS(m));
}
#endif
@@ -4331,7 +4326,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
int loops = 0;
int s;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return (FALSE);
s = splvm();

View File

@@ -190,7 +190,6 @@ static struct mtx allpmaps_lock;
vm_paddr_t avail_end; /* PA of last available physical page */
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
- static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
int pgeflag = 0; /* PG_G or-in */
int pseflag = 0; /* PG_PS or-in */
@@ -443,6 +442,17 @@ pmap_set_pg(void)
}
}
+ /*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+ void
+ pmap_page_init(vm_page_t m)
+ {
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pv_list_count = 0;
+ }
#ifdef PAE
static MALLOC_DEFINE(M_PMAPPDPT, "pmap", "pmap pdpt");
@@ -460,26 +470,10 @@ pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
* system needs to map virtual memory.
- * pmap_init has been enhanced to support in a fairly consistant
- * way, discontiguous physical memory.
*/
void
pmap_init(void)
{
- int i;
- /*
- * Allocate memory for random pmap data structures. Includes the
- * pv_head_table.
- */
- for(i = 0; i < vm_page_array_size; i++) {
- vm_page_t m;
- m = &vm_page_array[i];
- TAILQ_INIT(&m->md.pv_list);
- m->md.pv_list_count = 0;
- }
/*
* init the pv free list
@@ -494,11 +488,6 @@ pmap_init(void)
UMA_ZONE_VM | UMA_ZONE_NOFREE);
uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
#endif
- /*
- * Now it is safe to enable pv_table recording.
- */
- pmap_initialized = TRUE;
}
/*
@@ -1714,7 +1703,7 @@ pmap_remove_all(vm_page_t m)
/*
* XXX This makes pmap_remove_all() illegal for non-managed pages!
*/
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
+ if (m->flags & PG_FICTITIOUS) {
panic("pmap_remove_all: illegal for unmanaged page, va: 0x%x",
VM_PAGE_TO_PHYS(m));
}
@@ -1997,8 +1986,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* raise IPL while manipulating pv_table since pmap_enter can be
* called at interrupt time.
*/
- if (pmap_initialized &&
- (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
pmap_insert_entry(pmap, va, m);
pa |= PG_MANAGED;
}
@@ -2525,7 +2513,7 @@ pmap_page_exists_quick(pmap, m)
pv_entry_t pv;
int loops = 0;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return FALSE;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
@@ -2645,7 +2633,7 @@ pmap_is_modified(vm_page_t m)
boolean_t rv;
rv = FALSE;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return (rv);
sched_pin();
@@ -2708,7 +2696,7 @@ pmap_clear_ptes(vm_page_t m, int bit)
register pv_entry_t pv;
pt_entry_t pbits, *pte;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS) ||
+ if ((m->flags & PG_FICTITIOUS) ||
(bit == PG_RW && (m->flags & PG_WRITEABLE) == 0))
return;
@@ -2800,7 +2788,7 @@ pmap_ts_referenced(vm_page_t m)
pt_entry_t v;
int rtval = 0;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return (rtval);
sched_pin();

View File

@@ -154,7 +154,6 @@ struct pmap kernel_pmap_store;
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
- static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
vm_offset_t vhpt_base, vhpt_size;
struct mtx pmap_vhptmutex;
@@ -443,30 +442,25 @@ pmap_bootstrap()
map_gateway_page();
}
+ /*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+ void
+ pmap_page_init(vm_page_t m)
+ {
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pv_list_count = 0;
+ }
/*
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
* system needs to map virtual memory.
- * pmap_init has been enhanced to support in a fairly consistant
- * way, discontiguous physical memory.
*/
void
pmap_init(void)
{
- int i;
- /*
- * Allocate memory for random pmap data structures. Includes the
- * pv_head_table.
- */
- for(i = 0; i < vm_page_array_size; i++) {
- vm_page_t m;
- m = &vm_page_array[i];
- TAILQ_INIT(&m->md.pv_list);
- m->md.pv_list_count = 0;
- }
/*
* Init the pv free list and the PTE free list.
@@ -478,11 +472,6 @@ pmap_init(void)
ptezone = uma_zcreate("PT ENTRY", sizeof (struct ia64_lpte),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
uma_prealloc(ptezone, MINPV);
- /*
- * Now it is safe to enable pv_table recording.
- */
- pmap_initialized = TRUE;
}
/*
@@ -1391,7 +1380,7 @@ pmap_remove_all(vm_page_t m)
* XXX this makes pmap_page_protect(NONE) illegal for non-managed
* pages!
*/
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
+ if (m->flags & PG_FICTITIOUS) {
panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", VM_PAGE_TO_PHYS(m));
}
#endif
@@ -1572,8 +1561,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/*
* Enter on the PV list if part of our managed memory.
*/
- if (pmap_initialized &&
- (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
pmap_insert_entry(pmap, va, m);
managed = TRUE;
}
@@ -1648,8 +1636,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
/*
* Enter on the PV list since its part of our managed memory.
*/
- if (pmap_initialized &&
- (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
pmap_insert_entry(pmap, va, m);
managed = TRUE;
}
@@ -1809,7 +1796,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
int loops = 0;
int s;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return FALSE;
s = splvm();
@@ -1932,7 +1919,7 @@ pmap_ts_referenced(vm_page_t m)
pv_entry_t pv;
int count = 0;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return 0;
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -1965,7 +1952,7 @@ pmap_is_referenced(vm_page_t m)
{
pv_entry_t pv;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return FALSE;
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -1996,7 +1983,7 @@ pmap_is_modified(vm_page_t m)
boolean_t rv;
rv = FALSE;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return (rv);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2041,7 +2028,7 @@ pmap_clear_modify(vm_page_t m)
pmap_t oldpmap;
pv_entry_t pv;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return;
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
@@ -2071,7 +2058,7 @@ pmap_clear_reference(vm_page_t m)
pmap_t oldpmap;
pv_entry_t pv;
- if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
+ if (m->flags & PG_FICTITIOUS)
return;
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {

View File

@@ -1136,6 +1136,14 @@ pmap_growkernel(vm_offset_t addr)
{
}
+ /*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+ void
+ pmap_page_init(vm_page_t m)
+ {
+ }
void
pmap_init(void)
{

View File

@@ -1136,6 +1136,14 @@ pmap_growkernel(vm_offset_t addr)
{
}
+ /*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+ void
+ pmap_page_init(vm_page_t m)
+ {
+ }
void
pmap_init(void)
{

View File

@@ -1136,6 +1136,14 @@ pmap_growkernel(vm_offset_t addr)
{
}
+ /*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+ void
+ pmap_page_init(vm_page_t m)
+ {
+ }
void
pmap_init(void)
{

View File

@@ -554,6 +554,19 @@ pmap_bootstrap_alloc(vm_size_t size)
panic("pmap_bootstrap_alloc");
}
+ /*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+ void
+ pmap_page_init(vm_page_t m)
+ {
+ TAILQ_INIT(&m->md.tte_list);
+ m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m));
+ m->md.flags = 0;
+ m->md.pmap = NULL;
+ }
/*
* Initialize the pmap module.
*/
@@ -565,16 +578,6 @@ pmap_init(void)
int result;
int i;
- for (i = 0; i < vm_page_array_size; i++) {
- vm_page_t m;
- m = &vm_page_array[i];
- TAILQ_INIT(&m->md.tte_list);
- m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m));
- m->md.flags = 0;
- m->md.pmap = NULL;
- }
for (i = 0; i < translations_size; i++) {
addr = translations[i].om_start;
size = translations[i].om_size;

View File

@@ -113,6 +113,7 @@ vm_offset_t pmap_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size);
boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m);
+ void pmap_page_init(vm_page_t m);
void pmap_page_protect(vm_page_t m, vm_prot_t prot);
void pmap_pinit(pmap_t);
void pmap_pinit0(pmap_t);

View File

@@ -143,6 +143,7 @@ vm_pageq_add_new_page(vm_paddr_t pa)
m->phys_addr = pa;
m->flags = 0;
m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
+ pmap_page_init(m);
vm_pageq_enqueue(m->pc + PQ_FREE, m);
return (m);
}