- Eliminate the pte object.

- Use kmem_alloc_nofault() rather than kmem_alloc_pageable() to allocate
  KVA space for the page directory page(s).

Submitted by:	tegge
alc 2003-09-25 02:51:06 +00:00
parent 5505d23553
commit 3f8be813be
3 changed files with 18 additions and 28 deletions

@@ -188,7 +188,6 @@ acpi_sleep_machdep(struct acpi_softc *sc, int state)
 	vm_page_t page;
 	static vm_page_t opage = NULL;
 	int ret = 0;
-	int pteobj_allocated = 0;
 	u_int32_t cr3;
 	u_long ef;
 	struct proc *p;
@@ -211,10 +210,6 @@ acpi_sleep_machdep(struct acpi_softc *sc, int state)
 #else
 	load_cr3(vtophys(pm->pm_pdir));
 #endif
-	if (pm->pm_pteobj == NULL) {
-		pm->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI + 1);
-		pteobj_allocated = 1;
-	}
 	oldphys = pmap_extract(pm, sc->acpi_wakephys);
 	if (oldphys)
@@ -290,10 +285,6 @@ acpi_sleep_machdep(struct acpi_softc *sc, int state)
 		    VM_PROT_READ | VM_PROT_WRITE, 0);
 	}
-	if (pteobj_allocated) {
-		vm_object_deallocate(pm->pm_pteobj);
-		pm->pm_pteobj = NULL;
-	}
 	load_cr3(cr3);
 	write_eflags(ef);
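
The practical effect of this first diff: acpi_sleep_machdep() used to have to
conjure up a transient pte object before it could call pmap_extract() and
pmap_enter() on a pmap that had never faulted, and tear that object down again
on the way out. With page table pages no longer living in a VM object, the
bracketing disappears and the wakeup-page setup reduces to the pmap calls
themselves. A condensed sketch of the surviving sequence (identifiers come
from the diff context above; surrounding error handling and the sleep itself
are elided):

	oldphys = pmap_extract(pm, sc->acpi_wakephys);
	if (oldphys)
		opage = PHYS_TO_VM_PAGE(oldphys);
	/* ... obtain the wakeup code page ... */
	pmap_enter(pm, sc->acpi_wakephys, page,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	/* ... enter the sleep state and resume ... */
	load_cr3(cr3);
	write_eflags(ef);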

@@ -1091,7 +1091,7 @@ pmap_pinit(pmap)
 	 * page directory table.
 	 */
 	if (pmap->pm_pdir == NULL) {
-		pmap->pm_pdir = (pd_entry_t *)kmem_alloc_pageable(kernel_map,
+		pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
 		    NBPTD);
 #ifdef PAE
 		pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
@@ -1103,13 +1103,6 @@ pmap_pinit(pmap)
 #endif
 	}
-	/*
-	 * allocate object for the ptes
-	 */
-	if (pmap->pm_pteobj == NULL)
-		pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI +
-		    NPGPTD);
 	/*
 	 * allocate the page directory page(s)
 	 */
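
The kmem change above is about what backs the page directory's KVA.
kmem_alloc_pageable() returns kernel virtual address space that the VM system
considers itself responsible for populating on fault; kmem_alloc_nofault()
merely reserves the range, so nothing backs it and any stray access faults.
That is exactly right here, because pmap_pinit() allocates the wired page
directory page(s) itself and installs them explicitly. A minimal sketch of
the resulting pattern, assuming the pmap_qenter() step from the unchanged
remainder of the function (the loop and flag combination are an illustration,
not a quote of the source):

	pd_entry_t *pdir;
	vm_page_t ptdpg[NPGPTD];
	int i;

	/* Reserve KVA only; no backing object, no fault handling. */
	pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map, NBPTD);

	/* Allocate the wired, object-less page directory page(s)... */
	for (i = 0; i < NPGPTD; i++)
		while ((ptdpg[i] = vm_page_alloc(NULL, i, VM_ALLOC_NOOBJ |
		    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
			VM_WAIT;

	/* ...and map them into the reserved range by hand. */
	pmap_qenter((vm_offset_t)pdir, ptdpg, NPGPTD);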
@@ -1187,9 +1180,10 @@ _pmap_allocpte(pmap, ptepindex)
 	/*
 	 * Find or fabricate a new pagetable page
 	 */
-	VM_OBJECT_LOCK(pmap->pm_pteobj);
-	m = vm_page_grab(pmap->pm_pteobj, ptepindex,
-	    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
+	m = vm_page_alloc(NULL, ptepindex,
+	    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_NOOBJ);
+	if (m == NULL)
+		return (m);
 	if ((m->flags & PG_ZERO) == 0)
 		pmap_zero_page(m);
@@ -1218,7 +1212,6 @@ _pmap_allocpte(pmap, ptepindex)
 	vm_page_flag_clear(m, PG_ZERO);
 	vm_page_wakeup(m);
 	vm_page_unlock_queues();
-	VM_OBJECT_UNLOCK(pmap->pm_pteobj);
 	return m;
 }
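
The core of the pte-object elimination is visible in _pmap_allocpte(). Before,
a page table page was a page at index ptepindex inside pm_pteobj, obtained
with vm_page_grab(); VM_ALLOC_RETRY makes grab sleep and retry internally, so
it never returns NULL, but it requires the object (and its lock) to exist.
Now the page comes straight off the free queues with no owning object, and
that call can fail, so the NULL case must be surfaced to the caller. Side by
side, using only calls that appear in the diff:

	/* Before: page belongs to the per-pmap pte object; cannot fail. */
	VM_OBJECT_LOCK(pmap->pm_pteobj);
	m = vm_page_grab(pmap->pm_pteobj, ptepindex,
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);

	/* After: anonymous page, no object or object lock; may fail. */
	m = vm_page_alloc(NULL, ptepindex,
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_NOOBJ);
	if (m == NULL)
		return (m);	/* callers re-check the PDE and retry */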
@@ -1234,7 +1227,7 @@ pmap_allocpte(pmap_t pmap, vm_offset_t va)
 	 * Calculate pagetable page index
 	 */
 	ptepindex = va >> PDRSHIFT;
+retry:
 	/*
 	 * Get the page directory entry
 	 */
@@ -1257,12 +1250,16 @@ pmap_allocpte(pmap_t pmap, vm_offset_t va)
 	if (ptepa) {
 		m = PHYS_TO_VM_PAGE(ptepa);
 		m->hold_count++;
-		return m;
+	} else {
+		/*
+		 * Here if the pte page isn't mapped, or if it has
+		 * been deallocated.
+		 */
+		m = _pmap_allocpte(pmap, ptepindex);
+		if (m == NULL)
+			goto retry;
 	}
-	/*
-	 * Here if the pte page isn't mapped, or if it has been deallocated.
-	 */
-	return _pmap_allocpte(pmap, ptepindex);
+	return (m);
 }
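
Since _pmap_allocpte() can now fail, its callers gain a retry loop: on NULL
they jump back to retry: and re-read the page directory entry, because by the
time the allocation is reattempted the pte page may already be present, in
which case the hold_count path is taken instead of allocating again.
pmap_enter_quick() below acquires the same idiom. Condensed, the control flow
is as follows (the PDE read is paraphrased from the surrounding function, and
its 4MB-mapping checks are elided):

	ptepindex = va >> PDRSHIFT;
retry:
	ptepa = pmap->pm_pdir[ptepindex];
	/* ... 4MB-mapping and kernel-PT special cases elided ... */
	if (ptepa) {
		m = PHYS_TO_VM_PAGE(ptepa);
		m->hold_count++;
	} else {
		m = _pmap_allocpte(pmap, ptepindex);
		if (m == NULL)
			goto retry;
	}
	return (m);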
@@ -2086,6 +2083,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 		if (mpte && (mpte->pindex == ptepindex)) {
 			mpte->hold_count++;
 		} else {
+retry:
 			/*
 			 * Get the page directory entry
 			 */
@@ -2102,6 +2100,8 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 				mpte->hold_count++;
 			} else {
 				mpte = _pmap_allocpte(pmap, ptepindex);
+				if (mpte == NULL)
+					goto retry;
 			}
 		}
 	} else {

@@ -281,7 +281,6 @@ struct md_page {
 struct pmap {
 	pd_entry_t *pm_pdir;		/* KVA of page directory */
-	vm_object_t pm_pteobj;		/* Container for pte's */
 	TAILQ_HEAD(,pv_entry) pm_pvlist;	/* list of mappings in pmap */
 	u_int pm_active;		/* active on cpus */
 	struct pmap_statistics pm_stats;	/* pmap statistics */