- Eliminate the pte object.
- Use kmem_alloc_nofault() rather than kmem_alloc_pageable() to allocate
  KVA space for the page directory page(s).

Submitted by:	tegge
commit f3fd831cdd
parent cc3112f108
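The kmem_alloc_nofault() change only affects how pmap_pinit() obtains KVA for the page directory: the range is a bare address-space reservation, and the directory page(s) are wired and mapped into it explicitly, so a pageable (object-backed) mapping is unnecessary. A minimal sketch of that pattern, not the committed code verbatim (ptdpg is assumed to be the array holding the NPGPTD directory pages allocated earlier in pmap_pinit()):

	/*
	 * Reserve NBPTD bytes of kernel address space.  kmem_alloc_nofault()
	 * does not back the range with a VM object, so a stray fault on it
	 * panics instead of silently allocating memory.
	 */
	pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map, NBPTD);

	/*
	 * Enter the wired page directory page(s) into the reserved range;
	 * the mapping is established up front and never faults.
	 */
	pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);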
@@ -188,7 +188,6 @@ acpi_sleep_machdep(struct acpi_softc *sc, int state)
 	vm_page_t page;
 	static vm_page_t opage = NULL;
 	int ret = 0;
-	int pteobj_allocated = 0;
 	u_int32_t cr3;
 	u_long ef;
 	struct proc *p;
@@ -211,10 +210,6 @@ acpi_sleep_machdep(struct acpi_softc *sc, int state)
 #else
 	load_cr3(vtophys(pm->pm_pdir));
 #endif
-	if (pm->pm_pteobj == NULL) {
-		pm->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI + 1);
-		pteobj_allocated = 1;
-	}
 
 	oldphys = pmap_extract(pm, sc->acpi_wakephys);
 	if (oldphys)
@@ -290,10 +285,6 @@ out:
 		    VM_PROT_READ | VM_PROT_WRITE, 0);
 	}
 
-	if (pteobj_allocated) {
-		vm_object_deallocate(pm->pm_pteobj);
-		pm->pm_pteobj = NULL;
-	}
 	load_cr3(cr3);
 
 	write_eflags(ef);
@@ -1091,7 +1091,7 @@ pmap_pinit(pmap)
 	 * page directory table.
 	 */
 	if (pmap->pm_pdir == NULL) {
-		pmap->pm_pdir = (pd_entry_t *)kmem_alloc_pageable(kernel_map,
+		pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
 		    NBPTD);
 #ifdef PAE
 		pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
@@ -1103,13 +1103,6 @@ pmap_pinit(pmap)
 #endif
 	}
 
-	/*
-	 * allocate object for the ptes
-	 */
-	if (pmap->pm_pteobj == NULL)
-		pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI +
-		    NPGPTD);
-
 	/*
 	 * allocate the page directory page(s)
 	 */
@@ -1187,9 +1180,10 @@ _pmap_allocpte(pmap, ptepindex)
 	/*
 	 * Find or fabricate a new pagetable page
 	 */
-	VM_OBJECT_LOCK(pmap->pm_pteobj);
-	m = vm_page_grab(pmap->pm_pteobj, ptepindex,
-	    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
+	m = vm_page_alloc(NULL, ptepindex,
+	    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_NOOBJ);
+	if (m == NULL)
+		return (m);
 	if ((m->flags & PG_ZERO) == 0)
 		pmap_zero_page(m);
 
@@ -1218,7 +1212,6 @@ _pmap_allocpte(pmap, ptepindex)
 	vm_page_flag_clear(m, PG_ZERO);
 	vm_page_wakeup(m);
 	vm_page_unlock_queues();
-	VM_OBJECT_UNLOCK(pmap->pm_pteobj);
 
 	return m;
 }
@@ -1234,7 +1227,7 @@ pmap_allocpte(pmap_t pmap, vm_offset_t va)
 	 * Calculate pagetable page index
 	 */
 	ptepindex = va >> PDRSHIFT;
-
+retry:
 	/*
 	 * Get the page directory entry
 	 */
@@ -1257,12 +1250,16 @@ pmap_allocpte(pmap_t pmap, vm_offset_t va)
 	if (ptepa) {
 		m = PHYS_TO_VM_PAGE(ptepa);
 		m->hold_count++;
-		return m;
-	}
-	/*
-	 * Here if the pte page isn't mapped, or if it has been deallocated.
-	 */
-	return _pmap_allocpte(pmap, ptepindex);
+	} else {
+		/*
+		 * Here if the pte page isn't mapped, or if it has
+		 * been deallocated.
+		 */
+		m = _pmap_allocpte(pmap, ptepindex);
+		if (m == NULL)
+			goto retry;
+	}
+	return (m);
 }
 
 
@@ -2086,6 +2083,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 		if (mpte && (mpte->pindex == ptepindex)) {
 			mpte->hold_count++;
 		} else {
+retry:
 			/*
 			 * Get the page directory entry
 			 */
@@ -2102,6 +2100,8 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 				mpte->hold_count++;
 			} else {
 				mpte = _pmap_allocpte(pmap, ptepindex);
+				if (mpte == NULL)
+					goto retry;
 			}
 		}
 	} else {
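In the pmap.c hunks above, vm_page_grab() on the pte object (which, given VM_ALLOC_RETRY, retries internally rather than returning NULL) is replaced by an object-less vm_page_alloc(..., VM_ALLOC_NOOBJ), which can fail. The new retry: labels let callers re-evaluate the page directory entry whenever _pmap_allocpte() returns NULL. A condensed sketch of that caller-side contract (locking, and any wait for free pages between attempts, are not shown in these hunks):

retry:
	/* Re-read the PDE; the page table page may have appeared meanwhile. */
	ptepa = pmap->pm_pdir[ptepindex];
	if (ptepa) {
		/* Already present: take another hold reference. */
		m = PHYS_TO_VM_PAGE(ptepa);
		m->hold_count++;
	} else {
		/* Not present: allocate, and start over if that fails. */
		m = _pmap_allocpte(pmap, ptepindex);
		if (m == NULL)
			goto retry;
	}
	return (m);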
@@ -281,7 +281,6 @@ struct md_page {
 
 struct pmap {
 	pd_entry_t		*pm_pdir;	/* KVA of page directory */
-	vm_object_t		pm_pteobj;	/* Container for pte's */
 	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
 	u_int			pm_active;	/* active on cpus */
 	struct pmap_statistics	pm_stats;	/* pmap statistics */