In general, when we map a page into the kernel's address space, we no
longer create a pv entry for that mapping.  (The two exceptions are
mappings into the kernel's exec and pipe submaps.)  Consequently, there
is no reason for get_pv_entry() to dig deep into the free page queues,
i.e., use VM_ALLOC_SYSTEM, by default.  This revision changes
get_pv_entry() to use VM_ALLOC_NORMAL by default, i.e., before calling
pmap_collect() to reclaim pv entries.

Approved by: re (kensmith)
commit 8beae25391
parent 7dd9c45f26
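The change is only in which allocation class vm_page_alloc() is asked for:
both the first attempt and the retry after pmap_collect() now pass
VM_ALLOC_NORMAL, so a pv chunk allocation no longer dips into the reserve
pages that VM_ALLOC_SYSTEM is allowed to consume.  Below is a minimal
userland sketch of that fallback logic, not the kernel code itself;
mock_page_alloc(), mock_reclaim(), alloc_pv_chunk(), and the MOCK_ALLOC_*
constants are hypothetical stand-ins for vm_page_alloc(), pmap_collect(),
the chunk allocation path in get_pv_entry(), and the VM_ALLOC_* classes.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MOCK_ALLOC_NORMAL 0     /* ordinary request; may fail under pressure */
#define MOCK_ALLOC_SYSTEM 1     /* may dig deeper into the free page queues */

/* Pretend that memory is tight, so "normal" requests fail at first. */
static bool memory_tight = true;

static void *
mock_page_alloc(int req)
{
        if (req == MOCK_ALLOC_NORMAL && memory_tight)
                return (NULL);
        return (malloc(4096));
}

/* Stand-in for pmap_collect(): reclaiming pv entries relieves the pressure. */
static void
mock_reclaim(void)
{
        memory_tight = false;
}

/*
 * After this revision, both the first attempt and the post-reclaim retry
 * use the "normal" class; the "system" class is no longer requested at all.
 */
static void *
alloc_pv_chunk(bool try)
{
        void *m;

        m = mock_page_alloc(MOCK_ALLOC_NORMAL); /* previously SYSTEM unless "try" */
        if (m == NULL) {
                if (try)
                        return (NULL);          /* caller tolerates failure */
                mock_reclaim();                 /* pmap_collect(pmap, ...PQ_INACTIVE) */
                m = mock_page_alloc(MOCK_ALLOC_NORMAL); /* previously SYSTEM */
        }
        return (m);
}

int
main(void)
{
        void *chunk;

        chunk = alloc_pv_chunk(false);
        printf("chunk %s\n", chunk != NULL ? "allocated" : "unavailable");
        free(chunk);
        return (0);
}

Built as plain C (e.g. cc sketch.c), the sketch prints "chunk allocated" for
the non-"try" case, illustrating that reclamation, rather than a deeper
allocation class, is what is now expected to satisfy the request.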
@@ -1722,7 +1722,7 @@ get_pv_entry(pmap_t pmap, int try)
         static const struct timeval printinterval = { 60, 0 };
         static struct timeval lastprint;
         static vm_pindex_t colour;
-        int bit, field, page_req;
+        int bit, field;
         pv_entry_t pv;
         struct pv_chunk *pc;
         vm_page_t m;
@@ -1755,8 +1755,7 @@ get_pv_entry(pmap_t pmap, int try)
                 }
         }
         /* No free items, allocate another chunk */
-        page_req = try ? VM_ALLOC_NORMAL : VM_ALLOC_SYSTEM;
-        m = vm_page_alloc(NULL, colour, page_req | VM_ALLOC_NOOBJ);
+        m = vm_page_alloc(NULL, colour, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
         if (m == NULL) {
                 if (try) {
                         pv_entry_count--;
@@ -1775,7 +1774,7 @@ get_pv_entry(pmap_t pmap, int try)
                 PV_STAT(pmap_collect_inactive++);
                 pmap_collect(pmap, &vm_page_queues[PQ_INACTIVE]);
                 m = vm_page_alloc(NULL, colour,
-                    VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ);
+                    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
                 if (m == NULL) {
                         PV_STAT(pmap_collect_active++);
                         pmap_collect(pmap, &vm_page_queues[PQ_ACTIVE]);
@@ -1795,7 +1795,7 @@ get_pv_entry(pmap_t pmap, int try)
         static const struct timeval printinterval = { 60, 0 };
         static struct timeval lastprint;
         static vm_pindex_t colour;
-        int bit, field, page_req;
+        int bit, field;
         pv_entry_t pv;
         struct pv_chunk *pc;
         vm_page_t m;
@@ -1830,8 +1830,7 @@ get_pv_entry(pmap_t pmap, int try)
                 }
         }
         pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
-        page_req = try ? VM_ALLOC_NORMAL : VM_ALLOC_SYSTEM;
-        m = vm_page_alloc(NULL, colour, page_req |
+        m = vm_page_alloc(NULL, colour, VM_ALLOC_NORMAL |
             VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
         if (m == NULL || pc == NULL) {
                 if (try) {
@@ -1860,7 +1859,7 @@ get_pv_entry(pmap_t pmap, int try)
                 PV_STAT(pmap_collect_inactive++);
                 pmap_collect(pmap, &vm_page_queues[PQ_INACTIVE]);
                 if (m == NULL)
-                        m = vm_page_alloc(NULL, colour, VM_ALLOC_SYSTEM |
+                        m = vm_page_alloc(NULL, colour, VM_ALLOC_NORMAL |
                             VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
                 if (pc == NULL)
                         pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);