Eliminate vestiges of page coloring in VM_ALLOC_NOOBJ calls to
vm_page_alloc().  While I'm here, for the sake of consistency, always
specify the allocation class, such as VM_ALLOC_NORMAL, as the first of
the flags.
Author: Alan Cox
Date:   2011-10-27 16:39:17 +00:00
commit 703dec68bf (parent 982369192e)

7 changed files with 17 additions and 31 deletions
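
As a quick illustration of the convention the commit message describes: with
VM_ALLOC_NOOBJ the object argument is NULL and the pindex argument is unused,
so callers now pass 0, and the allocation class (VM_ALLOC_NORMAL,
VM_ALLOC_SYSTEM, or VM_ALLOC_INTERRUPT) is written first among the flags.
A minimal sketch of the resulting allocation pattern, mirroring the retry
loops in the diffs below (illustrative kernel code, not a standalone program):

	vm_page_t m;

	/* Allocation class first, then VM_ALLOC_NOOBJ and the modifiers. */
	while ((m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
		VM_WAIT;	/* sleep until the page daemon frees memory */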


@@ -1635,7 +1635,6 @@ int
 pmap_pinit(pmap_t pmap)
 {
 	vm_page_t pml4pg;
-	static vm_pindex_t color;
 	int i;

 	PMAP_LOCK_INIT(pmap);
@@ -1643,8 +1642,8 @@ pmap_pinit(pmap_t pmap)
 	/*
 	 * allocate the page directory page
 	 */
-	while ((pml4pg = vm_page_alloc(NULL, color++, VM_ALLOC_NOOBJ |
-	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
+	while ((pml4pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
+	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
 		VM_WAIT;

 	pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
@@ -2188,7 +2187,6 @@ get_pv_entry(pmap_t pmap, int try)
 {
 	static const struct timeval printinterval = { 60, 0 };
 	static struct timeval lastprint;
-	static vm_pindex_t colour;
 	struct vpgqueues *pq;
 	int bit, field;
 	pv_entry_t pv;
@@ -2228,7 +2226,7 @@ get_pv_entry(pmap_t pmap, int try)
 		}
 	}
 	/* No free items, allocate another chunk */
-	m = vm_page_alloc(NULL, colour, (pq == &vm_page_queues[PQ_ACTIVE] ?
+	m = vm_page_alloc(NULL, 0, (pq == &vm_page_queues[PQ_ACTIVE] ?
 	    VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) | VM_ALLOC_NOOBJ |
 	    VM_ALLOC_WIRED);
 	if (m == NULL) {
@@ -2255,7 +2253,6 @@ get_pv_entry(pmap_t pmap, int try)
 	}
 	PV_STAT(pc_chunk_count++);
 	PV_STAT(pc_chunk_allocs++);
-	colour++;
 	dump_add_page(m->phys_addr);
 	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
 	pc->pc_pmap = pmap;


@@ -42,7 +42,6 @@ __FBSDID("$FreeBSD$");
 void *
 uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 {
-	static vm_pindex_t colour;
 	vm_page_t m;
 	vm_paddr_t pa;
 	void *va;
@@ -50,13 +49,13 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)

 	*flags = UMA_SLAB_PRIV;
 	if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
-		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
+		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
 	else
-		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
+		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
 	if (wait & M_ZERO)
 		pflags |= VM_ALLOC_ZERO;
 	for (;;) {
-		m = vm_page_alloc(NULL, colour++, pflags | VM_ALLOC_NOOBJ);
+		m = vm_page_alloc(NULL, 0, pflags);
 		if (m == NULL) {
 			if (wait & M_NOWAIT)
 				return (NULL);


@@ -298,8 +298,7 @@ decrease_reservation(unsigned long nr_pages)
 		nr_pages = ARRAY_SIZE(frame_list);

 	for (i = 0; i < nr_pages; i++) {
-		int color = 0;
-		if ((page = vm_page_alloc(NULL, color++,
+		if ((page = vm_page_alloc(NULL, 0,
 		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
 		    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 			nr_pages = i;


@@ -1720,7 +1720,6 @@ pmap_pinit(pmap_t pmap)
 {
 	vm_page_t m, ptdpg[NPGPTD];
 	vm_paddr_t pa;
-	static int color;
 	int i;

 	PMAP_LOCK_INIT(pmap);
@@ -1754,9 +1753,8 @@ pmap_pinit(pmap_t pmap)
 	 * allocate the page directory page(s)
 	 */
 	for (i = 0; i < NPGPTD;) {
-		m = vm_page_alloc(NULL, color++,
-		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
-		    VM_ALLOC_ZERO);
+		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+		    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 		if (m == NULL)
 			VM_WAIT;
 		else {
@@ -2274,7 +2272,6 @@ get_pv_entry(pmap_t pmap, int try)
 {
 	static const struct timeval printinterval = { 60, 0 };
 	static struct timeval lastprint;
-	static vm_pindex_t colour;
 	struct vpgqueues *pq;
 	int bit, field;
 	pv_entry_t pv;
@@ -2320,7 +2317,7 @@ get_pv_entry(pmap_t pmap, int try)
 	 * queues lock.  If "pv_vafree" is currently non-empty, it will
 	 * remain non-empty until pmap_ptelist_alloc() completes.
 	 */
-	if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq ==
+	if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, (pq ==
 	    &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) |
 	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
 		if (try) {
@@ -2346,7 +2343,6 @@ get_pv_entry(pmap_t pmap, int try)
 	}
 	PV_STAT(pc_chunk_count++);
 	PV_STAT(pc_chunk_allocs++);
-	colour++;
 	pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
 	pmap_qenter((vm_offset_t)pc, &m, 1);
 	pc->pc_pmap = pmap;


@@ -1475,7 +1475,6 @@ pmap_pinit(pmap_t pmap)
 {
 	vm_page_t m, ptdpg[NPGPTD + 1];
 	int npgptd = NPGPTD + 1;
-	static int color;
 	int i;

 #ifdef HAMFISTED_LOCKING
@@ -1507,9 +1506,8 @@ pmap_pinit(pmap_t pmap)
 	 * allocate the page directory page(s)
 	 */
 	for (i = 0; i < npgptd;) {
-		m = vm_page_alloc(NULL, color++,
-		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
-		    VM_ALLOC_ZERO);
+		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+		    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 		if (m == NULL)
 			VM_WAIT;
 		else {
@@ -2103,7 +2101,6 @@ get_pv_entry(pmap_t pmap, int try)
 {
 	static const struct timeval printinterval = { 60, 0 };
 	static struct timeval lastprint;
-	static vm_pindex_t colour;
 	struct vpgqueues *pq;
 	int bit, field;
 	pv_entry_t pv;
@@ -2149,7 +2146,7 @@ get_pv_entry(pmap_t pmap, int try)
 	 * queues lock.  If "pv_vafree" is currently non-empty, it will
 	 * remain non-empty until pmap_ptelist_alloc() completes.
 	 */
-	if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq ==
+	if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, (pq ==
 	    &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) |
 	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
 		if (try) {
@@ -2175,7 +2172,6 @@ get_pv_entry(pmap_t pmap, int try)
 	}
 	PV_STAT(pc_chunk_count++);
 	PV_STAT(pc_chunk_allocs++);
-	colour++;
 	pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
 	pmap_qenter((vm_offset_t)pc, &m, 1);
 	if ((m->flags & PG_ZERO) == 0)


@@ -3760,10 +3760,9 @@ vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
 		 * could interfere with paging I/O, no matter which
 		 * process we are.
 		 */
-		p = vm_page_alloc(NULL, pg >> PAGE_SHIFT, VM_ALLOC_NOOBJ |
-		    VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
-		    VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT));
-		if (!p) {
+		p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
+		    VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT));
+		if (p == NULL) {
 			VM_WAIT;
 			goto tryagain;
 		}


@@ -543,7 +543,7 @@ kmem_init_zero_region(void)
 	 * zeros, while not using much more physical resources.
 	 */
 	addr = kmem_alloc_nofault(kernel_map, ZERO_REGION_SIZE);
-	m = vm_page_alloc(NULL, OFF_TO_IDX(addr - VM_MIN_KERNEL_ADDRESS),
+	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
 	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 	if ((m->flags & PG_ZERO) == 0)
 		pmap_zero_page(m);