Since the introduction of the popmap to reservations in r259999, there is
no longer any need for the page's PG_CACHED and PG_FREE flags to be set and
cleared while the free page queues lock is held.  Thus, vm_page_alloc(),
vm_page_alloc_contig(), and vm_page_alloc_freelist() can wait until after
the free page queues lock is released to clear the page's flags.  Moreover,
the PG_FREE flag can be retired.  Now that the reservation system no longer
uses it, its only uses are in a few assertions.  Eliminating these
assertions is no real loss.  Other assertions catch the same types of
misbehavior, like doubly freeing a page (see r260032) or dirtying a free
page (free pages are invalid and only valid pages can be dirtied).

Eliminate an unneeded variable from vm_page_alloc_contig().
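
The reordering described in the first paragraph can be sketched as follows
(a simplified illustration, not the committed code; remove_from_free_queues()
is a hypothetical stand-in for the work done by vm_phys and vm_reserv):

	/* Before: the page's flags had to be rewritten under the lock. */
	mtx_lock(&vm_page_queue_free_mtx);
	remove_from_free_queues(m);
	m->flags = flags;		/* had to happen here */
	mtx_unlock(&vm_page_queue_free_mtx);

	/* After: once the page is off the free queues, no other thread
	 * can observe it, so its flags can be initialized lock-free. */
	mtx_lock(&vm_page_queue_free_mtx);
	remove_from_free_queues(m);
	mtx_unlock(&vm_page_queue_free_mtx);
	m->flags = flags;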

Sponsored by:	EMC / Isilon Storage Division
Alan Cox 2013-12-31 18:25:15 +00:00
parent 0646c22afc
commit 000fb817d8
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=260137
3 changed files with 14 additions and 32 deletions

sys/vm/vm_page.c

@@ -922,8 +922,6 @@ vm_page_dirty_KBI(vm_page_t m)
 	/* These assertions refer to this operation by its public name. */
 	KASSERT((m->flags & PG_CACHED) == 0,
 	    ("vm_page_dirty: page in cache!"));
-	KASSERT(!VM_PAGE_IS_FREE(m),
-	    ("vm_page_dirty: page is free!"));
 	KASSERT(m->valid == VM_PAGE_BITS_ALL,
 	    ("vm_page_dirty: page is invalid!"));
 	m->dirty = VM_PAGE_BITS_ALL;
@@ -1568,27 +1566,24 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 		    vm_object_cache_is_empty(m_object))
 			vp = m_object->handle;
 	} else {
-		KASSERT(VM_PAGE_IS_FREE(m),
-		    ("vm_page_alloc: page %p is not free", m));
 		KASSERT(m->valid == 0,
 		    ("vm_page_alloc: free page %p is valid", m));
 		vm_phys_freecnt_adj(m, -1);
+		if ((m->flags & PG_ZERO) != 0)
+			vm_page_zero_count--;
 	}
+	mtx_unlock(&vm_page_queue_free_mtx);
 
 	/*
-	 * Only the PG_ZERO flag is inherited.  The PG_CACHED or PG_FREE flag
-	 * must be cleared before the free page queues lock is released.
+	 * Initialize the page.  Only the PG_ZERO flag is inherited.
 	 */
 	flags = 0;
-	if (m->flags & PG_ZERO) {
-		vm_page_zero_count--;
-		if (req & VM_ALLOC_ZERO)
-			flags = PG_ZERO;
-	}
-	if (req & VM_ALLOC_NODUMP)
+	if ((req & VM_ALLOC_ZERO) != 0)
+		flags = PG_ZERO;
+	flags &= m->flags;
+	if ((req & VM_ALLOC_NODUMP) != 0)
 		flags |= PG_NODUMP;
 	m->flags = flags;
-	mtx_unlock(&vm_page_queue_free_mtx);
 	m->aflags = 0;
 	m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
 	    VPO_UNMANAGED : 0;
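
The new computation above inherits PG_ZERO only when the page actually
carries it and the caller asked for a zeroed page.  A standalone
demonstration of the same bit logic (PG_ZERO's value is taken from
vm_page.h; PG_NODUMP's value and the function name are illustrative):

	#include <stdio.h>

	#define	PG_ZERO		0x0008	/* page is zeroed */
	#define	PG_NODUMP	0x0080	/* illustrative value */

	static unsigned int
	inherit_flags(unsigned int page_flags, int req_zero, int req_nodump)
	{
		unsigned int flags = 0;

		if (req_zero)
			flags = PG_ZERO;
		flags &= page_flags;	/* keep PG_ZERO only if it was set */
		if (req_nodump)
			flags |= PG_NODUMP;
		return (flags);
	}

	int
	main(void)
	{
		printf("%#x\n", inherit_flags(PG_ZERO, 1, 0));	/* 0x8 */
		printf("%#x\n", inherit_flags(0, 1, 0));	/* 0 */
		printf("%#x\n", inherit_flags(PG_ZERO, 0, 1));	/* 0x80 */
		return (0);
	}

The result is identical to the old nested-if version, but the
vm_page_zero_count adjustment now stays under the free page queues lock
while the flag assembly happens after it is released.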
@@ -1704,7 +1699,7 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
 	struct vnode *drop;
 	struct spglist deferred_vdrop_list;
 	vm_page_t m, m_tmp, m_ret;
-	u_int flags, oflags;
+	u_int flags;
 	int req_class;
 
 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
@@ -1782,7 +1777,6 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
 		flags |= PG_NODUMP;
 	if ((req & VM_ALLOC_WIRED) != 0)
 		atomic_add_int(&cnt.v_wire_count, npages);
-	oflags = VPO_UNMANAGED;
 	if (object != NULL) {
 		if (object->memattr != VM_MEMATTR_DEFAULT &&
 		    memattr == VM_MEMATTR_DEFAULT)
@@ -1801,7 +1795,7 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
 		if ((req & VM_ALLOC_WIRED) != 0)
 			m->wire_count = 1;
 		/* Unmanaged pages don't use "act_count". */
-		m->oflags = oflags;
+		m->oflags = VPO_UNMANAGED;
 		if (object != NULL) {
 			if (vm_page_insert(m, object, pindex)) {
 				vm_page_alloc_contig_vdrop(
@@ -1873,16 +1867,12 @@ vm_page_alloc_init(vm_page_t m)
 		    vm_object_cache_is_empty(m_object))
 			drop = m_object->handle;
 	} else {
-		KASSERT(VM_PAGE_IS_FREE(m),
-		    ("vm_page_alloc_init: page %p is not free", m));
 		KASSERT(m->valid == 0,
 		    ("vm_page_alloc_init: free page %p is valid", m));
 		vm_phys_freecnt_adj(m, -1);
 		if ((m->flags & PG_ZERO) != 0)
 			vm_page_zero_count--;
 	}
 
-	/* Don't clear the PG_ZERO flag; we'll need it later. */
-	m->flags &= PG_ZERO;
 	return (drop);
 }
@@ -2211,10 +2201,9 @@ vm_page_cache_turn_free(vm_page_t m)
 
 	m->object = NULL;
 	m->valid = 0;
-	/* Clear PG_CACHED and set PG_FREE. */
-	m->flags ^= PG_CACHED | PG_FREE;
-	KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE,
-	    ("vm_page_cache_free: page %p has inconsistent flags", m));
+	KASSERT((m->flags & PG_CACHED) != 0,
+	    ("vm_page_cache_turn_free: page %p is not cached", m));
+	m->flags &= ~PG_CACHED;
 	cnt.v_cache_count--;
 	vm_phys_freecnt_adj(m, 1);
 }
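
The old code toggled both bits with one XOR and then verified the
result; with PG_FREE retired there is only one bit left, so the check
becomes a precondition and the clear is explicit.  A standalone sketch
of the XOR's hazard (flag values from vm_page.h):

	#include <stdio.h>

	#define	PG_CACHED	0x0001
	#define	PG_FREE		0x0002

	int
	main(void)
	{
		unsigned int flags;

		/* Expected state: PG_CACHED set, PG_FREE clear. */
		flags = PG_CACHED;
		flags ^= PG_CACHED | PG_FREE;
		printf("%#x\n", flags);	/* 0x2: cleared one, set the other */

		/* Unexpected state: XOR inverts rather than fails. */
		flags = PG_FREE;
		flags ^= PG_CACHED | PG_FREE;
		printf("%#x\n", flags);	/* 0x1: now "cached", not "free" */
		return (0);
	}

The old KASSERT caught such corruption only after the fact; the new one
rejects a non-cached page before any bits change.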
@@ -2240,9 +2229,7 @@ vm_page_free_toq(vm_page_t m)
 	    ("vm_page_free_toq: unmanaged page %p is queued", m));
 	PCPU_INC(cnt.v_tfree);
 
-	if (VM_PAGE_IS_FREE(m))
-		panic("vm_page_free: freeing free page %p", m);
-	else if (vm_page_sbusied(m))
+	if (vm_page_sbusied(m))
 		panic("vm_page_free: freeing busy page %p", m);
 
 	/*
@@ -2284,7 +2271,6 @@ vm_page_free_toq(vm_page_t m)
 	 * cache/free page queues.
 	 */
 	mtx_lock(&vm_page_queue_free_mtx);
-	m->flags |= PG_FREE;
 	vm_phys_freecnt_adj(m, 1);
 #if VM_NRESERVLEVEL > 0
 	if (!vm_reserv_free_page(m))

sys/vm/vm_page.h

@@ -321,7 +321,6 @@ extern struct mtx_padalign pa_lock[];
  * freeing, the modification must be protected by the vm_page lock.
  */
 #define	PG_CACHED	0x0001		/* page is cached */
-#define	PG_FREE		0x0002		/* page is free */
 #define	PG_FICTITIOUS	0x0004		/* physical page doesn't exist */
 #define	PG_ZERO		0x0008		/* page is zeroed */
 #define	PG_MARKER	0x0010		/* special queue marker page */
@@ -372,8 +371,6 @@ extern vm_page_t vm_page_array;	/* First resident page in table */
 extern long vm_page_array_size;		/* number of vm_page_t's */
 extern long first_page;			/* first physical page number */
 
-#define VM_PAGE_IS_FREE(m)	(((m)->flags & PG_FREE) != 0)
-
 #define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)
 
 vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);

sys/vm/vm_phys.c

@@ -391,7 +391,6 @@ vm_phys_add_page(vm_paddr_t pa)
 	vmd = vm_phys_domain(m);
 	vmd->vmd_page_count++;
 	vmd->vmd_segs |= 1UL << m->segind;
-	m->flags = PG_FREE;
 	KASSERT(m->order == VM_NFREEORDER,
 	    ("vm_phys_add_page: page %p has unexpected order %d",
 	    m, m->order));