Change vm_page_unwire() such that it (1) accepts PQ_NONE as the specified
queue and (2) returns a Boolean indicating whether the page's wire count
transitioned to zero.

Exploit this change in vfs_vmio_release() to avoid pointlessly enqueueing
a page that is about to be freed.

(An earlier version of this change was developed by attilio@ and kmacy@.
Any errors in this version are my own.)

Reviewed by:	kib
Sponsored by:	EMC / Isilon Storage Division
This commit is contained in:
Alan Cox 2015-09-22 18:16:52 +00:00
parent a40531fcf8
commit 15aaea7892
3 changed files with 43 additions and 33 deletions

View File

@ -2076,6 +2076,7 @@ vfs_vmio_release(struct buf *bp)
vm_object_t obj; vm_object_t obj;
vm_page_t m; vm_page_t m;
int i; int i;
bool freed;
if (buf_mapped(bp)) { if (buf_mapped(bp)) {
BUF_CHECK_MAPPED(bp); BUF_CHECK_MAPPED(bp);
@ -2088,23 +2089,28 @@ vfs_vmio_release(struct buf *bp)
for (i = 0; i < bp->b_npages; i++) { for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i]; m = bp->b_pages[i];
bp->b_pages[i] = NULL; bp->b_pages[i] = NULL;
/*
* In order to keep page LRU ordering consistent, put
* everything on the inactive queue.
*/
vm_page_lock(m); vm_page_lock(m);
vm_page_unwire(m, PQ_INACTIVE); if (vm_page_unwire(m, PQ_NONE)) {
/*
/* * Determine if the page should be freed before adding
* Might as well free the page if we can and it has * it to the inactive queue.
* no valid data. We also free the page if the */
* buffer was used for direct I/O if ((bp->b_flags & B_ASYNC) == 0 && m->valid == 0) {
*/ freed = !vm_page_busied(m);
if ((bp->b_flags & B_ASYNC) == 0 && !m->valid) { if (freed)
if (m->wire_count == 0 && !vm_page_busied(m)) vm_page_free(m);
vm_page_free(m); } else if ((bp->b_flags & B_DIRECT) != 0)
} else if (bp->b_flags & B_DIRECT) freed = vm_page_try_to_free(m);
vm_page_try_to_free(m); else
freed = false;
if (!freed) {
/*
* In order to maintain LRU page ordering, put
* the page at the tail of the inactive queue.
*/
vm_page_deactivate(m);
}
}
vm_page_unlock(m); vm_page_unlock(m);
} }
if (obj != NULL) if (obj != NULL)

View File

@ -2476,42 +2476,46 @@ vm_page_wire(vm_page_t m)
/* /*
* vm_page_unwire: * vm_page_unwire:
* *
* Release one wiring of the specified page, potentially enabling it to be * Release one wiring of the specified page, potentially allowing it to be
* paged again. If paging is enabled, then the value of the parameter * paged out. Returns TRUE if the number of wirings transitions to zero and
* "queue" determines the queue to which the page is added. * FALSE otherwise.
* *
* However, unless the page belongs to an object, it is not enqueued because * Only managed pages belonging to an object can be paged out. If the number
* it cannot be paged out. * of wirings transitions to zero and the page is eligible for page out, then
* the page is added to the specified paging queue (unless PQ_NONE is
* specified).
* *
* If a page is fictitious, then its wire count must always be one. * If a page is fictitious, then its wire count must always be one.
* *
* A managed page must be locked. * A managed page must be locked.
*/ */
void boolean_t
vm_page_unwire(vm_page_t m, uint8_t queue) vm_page_unwire(vm_page_t m, uint8_t queue)
{ {
KASSERT(queue < PQ_COUNT, KASSERT(queue < PQ_COUNT || queue == PQ_NONE,
("vm_page_unwire: invalid queue %u request for page %p", ("vm_page_unwire: invalid queue %u request for page %p",
queue, m)); queue, m));
if ((m->oflags & VPO_UNMANAGED) == 0) if ((m->oflags & VPO_UNMANAGED) == 0)
vm_page_lock_assert(m, MA_OWNED); vm_page_assert_locked(m);
if ((m->flags & PG_FICTITIOUS) != 0) { if ((m->flags & PG_FICTITIOUS) != 0) {
KASSERT(m->wire_count == 1, KASSERT(m->wire_count == 1,
("vm_page_unwire: fictitious page %p's wire count isn't one", m)); ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
return; return (FALSE);
} }
if (m->wire_count > 0) { if (m->wire_count > 0) {
m->wire_count--; m->wire_count--;
if (m->wire_count == 0) { if (m->wire_count == 0) {
atomic_subtract_int(&vm_cnt.v_wire_count, 1); atomic_subtract_int(&vm_cnt.v_wire_count, 1);
if ((m->oflags & VPO_UNMANAGED) != 0 || if ((m->oflags & VPO_UNMANAGED) == 0 &&
m->object == NULL) m->object != NULL && queue != PQ_NONE) {
return; if (queue == PQ_INACTIVE)
if (queue == PQ_INACTIVE) m->flags &= ~PG_WINATCFLS;
m->flags &= ~PG_WINATCFLS; vm_page_enqueue(queue, m);
vm_page_enqueue(queue, m); }
} return (TRUE);
} else
return (FALSE);
} else } else
panic("vm_page_unwire: page %p's wire count is zero", m); panic("vm_page_unwire: page %p's wire count is zero", m);
} }

View File

@ -480,7 +480,7 @@ vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m); void vm_page_sunbusy(vm_page_t m);
int vm_page_trysbusy(vm_page_t m); int vm_page_trysbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count); void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unwire (vm_page_t m, uint8_t queue); boolean_t vm_page_unwire(vm_page_t m, uint8_t queue);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr); void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire (vm_page_t); void vm_page_wire (vm_page_t);
void vm_page_xunbusy_hard(vm_page_t m); void vm_page_xunbusy_hard(vm_page_t m);