vm_pageout_flush() might cache the pages that finished their write to the
backing storage. Such pages might then be reused, racing with the
assert in vm_object_page_collect_flush() that verified that dirty
pages from the run (most likely, pages with VM_PAGER_AGAIN status) are
still write-protected. In fact, the page indexes for the pages that
were removed from the object page list should be ignored by
vm_object_page_clean().
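
In outline, the racy pattern being removed (condensed from the
vm_object.c hunk below; declarations and the surrounding function are
omitted) was:

	vm_pageout_flush(ma, runlen, pagerflags);
	for (i = 0; i < runlen; i++) {
		/*
		 * Unsafe: vm_pageout_flush() may have cached ma[i], and
		 * a cached page can be reused at any time, so neither
		 * ma[i]->dirty nor the PG_WRITEABLE flag is stable here.
		 */
		if (ma[i]->dirty != 0)
			KASSERT((ma[i]->flags & PG_WRITEABLE) == 0,
			    ("page %p is not write protected", ma[i]));
	}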

Return the length of the successfully written run from vm_pageout_flush(),
that is, the count of pages between the requested page and the first
subsequent page with status VM_PAGER_AGAIN. Supply the index of the
requested page within the array to vm_pageout_flush(). Use the returned
run length to advance the index of the next page to clean in
vm_object_page_clean().
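
For illustration, a minimal userland sketch of the runlen convention
(the helper name and the VM_PAGER_* values are stand-ins, not the
kernel definitions from <vm/vm_pager.h>):

	#include <stdio.h>

	#define	VM_PAGER_OK	0	/* stand-in value */
	#define	VM_PAGER_AGAIN	5	/* stand-in value */

	/*
	 * Mirrors the computation added to vm_pageout_flush(): runlen
	 * starts as the distance from mreq to the end of the run and is
	 * clipped at the first VM_PAGER_AGAIN at or after mreq.
	 */
	static int
	run_length(const int *pageout_status, int count, int mreq)
	{
		int i, runlen;

		runlen = count - mreq;
		for (i = 0; i < count; i++) {
			if (pageout_status[i] == VM_PAGER_AGAIN &&
			    i >= mreq && i - mreq < runlen)
				runlen = i - mreq;
		}
		return (runlen);
	}

	int
	main(void)
	{
		int status[] = { VM_PAGER_OK, VM_PAGER_OK, VM_PAGER_OK,
		    VM_PAGER_AGAIN, VM_PAGER_OK };

		/* mreq = 1: pages 1 and 2 complete before the AGAIN at 3. */
		printf("runlen = %d\n", run_length(status, 5, 1));
		return (0);
	}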

Reported by:	avg
Reviewed by:	alc
MFC after:	1 week
Author:	Konstantin Belousov
Date:	2010-11-18 21:09:02 +00:00
Commit:	1e8a675c73
Parent:	f86f965ef8

4 changed files with 15 additions and 28 deletions

sys/vm/vm_contig.c

@@ -140,7 +140,7 @@ vm_contig_launder_page(vm_page_t m, vm_page_t *next)
 	    object->type == OBJT_DEFAULT) {
 		vm_page_unlock_queues();
 		m_tmp = m;
-		vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC);
+		vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC, 0, NULL);
 		VM_OBJECT_UNLOCK(object);
 		vm_page_lock_queues();
 		return (0);

sys/vm/vm_object.c

@@ -884,30 +884,9 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags)
 		index = (maxb + i) + 1;
 		ma[index] = maf[i];
 	}
-	runlen = maxb + maxf + 1;
-	vm_pageout_flush(ma, runlen, pagerflags);
-	for (i = 0; i < runlen; i++) {
-		if (ma[i]->dirty != 0) {
-			KASSERT((ma[i]->flags & PG_WRITEABLE) == 0,
-	("vm_object_page_collect_flush: page %p is not write protected",
-			    ma[i]));
-		}
-	}
-	for (i = 0; i < maxf; i++) {
-		if (ma[i + maxb + 1]->dirty != 0) {
-			/*
-			 * maxf will end up being the actual number of pages
-			 * we wrote out contiguously, non-inclusive of the
-			 * first page.  We do not count look-behind pages.
-			 */
-			if (maxf > i) {
-				maxf = i;
-				break;
-			}
-		}
-	}
-	return (maxf + 1);
+	vm_pageout_flush(ma, maxb + maxf + 1, pagerflags, maxb + 1, &runlen);
+	return (runlen);
 }
 
 /*

sys/vm/vm_pageout.c

@@ -438,7 +438,7 @@ vm_pageout_clean(vm_page_t m)
 	/*
 	 * we allow reads during pageouts...
 	 */
-	return (vm_pageout_flush(&mc[page_base], pageout_count, 0));
+	return (vm_pageout_flush(&mc[page_base], pageout_count, 0, 0, NULL));
 }
 
 /*
@@ -449,14 +449,17 @@ vm_pageout_clean(vm_page_t m)
  * reference count all in here rather then in the parent.  If we want
  * the parent to do more sophisticated things we may have to change
  * the ordering.
+ *
+ * Returned runlen is the count of pages between mreq and first
+ * page after mreq with status VM_PAGER_AGAIN.
  */
 int
-vm_pageout_flush(vm_page_t *mc, int count, int flags)
+vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen)
 {
 	vm_object_t object = mc[0]->object;
 	int pageout_status[count];
 	int numpagedout = 0;
-	int i;
+	int i, runlen;
 
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);
@@ -482,6 +485,7 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags)
 
 	vm_pager_put_pages(object, mc, count, flags, pageout_status);
 
+	runlen = count - mreq;
 	for (i = 0; i < count; i++) {
 		vm_page_t mt = mc[i];
@@ -513,6 +517,8 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags)
 			vm_page_unlock(mt);
 			break;
 		case VM_PAGER_AGAIN:
+			if (i >= mreq && i - mreq < runlen)
+				runlen = i - mreq;
 			break;
 		}
@@ -532,6 +538,8 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags)
 			}
 		}
 	}
+	if (prunlen != NULL)
+		*prunlen = runlen;
 	return (numpagedout);
 }

sys/vm/vm_pageout.h

@@ -102,7 +102,7 @@ extern void vm_waitpfault(void);
 
 #ifdef _KERNEL
 boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
-int vm_pageout_flush(vm_page_t *, int, int);
+int vm_pageout_flush(vm_page_t *, int, int, int, int *);
 void vm_pageout_oom(int shortage);
 boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
 void vm_contig_grow_cache(int, vm_paddr_t, vm_paddr_t);