From f3e8cdafb52083eb69508899d1dc6ce03b6bc039 Mon Sep 17 00:00:00 2001
From: markj
Date: Mon, 28 Aug 2017 22:10:15 +0000
Subject: [PATCH] Synchronize page laundering with pmap_extract_and_hold().

Before r207410, the hold count of a page in a page queue was protected
by the queue lock, and, before laundering a page, the page daemon
removed managed writeable mappings of the page before releasing the
queue lock. This ensured that other threads could not concurrently
create transient writeable mappings using pmap_extract_and_hold() on a
user map, as is done for example by vmapbuf(). With that revision,
however, a race can allow the creation of such a mapping, meaning that
the page might be modified as it is being laundered, potentially
resulting in it being marked clean when its contents do not match those
given to the pager. Close the race by using the page lock to
synchronize the hold count check in vm_pageout_cluster() with the
removal of writeable managed mappings.

Reported by:	alc
Reviewed by:	alc, kib
MFC after:	1 week
Differential Revision:	https://reviews.freebsd.org/D12084
---
 sys/vm/vm_pageout.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 2d6e9ee62b4c..921c003047b0 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -402,6 +402,8 @@ vm_pageout_cluster(vm_page_t m)
 	 */
 	vm_page_assert_unbusied(m);
 	KASSERT(m->hold_count == 0, ("page %p is held", m));
+
+	pmap_remove_write(m);
 	vm_page_unlock(m);
 
 	mc[vm_pageout_page_count] = pb = ps = m;
@@ -444,6 +446,7 @@ vm_pageout_cluster(vm_page_t m)
 			ib = 0;
 			break;
 		}
+		pmap_remove_write(p);
 		vm_page_unlock(p);
 		mc[--page_base] = pb = p;
 		++pageout_count;
@@ -469,6 +472,7 @@ vm_pageout_cluster(vm_page_t m)
 			vm_page_unlock(p);
 			break;
 		}
+		pmap_remove_write(p);
 		vm_page_unlock(p);
 		mc[page_base + pageout_count] = ps = p;
 		++pageout_count;
@@ -513,8 +517,8 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
 	VM_OBJECT_ASSERT_WLOCKED(object);
 
 	/*
-	 * Initiate I/O. Bump the vm_page_t->busy counter and
-	 * mark the pages read-only.
+	 * Initiate I/O. Mark the pages busy and verify that they're valid
+	 * and read-only.
 	 *
 	 * We do not have to fixup the clean/dirty bits here... we can
 	 * allow the pager to do it after the I/O completes.
@@ -526,8 +530,9 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
 		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
 		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
 			mc[i], i, count));
+		KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0,
+		    ("vm_pageout_flush: writeable page %p", mc[i]));
 		vm_page_sbusy(mc[i]);
-		pmap_remove_write(mc[i]);
 	}
 	vm_object_pip_add(object, count);
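
Note: the standalone userspace sketch below only illustrates the locking
pattern the fix relies on; it is not the kernel code. A pthread mutex stands
in for the page lock, an integer for the hold count, and a boolean for the
presence of writeable managed mappings. The names (fake_page,
hold_page_for_write, try_launder_page) are invented for illustration.

/*
 * Simplified analogue of the synchronization in this change: the hold-count
 * check and the revocation of write access happen under one lock, so they
 * are atomic with respect to a thread that takes a hold for writing.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	pthread_mutex_t	lock;		/* stands in for the vm_page lock */
	int		hold_count;	/* transient holds on the page */
	bool		writeable;	/* stands in for writeable managed mappings */
};

/* Analogue of a pmap_extract_and_hold()-style caller wanting write access. */
static bool
hold_page_for_write(struct fake_page *p)
{
	pthread_mutex_lock(&p->lock);
	if (!p->writeable) {
		/* Laundering already revoked write access; caller must retry. */
		pthread_mutex_unlock(&p->lock);
		return (false);
	}
	p->hold_count++;
	pthread_mutex_unlock(&p->lock);
	return (true);
}

/* Analogue of the check now done by vm_pageout_cluster() under the lock. */
static bool
try_launder_page(struct fake_page *p)
{
	pthread_mutex_lock(&p->lock);
	if (p->hold_count != 0) {
		/* A holder may write through a transient mapping; skip. */
		pthread_mutex_unlock(&p->lock);
		return (false);
	}
	/*
	 * Revoke write access before dropping the lock, so no new writeable
	 * hold can be created once the page has been chosen for laundering.
	 */
	p->writeable = false;
	pthread_mutex_unlock(&p->lock);
	return (true);
}

int
main(void)
{
	struct fake_page p = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.hold_count = 0,
		.writeable = true,
	};

	printf("laundered: %d\n", try_launder_page(&p));
	printf("held for write after laundering: %d\n", hold_page_for_write(&p));
	return (0);
}

Whichever thread takes the lock first wins: if the launderer wins, write
access is gone before the lock is dropped and a later hold cannot create a
writeable mapping; if the holder wins, the nonzero hold count makes the
launderer skip the page, mirroring the ordering argument in the commit
message.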