Correct a boundary case error in the management of a page's dirty bits by
shm_dotruncate() and vnode_pager_setsize().  Specifically, if the length of
a shared memory object or a file is truncated such that the length modulo
the page size is between 1 and 511, then all of the page's dirty bits were
cleared.  Now, a dirty bit is cleared only if the corresponding block is
truncated in its entirety.
Alan Cox 2009-06-02 08:02:27 +00:00
parent 045e970615
commit 3c33df624c
2 changed files with 33 additions and 14 deletions
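The boundary case is easiest to see in the dirty-bit arithmetic. The sketch below is a minimal userspace model, assuming 4096-byte pages and one dirty bit per 512-byte (DEV_BSIZE) block; page_bits() here is a stand-in patterned on the kernel's vm_page_bits(), not a copy of it.

#include <assert.h>
#include <stdint.h>

#define	PAGE_SIZE	4096
#define	DEV_BSIZE	512

/*
 * One bit per DEV_BSIZE block for every block touched by the byte range
 * [base, base + size), in the spirit of the kernel's vm_page_bits().
 */
static uint8_t
page_bits(int base, int size)
{
	int first = base / DEV_BSIZE;
	int last = (base + size - 1) / DEV_BSIZE;

	return ((2u << last) - (1u << first));
}

int
main(void)
{
	int base = 100;		/* length % PAGE_SIZE, anywhere in 1..511 */

	/*
	 * Old behavior: clearing the dirty bits for [base, PAGE_SIZE)
	 * rounds base down to block 0, so every dirty bit is cleared even
	 * though bytes 0..99 were not zeroed and may hold unwritten data.
	 */
	assert(page_bits(base, PAGE_SIZE - base) == 0xff);

	/*
	 * Fixed behavior: round base up to the next block boundary first;
	 * block 0's dirty bit survives.
	 */
	base = (base + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);	/* roundup2() */
	assert(page_bits(base, PAGE_SIZE - base) == 0xfe);
	return (0);
}

For any base from 1 to 511 the cleared range covers all eight blocks, so m->dirty went to zero and the old "if (m->dirty != 0)" fallback never fired; from 512 up, any dirty bit in an untouched block survived and that fallback re-marked the whole page dirty, which is why the commit message confines the bug to the 1..511 window.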


@@ -274,7 +274,7 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
 		/*
 		 * If the last page is partially mapped, then zero out
 		 * the garbage at the end of the page.  See comments
-		 * in vnode_page_setsize() for more details.
+		 * in vnode_pager_setsize() for more details.
 		 *
 		 * XXXJHB: This handles in memory pages, but what about
 		 * a page swapped out to disk?
@@ -286,10 +286,23 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
 			int size = PAGE_SIZE - base;
 
 			pmap_zero_page_area(m, base, size);
+
+			/*
+			 * Update the valid bits to reflect the blocks that
+			 * have been zeroed.  Some of these valid bits may
+			 * have already been set.
+			 */
+			vm_page_set_valid(m, base, size);
+
+			/*
+			 * Round "base" to the next block boundary so that the
+			 * dirty bit for a partially zeroed block is not
+			 * cleared.
+			 */
+			base = roundup2(base, DEV_BSIZE);
+
 			vm_page_lock_queues();
-			vm_page_set_validclean(m, base, size);
-			if (m->dirty != 0)
-				m->dirty = VM_PAGE_BITS_ALL;
+			vm_page_clear_dirty(m, base, PAGE_SIZE - base);
 			vm_page_unlock_queues();
 		} else if ((length & PAGE_MASK) &&
 		    __predict_false(object->cache != NULL)) {
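roundup2() is the power-of-two rounding macro from FreeBSD's <sys/param.h>; its expansion is reproduced below so the check compiles on its own. The values it produces for "base" are what make the fix block-exact:

#include <assert.h>

#define	DEV_BSIZE	512
/* As defined in <sys/param.h>; "y" must be a power of 2. */
#define	roundup2(x, y)	(((x) + ((y) - 1)) & (~((y) - 1)))

int
main(void)
{
	assert(roundup2(1, DEV_BSIZE) == 512);	  /* keep block 0's dirty bit */
	assert(roundup2(511, DEV_BSIZE) == 512);  /* boundary of the old bug */
	assert(roundup2(512, DEV_BSIZE) == 512);  /* aligned base is unchanged */
	assert(roundup2(513, DEV_BSIZE) == 1024); /* now block 1 is protected */
	return (0);
}

After the rounding, vm_page_clear_dirty(m, base, PAGE_SIZE - base) touches only blocks that lie entirely beyond the truncation point, i.e., blocks that were zeroed in their entirety.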


@@ -403,22 +403,28 @@ vnode_pager_setsize(vp, nsize)
 			pmap_zero_page_area(m, base, size);
 
 			/*
-			 * Clear out partial-page dirty bits.  This
-			 * has the side effect of setting the valid
-			 * bits, but that is ok.  There are a bunch
-			 * of places in the VM system where we expected
-			 * m->dirty == VM_PAGE_BITS_ALL.  The file EOF
-			 * case is one of them.  If the page is still
-			 * partially dirty, make it fully dirty.
+			 * Update the valid bits to reflect the blocks that
+			 * have been zeroed.  Some of these valid bits may
+			 * have already been set.
+			 */
+			vm_page_set_valid(m, base, size);
+
+			/*
+			 * Round "base" to the next block boundary so that the
+			 * dirty bit for a partially zeroed block is not
+			 * cleared.
+			 */
+			base = roundup2(base, DEV_BSIZE);
+
+			/*
+			 * Clear out partial-page dirty bits.
 			 *
 			 * note that we do not clear out the valid
 			 * bits.  This would prevent bogus_page
 			 * replacement from working properly.
 			 */
 			vm_page_lock_queues();
-			vm_page_set_validclean(m, base, size);
-			if (m->dirty != 0)
-				m->dirty = VM_PAGE_BITS_ALL;
+			vm_page_clear_dirty(m, base, PAGE_SIZE - base);
 			vm_page_unlock_queues();
 		} else if ((nsize & PAGE_MASK) &&
 		    __predict_false(object->cache != NULL)) {
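Taken together, the two hunks perform the same sequence: zero the page's tail, mark the zeroed blocks valid, then clear dirty bits only for blocks truncated in their entirety. A userspace model of that sequence is sketched below; struct page, page_bits(), and truncate_tail() are hypothetical stand-ins for the vm_page machinery, not kernel APIs.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define	PAGE_SIZE	4096
#define	DEV_BSIZE	512
#define	VM_PAGE_BITS_ALL	0xffu
#define	roundup2(x, y)	(((x) + ((y) - 1)) & (~((y) - 1)))

struct page {
	uint8_t	valid;		/* one bit per DEV_BSIZE block */
	uint8_t	dirty;
	char	data[PAGE_SIZE];
};

static uint8_t
page_bits(int base, int size)
{
	if (size == 0)
		return (0);
	return ((2u << ((base + size - 1) / DEV_BSIZE)) -
	    (1u << (base / DEV_BSIZE)));
}

/* Zero the tail of the page beyond "base" and fix up the valid/dirty bits. */
static void
truncate_tail(struct page *m, int base)
{
	int size = PAGE_SIZE - base;

	memset(m->data + base, 0, size);	/* pmap_zero_page_area() */
	m->valid |= page_bits(base, size);	/* vm_page_set_valid() */
	base = roundup2(base, DEV_BSIZE);	/* skip the partial block */
	m->dirty &= ~page_bits(base, PAGE_SIZE - base);	/* vm_page_clear_dirty() */
}

int
main(void)
{
	struct page m = { .valid = VM_PAGE_BITS_ALL, .dirty = VM_PAGE_BITS_ALL };

	truncate_tail(&m, 100);		/* truncate to offset 100 in the page */
	assert(m.dirty == 0x01);	/* block 0 stays dirty: bytes 0..99 live */
	return (0);
}

With the old vm_page_set_validclean() call this test would end with dirty == 0: the write covering bytes 0..99 would be silently dropped, which is exactly the data loss the commit prevents.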