Fix two similar bugs in the populate vm_fault() code.

If the pager's populate method succeeded, but another thread raced
with us and modified the vm_map, we must unbusy all pages busied by
the pager before we retry the whole fault handling.  If the pager
instantiated more pages than fit into the current map entry, we must
unbusy the pages that are clipped off.

Also do some refactoring, clarify comments, and use clearer local
variable names.
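
To make the clipping case concrete, here is a minimal userland sketch
(hypothetical names and values, not part of this change) of the index
arithmetic involved: the pager may return a page index range wider
than the one backed by the map entry, and the excess pages on either
side must be unbusied instead of mapped.

#include <stdio.h>

typedef unsigned long vm_pindex_t;	/* stand-in for the kernel type */

#define	MAX(a, b) ((a) > (b) ? (a) : (b))
#define	MIN(a, b) ((a) < (b) ? (a) : (b))

int
main(void)
{
	/* Illustrative values: the pager expanded the requested range. */
	vm_pindex_t pager_first = 0, pager_last = 15;	/* pager response */
	vm_pindex_t entry_first = 4, entry_last = 11;	/* map entry span */
	vm_pindex_t map_first, map_last;

	map_first = MAX(entry_first, pager_first);
	if (map_first > pager_first)	/* leading excess to unbusy */
		printf("unbusy [%lu, %lu]\n", pager_first, map_first - 1);
	map_last = MIN(entry_last, pager_last);
	if (map_last < pager_last)	/* trailing excess to unbusy */
		printf("unbusy [%lu, %lu]\n", map_last + 1, pager_last);
	printf("map and enter [%lu, %lu]\n", map_first, map_last);
	return (0);
}

Run, it reports the two clipped subranges [0, 3] and [12, 15]; in the
kernel those are exactly the ranges handed to the new
vm_fault_populate_cleanup() helper in the diff below.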

Reported and tested by:	kargl, subbsd@gmail.com (previous version)
Reviewed by:	alc
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
Author: Konstantin Belousov
Date:   2016-12-30 18:55:33 +00:00
Commit: 7a432b84e8 (parent 8151740c88)
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=310849

--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c

@@ -304,13 +304,45 @@ vm_fault_restore_map_lock(struct faultstate *fs)
 	fs->lookup_still_valid = true;
 }
 
+static void
+vm_fault_populate_check_page(vm_page_t m)
+{
+
+	/*
+	 * Check each page to ensure that the pager is obeying the
+	 * interface: the page must be installed in the object, fully
+	 * valid, and exclusively busied.
+	 */
+	MPASS(m != NULL);
+	MPASS(m->valid == VM_PAGE_BITS_ALL);
+	MPASS(vm_page_xbusied(m));
+}
+
+static void
+vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
+    vm_pindex_t last)
+{
+	vm_page_t m;
+	vm_pindex_t pidx;
+
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	MPASS(first <= last);
+	for (pidx = first, m = vm_page_lookup(object, pidx);
+	    pidx <= last; pidx++, m = vm_page_next(m)) {
+		vm_fault_populate_check_page(m);
+		vm_page_lock(m);
+		vm_page_deactivate(m);
+		vm_page_unlock(m);
+		vm_page_xunbusy(m);
+	}
+}
+
 static int
 vm_fault_populate(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
     int fault_type, int fault_flags, boolean_t wired, vm_page_t *m_hold)
 {
 	vm_page_t m;
-	vm_pindex_t f_first, f_last, pidx;
+	vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
 	int rv;
 
 	MPASS(fs->object == fs->first_object);
@@ -319,8 +351,8 @@ vm_fault_populate(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
 	MPASS(fs->first_object->backing_object == NULL);
 	MPASS(fs->lookup_still_valid);
 
-	f_first = OFF_TO_IDX(fs->entry->offset);
-	f_last = OFF_TO_IDX(fs->entry->offset + fs->entry->end -
+	pager_first = OFF_TO_IDX(fs->entry->offset);
+	pager_last = OFF_TO_IDX(fs->entry->offset + fs->entry->end -
 	    fs->entry->start) - 1;
 	unlock_map(fs);
 	unlock_vp(fs);
@@ -334,7 +366,7 @@ vm_fault_populate(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
 	 * to the driver.
 	 */
 	rv = vm_pager_populate(fs->first_object, fs->first_pindex,
-	    fault_type, fs->entry->max_protection, &f_first, &f_last);
+	    fault_type, fs->entry->max_protection, &pager_first, &pager_last);
 
 	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
 	if (rv == VM_PAGER_BAD) {
@@ -351,34 +383,40 @@ vm_fault_populate(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
 		return (KERN_FAILURE); /* AKA SIGSEGV */
 
 	/* Ensure that the driver is obeying the interface. */
-	MPASS(f_first <= f_last);
-	MPASS(fs->first_pindex <= f_last);
-	MPASS(fs->first_pindex >= f_first);
-	MPASS(f_last < fs->first_object->size);
+	MPASS(pager_first <= pager_last);
+	MPASS(fs->first_pindex <= pager_last);
+	MPASS(fs->first_pindex >= pager_first);
+	MPASS(pager_last < fs->first_object->size);
 
 	vm_fault_restore_map_lock(fs);
-	if (fs->map->timestamp != fs->map_generation)
+	if (fs->map->timestamp != fs->map_generation) {
+		vm_fault_populate_cleanup(fs->first_object, pager_first,
+		    pager_last);
 		return (KERN_RESOURCE_SHORTAGE); /* RetryFault */
+	}
 
-	/* Clip pager response to fit into the vm_map_entry. */
-	f_first = MAX(OFF_TO_IDX(fs->entry->offset), f_first);
-	f_last = MIN(OFF_TO_IDX(fs->entry->end - fs->entry->start +
-	    fs->entry->offset), f_last);
-
-	pidx = f_first;
-	for (m = vm_page_lookup(fs->first_object, pidx); pidx <= f_last;
-	    pidx++, m = vm_page_next(m)) {
-		/*
-		 * Check each page to ensure that the driver is
-		 * obeying the interface: the page must be installed
-		 * in the object, fully valid, and exclusively busied.
-		 */
-		MPASS(m != NULL);
-		MPASS(vm_page_xbusied(m));
-		MPASS(m->valid == VM_PAGE_BITS_ALL);
-		MPASS(m->object == fs->first_object);
-		MPASS(m->pindex == pidx);
+	/*
+	 * The map is unchanged after our last unlock.  Process the fault.
+	 *
+	 * The range [pager_first, pager_last] that is given to the
+	 * pager is only a hint.  The pager may populate any range
+	 * within the object that includes the requested page index.
+	 * In case the pager expanded the range, clip it to fit into
+	 * the map entry.
+	 */
+	map_first = MAX(OFF_TO_IDX(fs->entry->offset), pager_first);
+	if (map_first > pager_first)
+		vm_fault_populate_cleanup(fs->first_object, pager_first,
+		    map_first - 1);
+	map_last = MIN(OFF_TO_IDX(fs->entry->end - fs->entry->start +
+	    fs->entry->offset), pager_last);
+	if (map_last < pager_last)
+		vm_fault_populate_cleanup(fs->first_object, map_last + 1,
+		    pager_last);
+	for (pidx = map_first, m = vm_page_lookup(fs->first_object, pidx);
+	    pidx <= map_last; pidx++, m = vm_page_next(m)) {
+		vm_fault_populate_check_page(m);
 		vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags,
 		    true);
 		VM_OBJECT_WUNLOCK(fs->first_object);
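
For context on the race fix, the following is a hedged userland model
of the timestamp/generation pattern seen in the hunk above; the
struct, helper, and retry loop are simplified, illustrative stand-ins,
not FreeBSD code.

#include <stdbool.h>
#include <stdio.h>

/*
 * Model: a fault handler samples the map timestamp, drops the lock
 * while the pager runs (busying pages), and re-checks the timestamp
 * afterwards.  If another thread modified the map in between, the
 * busied pages must be released before the fault is retried.
 */
struct map {
	unsigned int timestamp;		/* bumped on every modification */
};

static bool
fault_attempt(struct map *map, bool racing_writer)
{
	unsigned int map_generation;

	map_generation = map->timestamp;	/* sampled under the lock */
	/* ...map lock dropped; pager populates and busies pages... */
	if (racing_writer)
		map->timestamp++;		/* concurrent map change */
	/* ...map lock re-acquired... */
	if (map->timestamp != map_generation) {
		/* Here the kernel must unbusy the pager's pages. */
		return (false);			/* i.e. RetryFault */
	}
	return (true);				/* safe to enter the pages */
}

int
main(void)
{
	struct map map = { .timestamp = 0 };
	int attempt;

	for (attempt = 1; !fault_attempt(&map, attempt == 1); attempt++)
		printf("attempt %d: map changed, retrying\n", attempt);
	printf("attempt %d: fault handled\n", attempt);
	return (0);
}

The substance of the fix is the cleanup step on that retry path:
before this commit, the pages busied by the pager were left busied
when the KERN_RESOURCE_SHORTAGE ("RetryFault") return was taken.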