 - Rename vm_map_clean() to vm_map_sync().  This better reflects the fact
   that msync(2) is its only caller.
 - Migrate the parts of the old vm_map_clean() that examined the internals
   of a vm object to a new function vm_object_sync() that is implemented in
   vm_object.c.  At the same time, introduce the necessary vm object locking
   so that vm_map_sync() and vm_object_sync() can be called without Giant.

Reviewed by:	tegge
Alan Cox 2003-11-09 05:25:35 +00:00
parent 1bcb5f5a96
commit 950f8459d4
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=122349
5 changed files with 78 additions and 61 deletions
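
Taken together with the header hunks below, the change gives msync(2) a two-layer path into the pager. The prototypes are copied from the diff; the file paths and the notes on locking are my gloss of the commit message and the function bodies that follow, not text from the headers themselves:

/*
 * msync(2)                                sys/vm/vm_mmap.c
 *   -> vm_map_sync(map, start, end, ...)  sys/vm/vm_map.c
 *        takes the map read lock; callers no longer need Giant
 *        -> vm_object_sync(object, ...)   sys/vm/vm_object.c
 *             takes the vm object lock; acquires Giant only around
 *             the vnode write-back it still has to do
 */
int	vm_map_sync(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
void	vm_object_sync(vm_object_t, vm_ooffset_t, vm_size_t, boolean_t,
	    boolean_t);
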

sys/vm/vm_map.c

@@ -1946,7 +1946,7 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
}
/*
* vm_map_clean
* vm_map_sync
*
* Push any dirty cached pages in the address range to their pager.
* If syncio is TRUE, dirty pages are written synchronously.
@@ -1955,7 +1955,7 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
* Returns an error if any part of the specified range is not mapped.
*/
int
vm_map_clean(
vm_map_sync(
vm_map_t map,
vm_offset_t start,
vm_offset_t end,
@@ -1968,8 +1968,6 @@ vm_map_clean(
vm_object_t object;
vm_ooffset_t offset;
GIANT_REQUIRED;
vm_map_lock_read(map);
VM_MAP_RANGE_CHECK(map, start, end);
if (!vm_map_lookup_entry(map, start, &entry)) {
@@ -1993,9 +1991,11 @@ vm_map_clean(
}
if (invalidate) {
mtx_lock(&Giant);
vm_page_lock_queues();
pmap_remove(map->pmap, start, end);
vm_page_unlock_queues();
mtx_unlock(&Giant);
}
/*
* Make a second pass, cleaning/uncaching pages from the indicated
@@ -2021,61 +2021,7 @@ vm_map_clean(
} else {
object = current->object.vm_object;
}
/*
* Note that there is absolutely no sense in writing out
* anonymous objects, so we track down the vnode object
* to write out.
* We invalidate (remove) all pages from the address space
* anyway, for semantic correctness.
*
* note: certain anonymous maps, such as MAP_NOSYNC maps,
* may start out with a NULL object.
*/
while (object && object->backing_object) {
object = object->backing_object;
offset += object->backing_object_offset;
if (object->size < OFF_TO_IDX(offset + size))
size = IDX_TO_OFF(object->size) - offset;
}
if (object && (object->type == OBJT_VNODE) &&
(current->protection & VM_PROT_WRITE)) {
/*
* Flush pages if writing is allowed, invalidate them
* if invalidation requested. Pages undergoing I/O
* will be ignored by vm_object_page_remove().
*
* We cannot lock the vnode and then wait for paging
* to complete without deadlocking against vm_fault.
* Instead we simply call vm_object_page_remove() and
* allow it to block internally on a page-by-page
* basis when it encounters pages undergoing async
* I/O.
*/
int flags;
vm_object_reference(object);
vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
flags |= invalidate ? OBJPC_INVAL : 0;
VM_OBJECT_LOCK(object);
vm_object_page_clean(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + size + PAGE_MASK),
flags);
VM_OBJECT_UNLOCK(object);
VOP_UNLOCK(object->handle, 0, curthread);
vm_object_deallocate(object);
}
if (object && invalidate &&
((object->type == OBJT_VNODE) ||
(object->type == OBJT_DEVICE))) {
VM_OBJECT_LOCK(object);
vm_object_page_remove(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + size + PAGE_MASK),
FALSE);
VM_OBJECT_UNLOCK(object);
}
vm_object_sync(object, offset, size, syncio, invalidate);
start += size;
}
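
With the object-level logic moved out, the second pass of vm_map_sync() collapses to computing each entry's object, offset, and length and handing them to vm_object_sync(). The sketch below is a condensed paraphrase of how that loop reads after this change, not the verbatim source; the submap case and the error handling are elided, and the vm_map_entry field names are taken from the surrounding kernel code rather than from the hunk above.

for (current = entry; current->start < end; current = current->next) {
	/* Clip the request to this entry. */
	size = (end <= current->end ? end : current->end) - start;
	offset = current->offset + (start - current->start);
	if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
		/* ... translate through the submap to find the object ... */
	} else {
		object = current->object.vm_object;
	}
	/*
	 * Everything that used to inspect the object here -- chasing
	 * backing objects, flushing vnode pages, invalidating -- is
	 * now a single call, and it does its own locking.
	 */
	vm_object_sync(object, offset, size, syncio, invalidate);
	start += size;
}
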

sys/vm/vm_map.h

@@ -337,13 +337,13 @@ int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_obje
vm_pindex_t *, vm_prot_t *, boolean_t *);
void vm_map_lookup_done (vm_map_t, vm_map_entry_t);
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
int vm_map_clean (vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
void vm_map_startup (void);
int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
int vm_map_sync(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int);
void vm_map_simplify_entry (vm_map_t, vm_map_entry_t);
void vm_init2 (void);

sys/vm/vm_mmap.c

@@ -574,7 +574,7 @@ msync(td, uap)
/*
* Clean the pages and interpret the return value.
*/
rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
(flags & MS_INVALIDATE) != 0);
done2:
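
Since msync(2) is the only caller of vm_map_sync(), the two booleans above are the whole user-visible contract: leaving MS_ASYNC out gives syncio = TRUE, and MS_INVALIDATE gives invalidate = TRUE. Below is a small, ordinary POSIX program exercising both modes; nothing in it is taken from the FreeBSD sources, and the file name is made up for the example.

#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	const size_t len = 4096;
	int fd;
	char *p;

	fd = open("msync_demo.dat", O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (fd == -1 || ftruncate(fd, (off_t)len) == -1)
		err(1, "open/ftruncate");

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");

	memset(p, 'x', len);		/* dirty the mapped pages */

	/* MS_ASYNC: syncio = FALSE -- the write-back is started,
	 * not waited for. */
	if (msync(p, len, MS_ASYNC) == -1)
		err(1, "msync(MS_ASYNC)");

	p[0] = 'y';			/* dirty the first page again */

	/* MS_SYNC | MS_INVALIDATE: syncio = TRUE, invalidate = TRUE --
	 * wait for the write-back and drop the cached pages. */
	if (msync(p, len, MS_SYNC | MS_INVALIDATE) == -1)
		err(1, "msync(MS_SYNC | MS_INVALIDATE)");

	if (munmap(p, len) == -1)
		err(1, "munmap");
	close(fd);
	return (0);
}
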

sys/vm/vm_object.c

@@ -963,6 +963,75 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
return(maxf + 1);
}
/*
* Note that there is absolutely no sense in writing out
* anonymous objects, so we track down the vnode object
* to write out.
* We invalidate (remove) all pages from the address space
* for semantic correctness.
*
* Note: certain anonymous maps, such as MAP_NOSYNC maps,
* may start out with a NULL object.
*/
void
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
boolean_t syncio, boolean_t invalidate)
{
vm_object_t backing_object;
struct vnode *vp;
int flags;
if (object == NULL)
return;
VM_OBJECT_LOCK(object);
while ((backing_object = object->backing_object) != NULL) {
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_UNLOCK(object);
object = backing_object;
offset += object->backing_object_offset;
if (object->size < OFF_TO_IDX(offset + size))
size = IDX_TO_OFF(object->size) - offset;
}
/*
* Flush pages if writing is allowed, invalidate them
* if invalidation requested. Pages undergoing I/O
* will be ignored by vm_object_page_remove().
*
* We cannot lock the vnode and then wait for paging
* to complete without deadlocking against vm_fault.
* Instead we simply call vm_object_page_remove() and
* allow it to block internally on a page-by-page
* basis when it encounters pages undergoing async
* I/O.
*/
if (object->type == OBJT_VNODE &&
(object->flags & OBJ_MIGHTBEDIRTY) != 0) {
vp = object->handle;
VM_OBJECT_UNLOCK(object);
mtx_lock(&Giant);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
flags |= invalidate ? OBJPC_INVAL : 0;
VM_OBJECT_LOCK(object);
vm_object_page_clean(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + size + PAGE_MASK),
flags);
VM_OBJECT_UNLOCK(object);
VOP_UNLOCK(vp, 0, curthread);
mtx_unlock(&Giant);
VM_OBJECT_LOCK(object);
}
if ((object->type == OBJT_VNODE ||
object->type == OBJT_DEVICE) && invalidate) {
vm_object_page_remove(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + size + PAGE_MASK),
FALSE);
}
VM_OBJECT_UNLOCK(object);
}
/*
* vm_object_madvise:
*
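
The loop at the top of vm_object_sync() is a hand-over-hand (lock-coupling) walk down the backing_object chain: the next object's lock is taken before the current one is dropped, so the chain cannot shift underneath the walker, and the function ends up holding only the lock of the object it will actually clean. Below is a minimal userland sketch of the same idiom, with pthread mutexes standing in for VM_OBJECT_LOCK and an invented struct obj standing in for the vm object; it illustrates the pattern and is not kernel code.

#include <pthread.h>
#include <stddef.h>

/* Invented stand-in for a vm object with a backing chain. */
struct obj {
	pthread_mutex_t	lock;		/* plays the role of VM_OBJECT_LOCK */
	struct obj	*backing;	/* plays the role of backing_object */
	long		backing_off;	/* plays the role of backing_object_offset */
};

/*
 * Walk to the bottom of the backing chain hand-over-hand, accumulating
 * the chain offsets along the way, and return with only the final
 * object's lock held -- the same shape as the loop in vm_object_sync().
 */
static struct obj *
walk_backing(struct obj *o, long *offset)
{
	struct obj *b;

	pthread_mutex_lock(&o->lock);
	while ((b = o->backing) != NULL) {
		pthread_mutex_lock(&b->lock);	/* take the next lock first ... */
		pthread_mutex_unlock(&o->lock);	/* ... then release the current one */
		o = b;
		*offset += o->backing_off;
	}
	return (o);				/* caller unlocks when finished */
}

int
main(void)
{
	struct obj bottom = { .backing = NULL };
	struct obj top = { .backing = &bottom, .backing_off = 0 };
	long off = 0;
	struct obj *o;

	pthread_mutex_init(&bottom.lock, NULL);
	pthread_mutex_init(&top.lock, NULL);

	o = walk_backing(&top, &off);
	pthread_mutex_unlock(&o->lock);		/* walk returned with this held */
	return (o == &bottom ? 0 : 1);
}
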

sys/vm/vm_object.h

@@ -216,6 +216,8 @@ void vm_object_reference (vm_object_t);
void vm_object_reference_locked(vm_object_t);
void vm_object_shadow (vm_object_t *, vm_ooffset_t *, vm_size_t);
void vm_object_split(vm_map_entry_t);
void vm_object_sync(vm_object_t, vm_ooffset_t, vm_size_t, boolean_t,
boolean_t);
void vm_object_madvise (vm_object_t, vm_pindex_t, int, int);
#endif /* _KERNEL */