Rename VM_OBJECT_LOCK(), VM_OBJECT_UNLOCK() and VM_OBJECT_TRYLOCK() to
their "write" versions.

Sponsored by:	EMC / Isilon storage division

parent 1f1e13ca03
commit 15bf891afe
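The change below is mechanical: every exclusive acquire or release of a VM object lock keeps its semantics and only its spelling changes. As a minimal sketch of the pattern (illustrative only; the function name example_touch_object and the include list are assumptions, not part of this commit, while the lock macros and the pip calls appear in the diff itself):

	#include <sys/param.h>
	#include <sys/lock.h>

	#include <vm/vm.h>
	#include <vm/vm_object.h>

	/*
	 * Illustrative sketch: a critical section on a VM object written with
	 * the macro names introduced by this commit.  The old spellings it
	 * replaces are shown in comments; the locking semantics are unchanged.
	 */
	static void
	example_touch_object(vm_object_t obj)
	{
		VM_OBJECT_WLOCK(obj);		/* was: VM_OBJECT_LOCK(obj) */
		vm_object_pip_add(obj, 1);	/* arbitrary work done under the lock */
		vm_object_pip_wakeup(obj);
		VM_OBJECT_WUNLOCK(obj);		/* was: VM_OBJECT_UNLOCK(obj) */
	}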
@@ -450,7 +450,7 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
 ASSERT(obj != NULL);

 off = start & PAGEOFFSET;
-VM_OBJECT_LOCK(obj);
+VM_OBJECT_WLOCK(obj);
 for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
 vm_page_t pp;
 int nbytes = imin(PAGESIZE - off, len);
@@ -467,23 +467,23 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
 ("zfs update_pages: unbusy page in putpages case"));
 KASSERT(!pmap_page_is_write_mapped(pp),
 ("zfs update_pages: writable page in putpages case"));
-VM_OBJECT_UNLOCK(obj);
+VM_OBJECT_WUNLOCK(obj);

 va = zfs_map_page(pp, &sf);
 (void) dmu_write(os, oid, start, nbytes, va, tx);
 zfs_unmap_page(sf);

-VM_OBJECT_LOCK(obj);
+VM_OBJECT_WLOCK(obj);
 vm_page_undirty(pp);
 } else if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
-VM_OBJECT_UNLOCK(obj);
+VM_OBJECT_WUNLOCK(obj);

 va = zfs_map_page(pp, &sf);
 (void) dmu_read(os, oid, start+off, nbytes,
 va+off, DMU_READ_PREFETCH);;
 zfs_unmap_page(sf);

-VM_OBJECT_LOCK(obj);
+VM_OBJECT_WLOCK(obj);
 page_unbusy(pp);
 }
 len -= nbytes;
@@ -491,7 +491,7 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
 }
 if (segflg != UIO_NOCOPY)
 vm_object_pip_wakeupn(obj, 0);
-VM_OBJECT_UNLOCK(obj);
+VM_OBJECT_WUNLOCK(obj);
 }

 /*
@@ -523,7 +523,7 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
 ASSERT(obj != NULL);
 ASSERT((uio->uio_loffset & PAGEOFFSET) == 0);

-VM_OBJECT_LOCK(obj);
+VM_OBJECT_WLOCK(obj);
 for (start = uio->uio_loffset; len > 0; start += PAGESIZE) {
 int bytes = MIN(PAGESIZE, len);

@@ -531,14 +531,14 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
 VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_IGN_SBUSY);
 if (pp->valid == 0) {
 vm_page_io_start(pp);
-VM_OBJECT_UNLOCK(obj);
+VM_OBJECT_WUNLOCK(obj);
 va = zfs_map_page(pp, &sf);
 error = dmu_read(os, zp->z_id, start, bytes, va,
 DMU_READ_PREFETCH);
 if (bytes != PAGESIZE && error == 0)
 bzero(va + bytes, PAGESIZE - bytes);
 zfs_unmap_page(sf);
-VM_OBJECT_LOCK(obj);
+VM_OBJECT_WLOCK(obj);
 vm_page_io_finish(pp);
 vm_page_lock(pp);
 if (error) {
@@ -555,7 +555,7 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
 uio->uio_offset += bytes;
 len -= bytes;
 }
-VM_OBJECT_UNLOCK(obj);
+VM_OBJECT_WUNLOCK(obj);
 return (error);
 }

@@ -587,7 +587,7 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)

 start = uio->uio_loffset;
 off = start & PAGEOFFSET;
-VM_OBJECT_LOCK(obj);
+VM_OBJECT_WLOCK(obj);
 for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
 vm_page_t pp;
 uint64_t bytes = MIN(PAGESIZE - off, len);
@@ -596,23 +596,23 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
 struct sf_buf *sf;
 caddr_t va;

-VM_OBJECT_UNLOCK(obj);
+VM_OBJECT_WUNLOCK(obj);
 va = zfs_map_page(pp, &sf);
 error = uiomove(va + off, bytes, UIO_READ, uio);
 zfs_unmap_page(sf);
-VM_OBJECT_LOCK(obj);
+VM_OBJECT_WLOCK(obj);
 page_unhold(pp);
 } else {
-VM_OBJECT_UNLOCK(obj);
+VM_OBJECT_WUNLOCK(obj);
 error = dmu_read_uio(os, zp->z_id, uio, bytes);
-VM_OBJECT_LOCK(obj);
+VM_OBJECT_WLOCK(obj);
 }
 len -= bytes;
 off = 0;
 if (error)
 break;
 }
-VM_OBJECT_UNLOCK(obj);
+VM_OBJECT_WUNLOCK(obj);
 return (error);
 }

@@ -5683,7 +5683,7 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
 mfirst = m[reqstart];
 mlast = m[reqstart + reqsize - 1];

-VM_OBJECT_LOCK(object);
+VM_OBJECT_WLOCK(object);

 for (i = 0; i < reqstart; i++) {
 vm_page_lock(m[i]);
@@ -5699,7 +5699,7 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
 if (mreq->valid && reqsize == 1) {
 if (mreq->valid != VM_PAGE_BITS_ALL)
 vm_page_zero_invalid(mreq, TRUE);
-VM_OBJECT_UNLOCK(object);
+VM_OBJECT_WUNLOCK(object);
 ZFS_EXIT(zfsvfs);
 return (VM_PAGER_OK);
 }
@@ -5715,7 +5715,7 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
 vm_page_unlock(m[i]);
 }
 }
-VM_OBJECT_UNLOCK(object);
+VM_OBJECT_WUNLOCK(object);
 ZFS_EXIT(zfsvfs);
 return (VM_PAGER_BAD);
 }
@@ -5724,7 +5724,7 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
 if (IDX_TO_OFF(mlast->pindex) + lsize > object->un_pager.vnp.vnp_size)
 lsize = object->un_pager.vnp.vnp_size - IDX_TO_OFF(mlast->pindex);

-VM_OBJECT_UNLOCK(object);
+VM_OBJECT_WUNLOCK(object);

 for (i = reqstart; i < reqstart + reqsize; i++) {
 size = PAGE_SIZE;
@@ -5740,7 +5740,7 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
 break;
 }

-VM_OBJECT_LOCK(object);
+VM_OBJECT_WLOCK(object);

 for (i = reqstart; i < reqstart + reqsize; i++) {
 if (!error)
@@ -5750,7 +5750,7 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
 vm_page_readahead_finish(m[i]);
 }

-VM_OBJECT_UNLOCK(object);
+VM_OBJECT_WUNLOCK(object);

 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
 ZFS_EXIT(zfsvfs);
@@ -1033,9 +1033,9 @@ linprocfs_doprocmaps(PFS_FILL_ARGS)
 e_end = entry->end;
 obj = entry->object.vm_object;
 for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
-VM_OBJECT_LOCK(tobj);
+VM_OBJECT_WLOCK(tobj);
 if (lobj != obj)
-VM_OBJECT_UNLOCK(lobj);
+VM_OBJECT_WUNLOCK(lobj);
 lobj = tobj;
 }
 last_timestamp = map->timestamp;
@@ -1051,11 +1051,11 @@ linprocfs_doprocmaps(PFS_FILL_ARGS)
 else
 vp = NULL;
 if (lobj != obj)
-VM_OBJECT_UNLOCK(lobj);
+VM_OBJECT_WUNLOCK(lobj);
 flags = obj->flags;
 ref_count = obj->ref_count;
 shadow_count = obj->shadow_count;
-VM_OBJECT_UNLOCK(obj);
+VM_OBJECT_WUNLOCK(obj);
 if (vp) {
 vn_fullpath(td, vp, &name, &freename);
 vn_lock(vp, LK_SHARED | LK_RETRY);
@@ -545,7 +545,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
 * because vm_page_grab() may sleep and we can't hold a mutex
 * while sleeping.
 */
-VM_OBJECT_LOCK(mem->am_obj);
+VM_OBJECT_WLOCK(mem->am_obj);
 for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
 /*
 * Find a page from the object and wire it
@@ -558,14 +558,14 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
 VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 AGP_DPF("found page pa=%#jx\n", (uintmax_t)VM_PAGE_TO_PHYS(m));
 }
-VM_OBJECT_UNLOCK(mem->am_obj);
+VM_OBJECT_WUNLOCK(mem->am_obj);

 mtx_lock(&sc->as_lock);

 if (mem->am_is_bound) {
 device_printf(dev, "memory already bound\n");
 error = EINVAL;
-VM_OBJECT_LOCK(mem->am_obj);
+VM_OBJECT_WLOCK(mem->am_obj);
 i = 0;
 goto bad;
 }
@@ -574,7 +574,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
 * Bind the individual pages and flush the chipset's
 * TLB.
 */
-VM_OBJECT_LOCK(mem->am_obj);
+VM_OBJECT_WLOCK(mem->am_obj);
 for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
 m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));

@@ -602,7 +602,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
 }
 vm_page_wakeup(m);
 }
-VM_OBJECT_UNLOCK(mem->am_obj);
+VM_OBJECT_WUNLOCK(mem->am_obj);

 /*
 * Flush the cpu cache since we are providing a new mapping
@@ -632,7 +632,7 @@ bad:
 vm_page_unwire(m, 0);
 vm_page_unlock(m);
 }
-VM_OBJECT_UNLOCK(mem->am_obj);
+VM_OBJECT_WUNLOCK(mem->am_obj);

 return error;
 }
@@ -659,14 +659,14 @@ agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
 */
 for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
 AGP_UNBIND_PAGE(dev, mem->am_offset + i);
-VM_OBJECT_LOCK(mem->am_obj);
+VM_OBJECT_WLOCK(mem->am_obj);
 for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
 m = vm_page_lookup(mem->am_obj, atop(i));
 vm_page_lock(m);
 vm_page_unwire(m, 0);
 vm_page_unlock(m);
 }
-VM_OBJECT_UNLOCK(mem->am_obj);
+VM_OBJECT_WUNLOCK(mem->am_obj);

 agp_flush_cache();
 AGP_FLUSH_TLB(dev);
@@ -1968,10 +1968,10 @@ agp_i810_alloc_memory(device_t dev, int type, vm_size_t size)
 * Allocate and wire down the page now so that we can
 * get its physical address.
 */
-VM_OBJECT_LOCK(mem->am_obj);
+VM_OBJECT_WLOCK(mem->am_obj);
 m = vm_page_grab(mem->am_obj, 0, VM_ALLOC_NOBUSY |
 VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
-VM_OBJECT_UNLOCK(mem->am_obj);
+VM_OBJECT_WUNLOCK(mem->am_obj);
 mem->am_physical = VM_PAGE_TO_PHYS(m);
 } else {
 /* Our allocation is already nicely wired down for us.
@@ -2006,12 +2006,12 @@ agp_i810_free_memory(device_t dev, struct agp_memory *mem)
 /*
 * Unwire the page which we wired in alloc_memory.
 */
-VM_OBJECT_LOCK(mem->am_obj);
+VM_OBJECT_WLOCK(mem->am_obj);
 m = vm_page_lookup(mem->am_obj, 0);
 vm_page_lock(m);
 vm_page_unwire(m, 0);
 vm_page_unlock(m);
-VM_OBJECT_UNLOCK(mem->am_obj);
+VM_OBJECT_WUNLOCK(mem->am_obj);
 } else {
 contigfree(sc->argb_cursor, mem->am_size, M_AGP);
 sc->argb_cursor = NULL;
@ -990,14 +990,14 @@ i915_gem_swap_io(struct drm_device *dev, struct drm_i915_gem_object *obj,
|
||||
vm_obj = obj->base.vm_obj;
|
||||
ret = 0;
|
||||
|
||||
VM_OBJECT_LOCK(vm_obj);
|
||||
VM_OBJECT_WLOCK(vm_obj);
|
||||
vm_object_pip_add(vm_obj, 1);
|
||||
while (size > 0) {
|
||||
obj_pi = OFF_TO_IDX(offset);
|
||||
obj_po = offset & PAGE_MASK;
|
||||
|
||||
m = i915_gem_wire_page(vm_obj, obj_pi);
|
||||
VM_OBJECT_UNLOCK(vm_obj);
|
||||
VM_OBJECT_WUNLOCK(vm_obj);
|
||||
|
||||
sched_pin();
|
||||
sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
|
||||
@ -1031,7 +1031,7 @@ i915_gem_swap_io(struct drm_device *dev, struct drm_i915_gem_object *obj,
|
||||
}
|
||||
sf_buf_free(sf);
|
||||
sched_unpin();
|
||||
VM_OBJECT_LOCK(vm_obj);
|
||||
VM_OBJECT_WLOCK(vm_obj);
|
||||
if (rw == UIO_WRITE)
|
||||
vm_page_dirty(m);
|
||||
vm_page_reference(m);
|
||||
@ -1044,7 +1044,7 @@ i915_gem_swap_io(struct drm_device *dev, struct drm_i915_gem_object *obj,
|
||||
break;
|
||||
}
|
||||
vm_object_pip_wakeup(vm_obj);
|
||||
VM_OBJECT_UNLOCK(vm_obj);
|
||||
VM_OBJECT_WUNLOCK(vm_obj);
|
||||
|
||||
return (ret);
|
||||
}
|
||||
@ -1357,7 +1357,7 @@ i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
|
||||
} else
|
||||
oldm = NULL;
|
||||
retry:
|
||||
VM_OBJECT_UNLOCK(vm_obj);
|
||||
VM_OBJECT_WUNLOCK(vm_obj);
|
||||
unlocked_vmobj:
|
||||
cause = ret = 0;
|
||||
m = NULL;
|
||||
@ -1407,7 +1407,7 @@ unlocked_vmobj:
|
||||
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
|
||||
|
||||
obj->fault_mappable = true;
|
||||
VM_OBJECT_LOCK(vm_obj);
|
||||
VM_OBJECT_WLOCK(vm_obj);
|
||||
m = vm_phys_fictitious_to_vm_page(dev->agp->base + obj->gtt_offset +
|
||||
offset);
|
||||
if (m == NULL) {
|
||||
@ -1452,7 +1452,7 @@ out:
|
||||
kern_yield(PRI_USER);
|
||||
goto unlocked_vmobj;
|
||||
}
|
||||
VM_OBJECT_LOCK(vm_obj);
|
||||
VM_OBJECT_WLOCK(vm_obj);
|
||||
vm_object_pip_wakeup(vm_obj);
|
||||
return (VM_PAGER_ERROR);
|
||||
}
|
||||
@ -2208,12 +2208,12 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
|
||||
obj->pages = malloc(page_count * sizeof(vm_page_t), DRM_I915_GEM,
|
||||
M_WAITOK);
|
||||
vm_obj = obj->base.vm_obj;
|
||||
VM_OBJECT_LOCK(vm_obj);
|
||||
VM_OBJECT_WLOCK(vm_obj);
|
||||
for (i = 0; i < page_count; i++) {
|
||||
if ((obj->pages[i] = i915_gem_wire_page(vm_obj, i)) == NULL)
|
||||
goto failed;
|
||||
}
|
||||
VM_OBJECT_UNLOCK(vm_obj);
|
||||
VM_OBJECT_WUNLOCK(vm_obj);
|
||||
if (i915_gem_object_needs_bit17_swizzle(obj))
|
||||
i915_gem_object_do_bit_17_swizzle(obj);
|
||||
return (0);
|
||||
@ -2226,7 +2226,7 @@ failed:
|
||||
vm_page_unlock(m);
|
||||
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
|
||||
}
|
||||
VM_OBJECT_UNLOCK(vm_obj);
|
||||
VM_OBJECT_WUNLOCK(vm_obj);
|
||||
free(obj->pages, DRM_I915_GEM);
|
||||
obj->pages = NULL;
|
||||
return (-EIO);
|
||||
@ -2272,7 +2272,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
|
||||
if (obj->madv == I915_MADV_DONTNEED)
|
||||
obj->dirty = 0;
|
||||
page_count = obj->base.size / PAGE_SIZE;
|
||||
VM_OBJECT_LOCK(obj->base.vm_obj);
|
||||
VM_OBJECT_WLOCK(obj->base.vm_obj);
|
||||
#if GEM_PARANOID_CHECK_GTT
|
||||
i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
|
||||
#endif
|
||||
@ -2287,7 +2287,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
|
||||
vm_page_unlock(m);
|
||||
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
|
||||
}
|
||||
VM_OBJECT_UNLOCK(obj->base.vm_obj);
|
||||
VM_OBJECT_WUNLOCK(obj->base.vm_obj);
|
||||
obj->dirty = 0;
|
||||
free(obj->pages, DRM_I915_GEM);
|
||||
obj->pages = NULL;
|
||||
@ -2309,7 +2309,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
|
||||
if (devobj != NULL) {
|
||||
page_count = OFF_TO_IDX(obj->base.size);
|
||||
|
||||
VM_OBJECT_LOCK(devobj);
|
||||
VM_OBJECT_WLOCK(devobj);
|
||||
retry:
|
||||
for (i = 0; i < page_count; i++) {
|
||||
m = vm_page_lookup(devobj, i);
|
||||
@ -2319,7 +2319,7 @@ retry:
|
||||
goto retry;
|
||||
cdev_pager_free_page(devobj, m);
|
||||
}
|
||||
VM_OBJECT_UNLOCK(devobj);
|
||||
VM_OBJECT_WUNLOCK(devobj);
|
||||
vm_object_deallocate(devobj);
|
||||
}
|
||||
|
||||
@ -2437,9 +2437,9 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
|
||||
vm_object_t vm_obj;
|
||||
|
||||
vm_obj = obj->base.vm_obj;
|
||||
VM_OBJECT_LOCK(vm_obj);
|
||||
VM_OBJECT_WLOCK(vm_obj);
|
||||
vm_object_page_remove(vm_obj, 0, 0, false);
|
||||
VM_OBJECT_UNLOCK(vm_obj);
|
||||
VM_OBJECT_WUNLOCK(vm_obj);
|
||||
obj->madv = I915_MADV_PURGED_INTERNAL;
|
||||
}
|
||||
|
||||
@ -3567,13 +3567,13 @@ i915_gem_detach_phys_object(struct drm_device *dev,
|
||||
vaddr = obj->phys_obj->handle->vaddr;
|
||||
|
||||
page_count = obj->base.size / PAGE_SIZE;
|
||||
VM_OBJECT_LOCK(obj->base.vm_obj);
|
||||
VM_OBJECT_WLOCK(obj->base.vm_obj);
|
||||
for (i = 0; i < page_count; i++) {
|
||||
m = i915_gem_wire_page(obj->base.vm_obj, i);
|
||||
if (m == NULL)
|
||||
continue; /* XXX */
|
||||
|
||||
VM_OBJECT_UNLOCK(obj->base.vm_obj);
|
||||
VM_OBJECT_WUNLOCK(obj->base.vm_obj);
|
||||
sf = sf_buf_alloc(m, 0);
|
||||
if (sf != NULL) {
|
||||
dst = (char *)sf_buf_kva(sf);
|
||||
@ -3582,7 +3582,7 @@ i915_gem_detach_phys_object(struct drm_device *dev,
|
||||
}
|
||||
drm_clflush_pages(&m, 1);
|
||||
|
||||
VM_OBJECT_LOCK(obj->base.vm_obj);
|
||||
VM_OBJECT_WLOCK(obj->base.vm_obj);
|
||||
vm_page_reference(m);
|
||||
vm_page_lock(m);
|
||||
vm_page_dirty(m);
|
||||
@ -3590,7 +3590,7 @@ i915_gem_detach_phys_object(struct drm_device *dev,
|
||||
vm_page_unlock(m);
|
||||
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
|
||||
}
|
||||
VM_OBJECT_UNLOCK(obj->base.vm_obj);
|
||||
VM_OBJECT_WUNLOCK(obj->base.vm_obj);
|
||||
intel_gtt_chipset_flush();
|
||||
|
||||
obj->phys_obj->cur_obj = NULL;
|
||||
@ -3632,7 +3632,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
|
||||
|
||||
page_count = obj->base.size / PAGE_SIZE;
|
||||
|
||||
VM_OBJECT_LOCK(obj->base.vm_obj);
|
||||
VM_OBJECT_WLOCK(obj->base.vm_obj);
|
||||
ret = 0;
|
||||
for (i = 0; i < page_count; i++) {
|
||||
m = i915_gem_wire_page(obj->base.vm_obj, i);
|
||||
@ -3640,14 +3640,14 @@ i915_gem_attach_phys_object(struct drm_device *dev,
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
VM_OBJECT_UNLOCK(obj->base.vm_obj);
|
||||
VM_OBJECT_WUNLOCK(obj->base.vm_obj);
|
||||
sf = sf_buf_alloc(m, 0);
|
||||
src = (char *)sf_buf_kva(sf);
|
||||
dst = (char *)obj->phys_obj->handle->vaddr + IDX_TO_OFF(i);
|
||||
memcpy(dst, src, PAGE_SIZE);
|
||||
sf_buf_free(sf);
|
||||
|
||||
VM_OBJECT_LOCK(obj->base.vm_obj);
|
||||
VM_OBJECT_WLOCK(obj->base.vm_obj);
|
||||
|
||||
vm_page_reference(m);
|
||||
vm_page_lock(m);
|
||||
@ -3655,7 +3655,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
|
||||
vm_page_unlock(m);
|
||||
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
|
||||
}
|
||||
VM_OBJECT_UNLOCK(obj->base.vm_obj);
|
||||
VM_OBJECT_WUNLOCK(obj->base.vm_obj);
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
@@ -1672,7 +1672,7 @@ pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
 }

 obj = entry->object.vm_object;
-VM_OBJECT_LOCK(obj);
+VM_OBJECT_WLOCK(obj);

 /*
 * Walk the backing_object list to find the base
@@ -1680,9 +1680,9 @@ pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
 */
 for (lobj = tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
 if (tobj != obj)
-VM_OBJECT_LOCK(tobj);
+VM_OBJECT_WLOCK(tobj);
 if (lobj != obj)
-VM_OBJECT_UNLOCK(lobj);
+VM_OBJECT_WUNLOCK(lobj);
 lobj = tobj;
 }

@@ -1692,14 +1692,14 @@ pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
 if (lobj == NULL) {
 PMCDBG(LOG,OPS,2, "hwpmc: lobj unexpectedly NULL! pid=%d "
 "vm_map=%p vm_obj=%p\n", p->p_pid, map, obj);
-VM_OBJECT_UNLOCK(obj);
+VM_OBJECT_WUNLOCK(obj);
 continue;
 }

 if (lobj->type != OBJT_VNODE || lobj->handle == NULL) {
 if (lobj != obj)
-VM_OBJECT_UNLOCK(lobj);
-VM_OBJECT_UNLOCK(obj);
+VM_OBJECT_WUNLOCK(lobj);
+VM_OBJECT_WUNLOCK(obj);
 continue;
 }

@@ -1711,8 +1711,8 @@ pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
 if (entry->start == last_end && lobj->handle == last_vp) {
 last_end = entry->end;
 if (lobj != obj)
-VM_OBJECT_UNLOCK(lobj);
-VM_OBJECT_UNLOCK(obj);
+VM_OBJECT_WUNLOCK(lobj);
+VM_OBJECT_WUNLOCK(obj);
 continue;
 }

@@ -1734,9 +1734,9 @@ pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
 vp = lobj->handle;
 vref(vp);
 if (lobj != obj)
-VM_OBJECT_UNLOCK(lobj);
+VM_OBJECT_WUNLOCK(lobj);

-VM_OBJECT_UNLOCK(obj);
+VM_OBJECT_WUNLOCK(obj);

 freepath = NULL;
 pmc_getfilename(vp, &fullpath, &freepath);
@@ -658,17 +658,17 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
 lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

 rv = VM_PAGER_OK;
-VM_OBJECT_LOCK(sc->object);
+VM_OBJECT_WLOCK(sc->object);
 vm_object_pip_add(sc->object, 1);
 for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
 len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

 m = vm_page_grab(sc->object, i,
 VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
-VM_OBJECT_UNLOCK(sc->object);
+VM_OBJECT_WUNLOCK(sc->object);
 sched_pin();
 sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
-VM_OBJECT_LOCK(sc->object);
+VM_OBJECT_WLOCK(sc->object);
 if (bp->bio_cmd == BIO_READ) {
 if (m->valid != VM_PAGE_BITS_ALL)
 rv = vm_pager_get_pages(sc->object, &m, 1, 0);
@@ -733,7 +733,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
 offs = 0;
 }
 vm_object_pip_subtract(sc->object, 1);
-VM_OBJECT_UNLOCK(sc->object);
+VM_OBJECT_WUNLOCK(sc->object);
 return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
 }

@@ -1069,7 +1069,7 @@ mdresize(struct md_s *sc, struct md_ioctl *mdio)
 oldpages = OFF_TO_IDX(round_page(sc->mediasize));
 newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
 if (newpages < oldpages) {
-VM_OBJECT_LOCK(sc->object);
+VM_OBJECT_WLOCK(sc->object);
 vm_object_page_remove(sc->object, newpages, 0, 0);
 swap_pager_freespace(sc->object, newpages,
 oldpages - newpages);
@@ -1077,7 +1077,7 @@ mdresize(struct md_s *sc, struct md_ioctl *mdio)
 newpages), sc->cred);
 sc->object->charge = IDX_TO_OFF(newpages);
 sc->object->size = newpages;
-VM_OBJECT_UNLOCK(sc->object);
+VM_OBJECT_WUNLOCK(sc->object);
 } else if (newpages > oldpages) {
 res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
 oldpages), sc->cred);
@@ -1094,10 +1094,10 @@ mdresize(struct md_s *sc, struct md_ioctl *mdio)
 return (EDOM);
 }
 }
-VM_OBJECT_LOCK(sc->object);
+VM_OBJECT_WLOCK(sc->object);
 sc->object->charge = IDX_TO_OFF(newpages);
 sc->object->size = newpages;
-VM_OBJECT_UNLOCK(sc->object);
+VM_OBJECT_WUNLOCK(sc->object);
 }
 break;
 default:
@@ -786,9 +786,9 @@ fuse_io_invalbuf(struct vnode *vp, struct thread *td)
 fvdat->flag |= FN_FLUSHINPROG;

 if (vp->v_bufobj.bo_object != NULL) {
-VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
+VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
 vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
-VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
+VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
 }
 error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
 while (error) {
@ -1758,7 +1758,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
|
||||
* can only occur at the file EOF.
|
||||
*/
|
||||
|
||||
VM_OBJECT_LOCK(vp->v_object);
|
||||
VM_OBJECT_WLOCK(vp->v_object);
|
||||
fuse_vm_page_lock_queues();
|
||||
if (pages[ap->a_reqpage]->valid != 0) {
|
||||
for (i = 0; i < npages; ++i) {
|
||||
@ -1769,11 +1769,11 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
|
||||
}
|
||||
}
|
||||
fuse_vm_page_unlock_queues();
|
||||
VM_OBJECT_UNLOCK(vp->v_object);
|
||||
VM_OBJECT_WUNLOCK(vp->v_object);
|
||||
return 0;
|
||||
}
|
||||
fuse_vm_page_unlock_queues();
|
||||
VM_OBJECT_UNLOCK(vp->v_object);
|
||||
VM_OBJECT_WUNLOCK(vp->v_object);
|
||||
|
||||
/*
|
||||
* We use only the kva address for the buffer, but this is extremely
|
||||
@ -1803,7 +1803,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
|
||||
|
||||
if (error && (uio.uio_resid == count)) {
|
||||
FS_DEBUG("error %d\n", error);
|
||||
VM_OBJECT_LOCK(vp->v_object);
|
||||
VM_OBJECT_WLOCK(vp->v_object);
|
||||
fuse_vm_page_lock_queues();
|
||||
for (i = 0; i < npages; ++i) {
|
||||
if (i != ap->a_reqpage) {
|
||||
@ -1813,7 +1813,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
|
||||
}
|
||||
}
|
||||
fuse_vm_page_unlock_queues();
|
||||
VM_OBJECT_UNLOCK(vp->v_object);
|
||||
VM_OBJECT_WUNLOCK(vp->v_object);
|
||||
return VM_PAGER_ERROR;
|
||||
}
|
||||
/*
|
||||
@ -1823,7 +1823,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
|
||||
*/
|
||||
|
||||
size = count - uio.uio_resid;
|
||||
VM_OBJECT_LOCK(vp->v_object);
|
||||
VM_OBJECT_WLOCK(vp->v_object);
|
||||
fuse_vm_page_lock_queues();
|
||||
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
|
||||
vm_page_t m;
|
||||
@ -1886,7 +1886,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
|
||||
}
|
||||
}
|
||||
fuse_vm_page_unlock_queues();
|
||||
VM_OBJECT_UNLOCK(vp->v_object);
|
||||
VM_OBJECT_WUNLOCK(vp->v_object);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1975,9 +1975,9 @@ fuse_vnop_putpages(struct vop_putpages_args *ap)
|
||||
|
||||
for (i = 0; i < nwritten; i++) {
|
||||
rtvals[i] = VM_PAGER_OK;
|
||||
VM_OBJECT_LOCK(pages[i]->object);
|
||||
VM_OBJECT_WLOCK(pages[i]->object);
|
||||
vm_page_undirty(pages[i]);
|
||||
VM_OBJECT_UNLOCK(pages[i]->object);
|
||||
VM_OBJECT_WUNLOCK(pages[i]->object);
|
||||
}
|
||||
}
|
||||
return rtvals[0];
|
||||
|
@ -135,7 +135,7 @@ ncl_getpages(struct vop_getpages_args *ap)
|
||||
* allow the pager to zero-out the blanks. Partially valid pages
|
||||
* can only occur at the file EOF.
|
||||
*/
|
||||
VM_OBJECT_LOCK(object);
|
||||
VM_OBJECT_WLOCK(object);
|
||||
if (pages[ap->a_reqpage]->valid != 0) {
|
||||
for (i = 0; i < npages; ++i) {
|
||||
if (i != ap->a_reqpage) {
|
||||
@ -144,10 +144,10 @@ ncl_getpages(struct vop_getpages_args *ap)
|
||||
vm_page_unlock(pages[i]);
|
||||
}
|
||||
}
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
return (0);
|
||||
}
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
|
||||
/*
|
||||
* We use only the kva address for the buffer, but this is extremely
|
||||
@ -177,7 +177,7 @@ ncl_getpages(struct vop_getpages_args *ap)
|
||||
|
||||
if (error && (uio.uio_resid == count)) {
|
||||
ncl_printf("nfs_getpages: error %d\n", error);
|
||||
VM_OBJECT_LOCK(object);
|
||||
VM_OBJECT_WLOCK(object);
|
||||
for (i = 0; i < npages; ++i) {
|
||||
if (i != ap->a_reqpage) {
|
||||
vm_page_lock(pages[i]);
|
||||
@ -185,7 +185,7 @@ ncl_getpages(struct vop_getpages_args *ap)
|
||||
vm_page_unlock(pages[i]);
|
||||
}
|
||||
}
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
return (VM_PAGER_ERROR);
|
||||
}
|
||||
|
||||
@ -196,7 +196,7 @@ ncl_getpages(struct vop_getpages_args *ap)
|
||||
*/
|
||||
|
||||
size = count - uio.uio_resid;
|
||||
VM_OBJECT_LOCK(object);
|
||||
VM_OBJECT_WLOCK(object);
|
||||
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
|
||||
vm_page_t m;
|
||||
nextoff = toff + PAGE_SIZE;
|
||||
@ -232,7 +232,7 @@ ncl_getpages(struct vop_getpages_args *ap)
|
||||
if (i != ap->a_reqpage)
|
||||
vm_page_readahead_finish(m);
|
||||
}
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
return (0);
|
||||
}
|
||||
|
||||
@ -1354,9 +1354,9 @@ ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
|
||||
* Now, flush as required.
|
||||
*/
|
||||
if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
|
||||
VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
|
||||
VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
|
||||
vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
|
||||
VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
|
||||
VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
|
||||
/*
|
||||
* If the page clean was interrupted, fail the invalidation.
|
||||
* Not doing so, we run the risk of losing dirty pages in the
|
||||
|
@ -216,10 +216,10 @@ ncl_inactive(struct vop_inactive_args *ap)
|
||||
* stateid is available for the writes.
|
||||
*/
|
||||
if (vp->v_object != NULL) {
|
||||
VM_OBJECT_LOCK(vp->v_object);
|
||||
VM_OBJECT_WLOCK(vp->v_object);
|
||||
retv = vm_object_page_clean(vp->v_object, 0, 0,
|
||||
OBJPC_SYNC);
|
||||
VM_OBJECT_UNLOCK(vp->v_object);
|
||||
VM_OBJECT_WUNLOCK(vp->v_object);
|
||||
} else
|
||||
retv = TRUE;
|
||||
if (retv == TRUE) {
|
||||
|
@ -697,9 +697,9 @@ nfs_close(struct vop_close_args *ap)
|
||||
* mmap'ed writes or via write().
|
||||
*/
|
||||
if (nfs_clean_pages_on_close && vp->v_object) {
|
||||
VM_OBJECT_LOCK(vp->v_object);
|
||||
VM_OBJECT_WLOCK(vp->v_object);
|
||||
vm_object_page_clean(vp->v_object, 0, 0, 0);
|
||||
VM_OBJECT_UNLOCK(vp->v_object);
|
||||
VM_OBJECT_WUNLOCK(vp->v_object);
|
||||
}
|
||||
mtx_lock(&np->n_mtx);
|
||||
if (np->n_flag & NMODIFIED) {
|
||||
|
@ -1267,9 +1267,9 @@ nfsvno_fsync(struct vnode *vp, u_int64_t off, int cnt, struct ucred *cred,
|
||||
*/
|
||||
if (vp->v_object &&
|
||||
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
|
||||
VM_OBJECT_LOCK(vp->v_object);
|
||||
VM_OBJECT_WLOCK(vp->v_object);
|
||||
vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
|
||||
VM_OBJECT_UNLOCK(vp->v_object);
|
||||
VM_OBJECT_WUNLOCK(vp->v_object);
|
||||
}
|
||||
error = VOP_FSYNC(vp, MNT_WAIT, td);
|
||||
} else {
|
||||
@ -1298,10 +1298,10 @@ nfsvno_fsync(struct vnode *vp, u_int64_t off, int cnt, struct ucred *cred,
|
||||
|
||||
if (vp->v_object &&
|
||||
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
|
||||
VM_OBJECT_LOCK(vp->v_object);
|
||||
VM_OBJECT_WLOCK(vp->v_object);
|
||||
vm_object_page_clean(vp->v_object, off, off + cnt,
|
||||
OBJPC_SYNC);
|
||||
VM_OBJECT_UNLOCK(vp->v_object);
|
||||
VM_OBJECT_WUNLOCK(vp->v_object);
|
||||
}
|
||||
|
||||
bo = &vp->v_bufobj;
|
||||
|
@ -132,7 +132,7 @@ procfs_doprocmap(PFS_FILL_ARGS)
|
||||
privateresident = 0;
|
||||
obj = entry->object.vm_object;
|
||||
if (obj != NULL) {
|
||||
VM_OBJECT_LOCK(obj);
|
||||
VM_OBJECT_WLOCK(obj);
|
||||
if (obj->shadow_count == 1)
|
||||
privateresident = obj->resident_page_count;
|
||||
}
|
||||
@ -148,9 +148,9 @@ procfs_doprocmap(PFS_FILL_ARGS)
|
||||
|
||||
for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
|
||||
if (tobj != obj)
|
||||
VM_OBJECT_LOCK(tobj);
|
||||
VM_OBJECT_WLOCK(tobj);
|
||||
if (lobj != obj)
|
||||
VM_OBJECT_UNLOCK(lobj);
|
||||
VM_OBJECT_WUNLOCK(lobj);
|
||||
lobj = tobj;
|
||||
}
|
||||
last_timestamp = map->timestamp;
|
||||
@ -181,12 +181,12 @@ procfs_doprocmap(PFS_FILL_ARGS)
|
||||
break;
|
||||
}
|
||||
if (lobj != obj)
|
||||
VM_OBJECT_UNLOCK(lobj);
|
||||
VM_OBJECT_WUNLOCK(lobj);
|
||||
|
||||
flags = obj->flags;
|
||||
ref_count = obj->ref_count;
|
||||
shadow_count = obj->shadow_count;
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
if (vp != NULL) {
|
||||
vn_fullpath(td, vp, &fullpath, &freepath);
|
||||
vrele(vp);
|
||||
|
@ -1272,7 +1272,7 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
|
||||
tmpfs_pages_check_avail(tmp, newpages - oldpages) == 0)
|
||||
return (ENOSPC);
|
||||
|
||||
VM_OBJECT_LOCK(uobj);
|
||||
VM_OBJECT_WLOCK(uobj);
|
||||
if (newsize < oldsize) {
|
||||
/*
|
||||
* Zero the truncated part of the last page.
|
||||
@ -1292,9 +1292,9 @@ retry:
|
||||
} else if (vm_pager_has_page(uobj, idx, NULL, NULL)) {
|
||||
m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL);
|
||||
if (m == NULL) {
|
||||
VM_OBJECT_UNLOCK(uobj);
|
||||
VM_OBJECT_WUNLOCK(uobj);
|
||||
VM_WAIT;
|
||||
VM_OBJECT_LOCK(uobj);
|
||||
VM_OBJECT_WLOCK(uobj);
|
||||
goto retry;
|
||||
} else if (m->valid != VM_PAGE_BITS_ALL) {
|
||||
ma[0] = m;
|
||||
@ -1314,7 +1314,7 @@ retry:
|
||||
if (ignerr)
|
||||
m = NULL;
|
||||
else {
|
||||
VM_OBJECT_UNLOCK(uobj);
|
||||
VM_OBJECT_WUNLOCK(uobj);
|
||||
return (EIO);
|
||||
}
|
||||
}
|
||||
@ -1336,7 +1336,7 @@ retry:
|
||||
}
|
||||
}
|
||||
uobj->size = newpages;
|
||||
VM_OBJECT_UNLOCK(uobj);
|
||||
VM_OBJECT_WUNLOCK(uobj);
|
||||
|
||||
TMPFS_LOCK(tmp);
|
||||
tmp->tm_pages_used += (newpages - oldpages);
|
||||
|
@ -447,7 +447,7 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
|
||||
vm_page_t m;
|
||||
int error, rv;
|
||||
|
||||
VM_OBJECT_LOCK(tobj);
|
||||
VM_OBJECT_WLOCK(tobj);
|
||||
m = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
|
||||
VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
|
||||
if (m->valid != VM_PAGE_BITS_ALL) {
|
||||
@ -457,20 +457,20 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
|
||||
vm_page_lock(m);
|
||||
vm_page_free(m);
|
||||
vm_page_unlock(m);
|
||||
VM_OBJECT_UNLOCK(tobj);
|
||||
VM_OBJECT_WUNLOCK(tobj);
|
||||
return (EIO);
|
||||
}
|
||||
} else
|
||||
vm_page_zero_invalid(m, TRUE);
|
||||
}
|
||||
VM_OBJECT_UNLOCK(tobj);
|
||||
VM_OBJECT_WUNLOCK(tobj);
|
||||
error = uiomove_fromphys(&m, offset, tlen, uio);
|
||||
VM_OBJECT_LOCK(tobj);
|
||||
VM_OBJECT_WLOCK(tobj);
|
||||
vm_page_lock(m);
|
||||
vm_page_unwire(m, TRUE);
|
||||
vm_page_unlock(m);
|
||||
vm_page_wakeup(m);
|
||||
VM_OBJECT_UNLOCK(tobj);
|
||||
VM_OBJECT_WUNLOCK(tobj);
|
||||
|
||||
return (error);
|
||||
}
|
||||
@ -513,7 +513,7 @@ tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio
|
||||
offset = addr & PAGE_MASK;
|
||||
tlen = MIN(PAGE_SIZE - offset, len);
|
||||
|
||||
VM_OBJECT_LOCK(vobj);
|
||||
VM_OBJECT_WLOCK(vobj);
|
||||
lookupvpg:
|
||||
if (((m = vm_page_lookup(vobj, idx)) != NULL) &&
|
||||
vm_page_is_valid(m, offset, tlen)) {
|
||||
@ -527,11 +527,11 @@ lookupvpg:
|
||||
goto lookupvpg;
|
||||
}
|
||||
vm_page_busy(m);
|
||||
VM_OBJECT_UNLOCK(vobj);
|
||||
VM_OBJECT_WUNLOCK(vobj);
|
||||
error = uiomove_fromphys(&m, offset, tlen, uio);
|
||||
VM_OBJECT_LOCK(vobj);
|
||||
VM_OBJECT_WLOCK(vobj);
|
||||
vm_page_wakeup(m);
|
||||
VM_OBJECT_UNLOCK(vobj);
|
||||
VM_OBJECT_WUNLOCK(vobj);
|
||||
return (error);
|
||||
} else if (m != NULL && uio->uio_segflg == UIO_NOCOPY) {
|
||||
KASSERT(offset == 0,
|
||||
@ -546,7 +546,7 @@ lookupvpg:
|
||||
goto lookupvpg;
|
||||
}
|
||||
vm_page_busy(m);
|
||||
VM_OBJECT_UNLOCK(vobj);
|
||||
VM_OBJECT_WUNLOCK(vobj);
|
||||
sched_pin();
|
||||
sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
|
||||
ma = (char *)sf_buf_kva(sf);
|
||||
@ -559,14 +559,14 @@ lookupvpg:
|
||||
}
|
||||
sf_buf_free(sf);
|
||||
sched_unpin();
|
||||
VM_OBJECT_LOCK(vobj);
|
||||
VM_OBJECT_WLOCK(vobj);
|
||||
if (error == 0)
|
||||
m->valid = VM_PAGE_BITS_ALL;
|
||||
vm_page_wakeup(m);
|
||||
VM_OBJECT_UNLOCK(vobj);
|
||||
VM_OBJECT_WUNLOCK(vobj);
|
||||
return (error);
|
||||
}
|
||||
VM_OBJECT_UNLOCK(vobj);
|
||||
VM_OBJECT_WUNLOCK(vobj);
|
||||
error = tmpfs_nocacheread(tobj, idx, offset, tlen, uio);
|
||||
|
||||
return (error);
|
||||
@ -636,7 +636,7 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *ui
|
||||
offset = addr & PAGE_MASK;
|
||||
tlen = MIN(PAGE_SIZE - offset, len);
|
||||
|
||||
VM_OBJECT_LOCK(vobj);
|
||||
VM_OBJECT_WLOCK(vobj);
|
||||
lookupvpg:
|
||||
if (((vpg = vm_page_lookup(vobj, idx)) != NULL) &&
|
||||
vm_page_is_valid(vpg, offset, tlen)) {
|
||||
@ -651,15 +651,15 @@ lookupvpg:
|
||||
}
|
||||
vm_page_busy(vpg);
|
||||
vm_page_undirty(vpg);
|
||||
VM_OBJECT_UNLOCK(vobj);
|
||||
VM_OBJECT_WUNLOCK(vobj);
|
||||
error = uiomove_fromphys(&vpg, offset, tlen, uio);
|
||||
} else {
|
||||
if (vm_page_is_cached(vobj, idx))
|
||||
vm_page_cache_free(vobj, idx, idx + 1);
|
||||
VM_OBJECT_UNLOCK(vobj);
|
||||
VM_OBJECT_WUNLOCK(vobj);
|
||||
vpg = NULL;
|
||||
}
|
||||
VM_OBJECT_LOCK(tobj);
|
||||
VM_OBJECT_WLOCK(tobj);
|
||||
tpg = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
|
||||
VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
|
||||
if (tpg->valid != VM_PAGE_BITS_ALL) {
|
||||
@ -675,14 +675,14 @@ lookupvpg:
|
||||
} else
|
||||
vm_page_zero_invalid(tpg, TRUE);
|
||||
}
|
||||
VM_OBJECT_UNLOCK(tobj);
|
||||
VM_OBJECT_WUNLOCK(tobj);
|
||||
if (vpg == NULL)
|
||||
error = uiomove_fromphys(&tpg, offset, tlen, uio);
|
||||
else {
|
||||
KASSERT(vpg->valid == VM_PAGE_BITS_ALL, ("parts of vpg invalid"));
|
||||
pmap_copy_page(vpg, tpg);
|
||||
}
|
||||
VM_OBJECT_LOCK(tobj);
|
||||
VM_OBJECT_WLOCK(tobj);
|
||||
if (error == 0) {
|
||||
KASSERT(tpg->valid == VM_PAGE_BITS_ALL,
|
||||
("parts of tpg invalid"));
|
||||
@ -693,11 +693,11 @@ lookupvpg:
|
||||
vm_page_unlock(tpg);
|
||||
vm_page_wakeup(tpg);
|
||||
out:
|
||||
VM_OBJECT_UNLOCK(tobj);
|
||||
VM_OBJECT_WUNLOCK(tobj);
|
||||
if (vpg != NULL) {
|
||||
VM_OBJECT_LOCK(vobj);
|
||||
VM_OBJECT_WLOCK(vobj);
|
||||
vm_page_wakeup(vpg);
|
||||
VM_OBJECT_UNLOCK(vobj);
|
||||
VM_OBJECT_WUNLOCK(vobj);
|
||||
}
|
||||
|
||||
return (error);
|
||||
|
@ -1278,15 +1278,15 @@ each_writable_segment(td, func, closure)
|
||||
continue;
|
||||
|
||||
/* Ignore memory-mapped devices and such things. */
|
||||
VM_OBJECT_LOCK(object);
|
||||
VM_OBJECT_WLOCK(object);
|
||||
while ((backing_object = object->backing_object) != NULL) {
|
||||
VM_OBJECT_LOCK(backing_object);
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WLOCK(backing_object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
object = backing_object;
|
||||
}
|
||||
ignore_entry = object->type != OBJT_DEFAULT &&
|
||||
object->type != OBJT_SWAP && object->type != OBJT_VNODE;
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
if (ignore_entry)
|
||||
continue;
|
||||
|
||||
|
@ -933,7 +933,7 @@ exec_map_first_page(imgp)
|
||||
object = imgp->vp->v_object;
|
||||
if (object == NULL)
|
||||
return (EACCES);
|
||||
VM_OBJECT_LOCK(object);
|
||||
VM_OBJECT_WLOCK(object);
|
||||
#if VM_NRESERVLEVEL > 0
|
||||
if ((object->flags & OBJ_COLORED) == 0) {
|
||||
object->flags |= OBJ_COLORED;
|
||||
@ -968,7 +968,7 @@ exec_map_first_page(imgp)
|
||||
vm_page_free(ma[0]);
|
||||
vm_page_unlock(ma[0]);
|
||||
}
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
return (EIO);
|
||||
}
|
||||
}
|
||||
@ -976,7 +976,7 @@ exec_map_first_page(imgp)
|
||||
vm_page_hold(ma[0]);
|
||||
vm_page_unlock(ma[0]);
|
||||
vm_page_wakeup(ma[0]);
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
|
||||
imgp->firstpage = sf_buf_alloc(ma[0], 0);
|
||||
imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);
|
||||
|
@ -1995,7 +1995,7 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
|
||||
kve->kve_private_resident = 0;
|
||||
obj = entry->object.vm_object;
|
||||
if (obj != NULL) {
|
||||
VM_OBJECT_LOCK(obj);
|
||||
VM_OBJECT_WLOCK(obj);
|
||||
if (obj->shadow_count == 1)
|
||||
kve->kve_private_resident =
|
||||
obj->resident_page_count;
|
||||
@ -2010,9 +2010,9 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
|
||||
|
||||
for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
|
||||
if (tobj != obj)
|
||||
VM_OBJECT_LOCK(tobj);
|
||||
VM_OBJECT_WLOCK(tobj);
|
||||
if (lobj != obj)
|
||||
VM_OBJECT_UNLOCK(lobj);
|
||||
VM_OBJECT_WUNLOCK(lobj);
|
||||
lobj = tobj;
|
||||
}
|
||||
|
||||
@ -2072,11 +2072,11 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
|
||||
break;
|
||||
}
|
||||
if (lobj != obj)
|
||||
VM_OBJECT_UNLOCK(lobj);
|
||||
VM_OBJECT_WUNLOCK(lobj);
|
||||
|
||||
kve->kve_ref_count = obj->ref_count;
|
||||
kve->kve_shadow_count = obj->shadow_count;
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
if (vp != NULL) {
|
||||
vn_fullpath(curthread, vp, &fullpath,
|
||||
&freepath);
|
||||
@ -2162,7 +2162,7 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
|
||||
kve->kve_private_resident = 0;
|
||||
obj = entry->object.vm_object;
|
||||
if (obj != NULL) {
|
||||
VM_OBJECT_LOCK(obj);
|
||||
VM_OBJECT_WLOCK(obj);
|
||||
if (obj->shadow_count == 1)
|
||||
kve->kve_private_resident =
|
||||
obj->resident_page_count;
|
||||
@ -2183,9 +2183,9 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
|
||||
|
||||
for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
|
||||
if (tobj != obj)
|
||||
VM_OBJECT_LOCK(tobj);
|
||||
VM_OBJECT_WLOCK(tobj);
|
||||
if (lobj != obj)
|
||||
VM_OBJECT_UNLOCK(lobj);
|
||||
VM_OBJECT_WUNLOCK(lobj);
|
||||
lobj = tobj;
|
||||
}
|
||||
|
||||
@ -2247,11 +2247,11 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
|
||||
break;
|
||||
}
|
||||
if (lobj != obj)
|
||||
VM_OBJECT_UNLOCK(lobj);
|
||||
VM_OBJECT_WUNLOCK(lobj);
|
||||
|
||||
kve->kve_ref_count = obj->ref_count;
|
||||
kve->kve_shadow_count = obj->shadow_count;
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
if (vp != NULL) {
|
||||
vn_fullpath(curthread, vp, &fullpath,
|
||||
&freepath);
|
||||
|
@@ -107,11 +107,11 @@ shared_page_init(void *dummy __unused)
 sx_init(&shared_page_alloc_sx, "shpsx");
 shared_page_obj = vm_pager_allocate(OBJT_PHYS, 0, PAGE_SIZE,
 VM_PROT_DEFAULT, 0, NULL);
-VM_OBJECT_LOCK(shared_page_obj);
+VM_OBJECT_WLOCK(shared_page_obj);
 m = vm_page_grab(shared_page_obj, 0, VM_ALLOC_RETRY | VM_ALLOC_NOBUSY |
 VM_ALLOC_ZERO);
 m->valid = VM_PAGE_BITS_ALL;
-VM_OBJECT_UNLOCK(shared_page_obj);
+VM_OBJECT_WUNLOCK(shared_page_obj);
 addr = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
 pmap_qenter(addr, &m, 1);
 shared_page_mapping = (char *)addr;
@@ -104,7 +104,7 @@ vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
 &upindex, &prot, &wired)) != KERN_SUCCESS) {
 return(EFAULT);
 }
-VM_OBJECT_LOCK(uobject);
+VM_OBJECT_WLOCK(uobject);
 retry:
 if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
 if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
@@ -124,7 +124,7 @@ retry:
 }
 vm_page_insert(kern_pg, uobject, upindex);
 vm_page_dirty(kern_pg);
-VM_OBJECT_UNLOCK(uobject);
+VM_OBJECT_WUNLOCK(uobject);
 vm_map_lookup_done(map, entry);
 return(KERN_SUCCESS);
 }
@@ -382,7 +382,7 @@ ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)

 obj = entry->object.vm_object;
 if (obj != NULL)
-VM_OBJECT_LOCK(obj);
+VM_OBJECT_WLOCK(obj);
 } while (0);

 vm_map_unlock_read(map);
@@ -395,9 +395,9 @@ ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
 lobj = obj;
 for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
 if (tobj != obj)
-VM_OBJECT_LOCK(tobj);
+VM_OBJECT_WLOCK(tobj);
 if (lobj != obj)
-VM_OBJECT_UNLOCK(lobj);
+VM_OBJECT_WUNLOCK(lobj);
 lobj = tobj;
 pve->pve_offset += tobj->backing_object_offset;
 }
@@ -405,8 +405,8 @@ ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
 if (vp != NULL)
 vref(vp);
 if (lobj != obj)
-VM_OBJECT_UNLOCK(lobj);
-VM_OBJECT_UNLOCK(obj);
+VM_OBJECT_WUNLOCK(lobj);
+VM_OBJECT_WUNLOCK(obj);

 if (vp != NULL) {
 freepath = NULL;
@@ -708,10 +708,10 @@ shmget_allocate_segment(td, uap, mode)
 #endif
 return (ENOMEM);
 }
-VM_OBJECT_LOCK(shm_object);
+VM_OBJECT_WLOCK(shm_object);
 vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
 vm_object_set_flag(shm_object, OBJ_NOSPLIT);
-VM_OBJECT_UNLOCK(shm_object);
+VM_OBJECT_WUNLOCK(shm_object);

 shmseg->object = shm_object;
 shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = cred->cr_uid;
@ -254,9 +254,9 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
|
||||
int base, rv;
|
||||
|
||||
object = shmfd->shm_object;
|
||||
VM_OBJECT_LOCK(object);
|
||||
VM_OBJECT_WLOCK(object);
|
||||
if (length == shmfd->shm_size) {
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
return (0);
|
||||
}
|
||||
nobjsize = OFF_TO_IDX(length + PAGE_MASK);
|
||||
@ -268,7 +268,7 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
|
||||
* object is mapped into the kernel.
|
||||
*/
|
||||
if (shmfd->shm_kmappings > 0) {
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
return (EBUSY);
|
||||
}
|
||||
|
||||
@ -289,9 +289,9 @@ retry:
|
||||
} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
|
||||
m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
|
||||
if (m == NULL) {
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
VM_WAIT;
|
||||
VM_OBJECT_LOCK(object);
|
||||
VM_OBJECT_WLOCK(object);
|
||||
goto retry;
|
||||
} else if (m->valid != VM_PAGE_BITS_ALL) {
|
||||
ma[0] = m;
|
||||
@ -309,7 +309,7 @@ retry:
|
||||
} else {
|
||||
vm_page_free(m);
|
||||
vm_page_unlock(m);
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
return (EIO);
|
||||
}
|
||||
}
|
||||
@ -339,7 +339,7 @@ retry:
|
||||
/* Attempt to reserve the swap */
|
||||
delta = ptoa(nobjsize - object->size);
|
||||
if (!swap_reserve_by_cred(delta, object->cred)) {
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
return (ENOMEM);
|
||||
}
|
||||
object->charge += delta;
|
||||
@ -350,7 +350,7 @@ retry:
|
||||
shmfd->shm_mtime = shmfd->shm_ctime;
|
||||
mtx_unlock(&shm_timestamp_lock);
|
||||
object->size = nobjsize;
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
return (0);
|
||||
}
|
||||
|
||||
@ -371,10 +371,10 @@ shm_alloc(struct ucred *ucred, mode_t mode)
|
||||
shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
|
||||
shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
|
||||
KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
|
||||
VM_OBJECT_LOCK(shmfd->shm_object);
|
||||
VM_OBJECT_WLOCK(shmfd->shm_object);
|
||||
vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
|
||||
vm_object_set_flag(shmfd->shm_object, OBJ_NOSPLIT);
|
||||
VM_OBJECT_UNLOCK(shmfd->shm_object);
|
||||
VM_OBJECT_WUNLOCK(shmfd->shm_object);
|
||||
vfs_timestamp(&shmfd->shm_birthtime);
|
||||
shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
|
||||
shmfd->shm_birthtime;
|
||||
@ -762,20 +762,20 @@ shm_map(struct file *fp, size_t size, off_t offset, void **memp)
|
||||
return (EINVAL);
|
||||
shmfd = fp->f_data;
|
||||
obj = shmfd->shm_object;
|
||||
VM_OBJECT_LOCK(obj);
|
||||
VM_OBJECT_WLOCK(obj);
|
||||
/*
|
||||
* XXXRW: This validation is probably insufficient, and subject to
|
||||
* sign errors. It should be fixed.
|
||||
*/
|
||||
if (offset >= shmfd->shm_size ||
|
||||
offset + size > round_page(shmfd->shm_size)) {
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
return (EINVAL);
|
||||
}
|
||||
|
||||
shmfd->shm_kmappings++;
|
||||
vm_object_reference_locked(obj);
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
|
||||
/* Map the object into the kernel_map and wire it. */
|
||||
kva = vm_map_min(kernel_map);
|
||||
@ -797,9 +797,9 @@ shm_map(struct file *fp, size_t size, off_t offset, void **memp)
|
||||
vm_object_deallocate(obj);
|
||||
|
||||
/* On failure, drop our mapping reference. */
|
||||
VM_OBJECT_LOCK(obj);
|
||||
VM_OBJECT_WLOCK(obj);
|
||||
shmfd->shm_kmappings--;
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
|
||||
return (vm_mmap_to_errno(rv));
|
||||
}
|
||||
@ -841,10 +841,10 @@ shm_unmap(struct file *fp, void *mem, size_t size)
|
||||
if (obj != shmfd->shm_object)
|
||||
return (EINVAL);
|
||||
vm_map_remove(map, kva, kva + size);
|
||||
VM_OBJECT_LOCK(obj);
|
||||
VM_OBJECT_WLOCK(obj);
|
||||
KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
|
||||
shmfd->shm_kmappings--;
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
return (0);
|
||||
}
|
||||
|
||||
|
@ -1863,12 +1863,12 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
|
||||
* reclamation of its vnode does not
|
||||
* immediately destroy it.
|
||||
*/
|
||||
VM_OBJECT_LOCK(obj);
|
||||
VM_OBJECT_WLOCK(obj);
|
||||
if ((obj->flags & OBJ_DEAD) == 0) {
|
||||
vm_object_reference_locked(obj);
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
} else {
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
obj = NULL;
|
||||
}
|
||||
}
|
||||
@ -2045,7 +2045,7 @@ retry_space:
|
||||
vm_offset_t pgoff;
|
||||
struct mbuf *m0;
|
||||
|
||||
VM_OBJECT_LOCK(obj);
|
||||
VM_OBJECT_WLOCK(obj);
|
||||
/*
|
||||
* Calculate the amount to transfer.
|
||||
* Not to exceed a page, the EOF,
|
||||
@ -2063,7 +2063,7 @@ retry_space:
|
||||
xfsize = omin(rem, xfsize);
|
||||
xfsize = omin(space - loopbytes, xfsize);
|
||||
if (xfsize <= 0) {
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
done = 1; /* all data sent */
|
||||
break;
|
||||
}
|
||||
@ -2084,7 +2084,7 @@ retry_space:
|
||||
* block.
|
||||
*/
|
||||
if (pg->valid && vm_page_is_valid(pg, pgoff, xfsize))
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
else if (m != NULL)
|
||||
error = EAGAIN; /* send what we already got */
|
||||
else if (uap->flags & SF_NODISKIO)
|
||||
@ -2098,7 +2098,7 @@ retry_space:
|
||||
* when the I/O completes.
|
||||
*/
|
||||
vm_page_io_start(pg);
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
|
||||
/*
|
||||
* Get the page from backing store.
|
||||
@ -2120,10 +2120,10 @@ retry_space:
|
||||
td->td_ucred, NOCRED, &resid, td);
|
||||
VOP_UNLOCK(vp, 0);
|
||||
after_read:
|
||||
VM_OBJECT_LOCK(obj);
|
||||
VM_OBJECT_WLOCK(obj);
|
||||
vm_page_io_finish(pg);
|
||||
if (!error)
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
mbstat.sf_iocnt++;
|
||||
}
|
||||
if (error) {
|
||||
@ -2138,7 +2138,7 @@ retry_space:
|
||||
pg->busy == 0 && !(pg->oflags & VPO_BUSY))
|
||||
vm_page_free(pg);
|
||||
vm_page_unlock(pg);
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
if (error == EAGAIN)
|
||||
error = 0; /* not a real error */
|
||||
break;
|
||||
|
@@ -842,9 +842,9 @@ aio_fsync_vnode(struct thread *td, struct vnode *vp)
 goto drop;
 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 if (vp->v_object != NULL) {
-VM_OBJECT_LOCK(vp->v_object);
+VM_OBJECT_WLOCK(vp->v_object);
 vm_object_page_clean(vp->v_object, 0, 0, 0);
-VM_OBJECT_UNLOCK(vp->v_object);
+VM_OBJECT_WUNLOCK(vp->v_object);
 }
 error = VOP_FSYNC(vp, MNT_WAIT, td);

@ -1380,7 +1380,7 @@ brelse(struct buf *bp)
|
||||
*/
|
||||
resid = bp->b_bufsize;
|
||||
foff = bp->b_offset;
|
||||
VM_OBJECT_LOCK(obj);
|
||||
VM_OBJECT_WLOCK(obj);
|
||||
for (i = 0; i < bp->b_npages; i++) {
|
||||
int had_bogus = 0;
|
||||
|
||||
@ -1428,7 +1428,7 @@ brelse(struct buf *bp)
|
||||
resid -= PAGE_SIZE - (foff & PAGE_MASK);
|
||||
foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
|
||||
}
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
if (bp->b_flags & (B_INVAL | B_RELBUF))
|
||||
vfs_vmio_release(bp);
|
||||
|
||||
@ -1656,7 +1656,7 @@ vfs_vmio_release(struct buf *bp)
|
||||
vm_page_t m;
|
||||
|
||||
pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
|
||||
VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
|
||||
VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
|
||||
for (i = 0; i < bp->b_npages; i++) {
|
||||
m = bp->b_pages[i];
|
||||
bp->b_pages[i] = NULL;
|
||||
@ -1688,7 +1688,7 @@ vfs_vmio_release(struct buf *bp)
|
||||
}
|
||||
vm_page_unlock(m);
|
||||
}
|
||||
VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
|
||||
VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
|
||||
|
||||
if (bp->b_bufsize) {
|
||||
bufspacewakeup();
|
||||
@ -2465,7 +2465,7 @@ inmem(struct vnode * vp, daddr_t blkno)
|
||||
size = vp->v_mount->mnt_stat.f_iosize;
|
||||
off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
|
||||
|
||||
VM_OBJECT_LOCK(obj);
|
||||
VM_OBJECT_WLOCK(obj);
|
||||
for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
|
||||
m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
|
||||
if (!m)
|
||||
@ -2477,11 +2477,11 @@ inmem(struct vnode * vp, daddr_t blkno)
|
||||
(vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
|
||||
goto notinmem;
|
||||
}
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
return 1;
|
||||
|
||||
notinmem:
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
return (0);
|
||||
}
|
||||
|
||||
@ -2511,7 +2511,7 @@ vfs_clean_pages_dirty_buf(struct buf *bp)
|
||||
KASSERT(bp->b_offset != NOOFFSET,
|
||||
("vfs_clean_pages_dirty_buf: no buffer offset"));
|
||||
|
||||
VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
|
||||
VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
|
||||
vfs_drain_busy_pages(bp);
|
||||
vfs_setdirty_locked_object(bp);
|
||||
for (i = 0; i < bp->b_npages; i++) {
|
||||
@ -2524,7 +2524,7 @@ vfs_clean_pages_dirty_buf(struct buf *bp)
|
||||
/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
|
||||
foff = noff;
|
||||
}
|
||||
VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
|
||||
VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -3036,7 +3036,7 @@ allocbuf(struct buf *bp, int size)
|
||||
(vm_offset_t)bp->b_data) +
|
||||
(desiredpages << PAGE_SHIFT),
|
||||
(bp->b_npages - desiredpages));
|
||||
VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
|
||||
VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
|
||||
for (i = desiredpages; i < bp->b_npages; i++) {
|
||||
/*
|
||||
* the page is not freed here -- it
|
||||
@ -3055,7 +3055,7 @@ allocbuf(struct buf *bp, int size)
|
||||
vm_page_unwire(m, 0);
|
||||
vm_page_unlock(m);
|
||||
}
|
||||
VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
|
||||
VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
|
||||
bp->b_npages = desiredpages;
|
||||
}
|
||||
} else if (size > bp->b_bcount) {
|
||||
@ -3076,7 +3076,7 @@ allocbuf(struct buf *bp, int size)
|
||||
|
||||
obj = bp->b_bufobj->bo_object;
|
||||
|
||||
VM_OBJECT_LOCK(obj);
|
||||
VM_OBJECT_WLOCK(obj);
|
||||
while (bp->b_npages < desiredpages) {
|
||||
vm_page_t m;
|
||||
|
||||
@ -3138,7 +3138,7 @@ allocbuf(struct buf *bp, int size)
|
||||
toff += tinc;
|
||||
tinc = PAGE_SIZE;
|
||||
}
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
|
||||
/*
|
||||
* Step 3, fixup the KVM pmap. Remember that
|
||||
@ -3393,7 +3393,7 @@ bufdone_finish(struct buf *bp)
|
||||
bp->b_flags |= B_CACHE;
|
||||
}
|
||||
bogus = 0;
|
||||
VM_OBJECT_LOCK(obj);
|
||||
VM_OBJECT_WLOCK(obj);
|
||||
for (i = 0; i < bp->b_npages; i++) {
|
||||
int bogusflag = 0;
|
||||
int resid;
|
||||
@ -3435,7 +3435,7 @@ bufdone_finish(struct buf *bp)
|
||||
iosize -= resid;
|
||||
}
|
||||
vm_object_pip_wakeupn(obj, 0);
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
if (bogus)
|
||||
pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
|
||||
bp->b_pages, bp->b_npages);
|
||||
@ -3473,7 +3473,7 @@ vfs_unbusy_pages(struct buf *bp)
|
||||
return;
|
||||
|
||||
obj = bp->b_bufobj->bo_object;
|
||||
VM_OBJECT_LOCK(obj);
|
||||
VM_OBJECT_WLOCK(obj);
|
||||
for (i = 0; i < bp->b_npages; i++) {
|
||||
m = bp->b_pages[i];
|
||||
if (m == bogus_page) {
|
||||
@ -3488,7 +3488,7 @@ vfs_unbusy_pages(struct buf *bp)
|
||||
vm_page_io_finish(m);
|
||||
}
|
||||
vm_object_pip_wakeupn(obj, 0);
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -3609,7 +3609,7 @@ vfs_busy_pages(struct buf *bp, int clear_modify)
|
||||
foff = bp->b_offset;
|
||||
KASSERT(bp->b_offset != NOOFFSET,
|
||||
("vfs_busy_pages: no buffer offset"));
|
||||
VM_OBJECT_LOCK(obj);
|
||||
VM_OBJECT_WLOCK(obj);
|
||||
vfs_drain_busy_pages(bp);
|
||||
if (bp->b_bufsize != 0)
|
||||
vfs_setdirty_locked_object(bp);
|
||||
@ -3646,7 +3646,7 @@ vfs_busy_pages(struct buf *bp, int clear_modify)
|
||||
}
|
||||
foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
|
||||
}
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
VM_OBJECT_WUNLOCK(obj);
|
||||
if (bogus)
|
||||
pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
|
||||
bp->b_pages, bp->b_npages);
|
||||
@ -3677,7 +3677,7 @@ vfs_bio_set_valid(struct buf *bp, int base, int size)
|
||||
base += (bp->b_offset & PAGE_MASK);
|
||||
n = PAGE_SIZE - (base & PAGE_MASK);
|
||||
|
||||
VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
|
||||
VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
|
||||
for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
|
||||
m = bp->b_pages[i];
|
||||
if (n > size)
|
||||
@ -3687,7 +3687,7 @@ vfs_bio_set_valid(struct buf *bp, int base, int size)
|
||||
size -= n;
|
||||
n = PAGE_SIZE;
|
||||
}
|
||||
VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
|
||||
VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -3714,7 +3714,7 @@ vfs_bio_clrbuf(struct buf *bp)
|
||||
}
|
||||
bp->b_flags &= ~B_INVAL;
|
||||
bp->b_ioflags &= ~BIO_ERROR;
|
||||
VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
|
||||
VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
|
||||
if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
|
||||
(bp->b_offset & PAGE_MASK) == 0) {
|
||||
if (bp->b_pages[0] == bogus_page)
|
||||
@ -3753,7 +3753,7 @@ vfs_bio_clrbuf(struct buf *bp)
|
||||
bp->b_pages[i]->valid |= mask;
|
||||
}
|
||||
unlock:
|
||||
VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
|
||||
VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
|
||||
bp->b_resid = 0;
|
||||
}
|
||||
|
||||
|
@ -407,7 +407,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
*/
off = tbp->b_offset;
tsize = size;
VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
for (j = 0; tsize > 0; j++) {
toff = off & PAGE_MASK;
tinc = tsize;
@ -421,7 +421,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
off += tinc;
tsize -= tinc;
}
VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
if (tsize > 0) {
bqrelse(tbp);
break;
@ -456,7 +456,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
BUF_KERNPROC(tbp);
TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
tbp, b_cluster.cluster_entry);
VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
for (j = 0; j < tbp->b_npages; j += 1) {
vm_page_t m;
m = tbp->b_pages[j];
@ -470,7 +470,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
if (m->valid == VM_PAGE_BITS_ALL)
tbp->b_pages[j] = bogus_page;
}
VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
/*
* Don't inherit tbp->b_bufsize as it may be larger due to
* a non-page-aligned size. Instead just aggregate using
@ -488,13 +488,13 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
* Fully valid pages in the cluster are already good and do not need
* to be re-read from disk. Replace the page with bogus_page
*/
VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
for (j = 0; j < bp->b_npages; j++) {
VM_OBJECT_LOCK_ASSERT(bp->b_pages[j]->object, RA_WLOCKED);
if (bp->b_pages[j]->valid == VM_PAGE_BITS_ALL)
bp->b_pages[j] = bogus_page;
}
VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
if (bp->b_bufsize > bp->b_kvasize)
panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
bp->b_bufsize, bp->b_kvasize);
@ -919,12 +919,12 @@ cluster_wbuild(vp, size, start_lbn, len)
if (tbp->b_flags & B_VMIO) {
vm_page_t m;

VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
if (i != 0) { /* if not first buffer */
for (j = 0; j < tbp->b_npages; j += 1) {
m = tbp->b_pages[j];
if (m->oflags & VPO_BUSY) {
VM_OBJECT_UNLOCK(
VM_OBJECT_WUNLOCK(
tbp->b_object);
bqrelse(tbp);
goto finishcluster;
@ -941,7 +941,7 @@ cluster_wbuild(vp, size, start_lbn, len)
bp->b_npages++;
}
}
VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
}
bp->b_bcount += size;
bp->b_bufsize += size;

@ -1043,10 +1043,10 @@ vop_stdadvise(struct vop_advise_args *ap)
if (vp->v_object != NULL) {
start = trunc_page(ap->a_start);
end = round_page(ap->a_end);
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_cache(vp->v_object, OFF_TO_IDX(start),
OFF_TO_IDX(end));
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
}
VOP_UNLOCK(vp, 0);
break;

@ -1245,9 +1245,9 @@ bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
bufobj_wwait(bo, 0, 0);
BO_UNLOCK(bo);
if (bo->bo_object != NULL) {
VM_OBJECT_LOCK(bo->bo_object);
VM_OBJECT_WLOCK(bo->bo_object);
vm_object_pip_wait(bo->bo_object, "bovlbx");
VM_OBJECT_UNLOCK(bo->bo_object);
VM_OBJECT_WUNLOCK(bo->bo_object);
}
BO_LOCK(bo);
} while (bo->bo_numoutput > 0);
@ -1258,10 +1258,10 @@ bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
*/
if (bo->bo_object != NULL &&
(flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0) {
VM_OBJECT_LOCK(bo->bo_object);
VM_OBJECT_WLOCK(bo->bo_object);
vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
OBJPR_CLEANONLY : 0);
VM_OBJECT_UNLOCK(bo->bo_object);
VM_OBJECT_WUNLOCK(bo->bo_object);
}

#ifdef INVARIANTS
@ -2521,9 +2521,9 @@ vinactive(struct vnode *vp, struct thread *td)
*/
obj = vp->v_object;
if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, OBJPC_NOSYNC);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
}
VOP_INACTIVE(vp, td);
VI_LOCK(vp);
@ -2604,9 +2604,9 @@ loop:
*/
if (flags & WRITECLOSE) {
if (vp->v_object != NULL) {
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, MNT_WAIT, td);
if (error != 0) {
@ -3504,11 +3504,11 @@ vfs_msync(struct mount *mp, int flags)

obj = vp->v_object;
if (obj != NULL) {
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0,
flags == MNT_WAIT ?
OBJPC_SYNC : OBJPC_NOSYNC);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
}
vput(vp);
}

@ -3433,9 +3433,9 @@ sys_fsync(td, uap)
vn_lock(vp, lock_flags | LK_RETRY);
AUDIT_ARG_VNODE1(vp);
if (vp->v_object != NULL) {
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, MNT_WAIT, td);

@ -1897,9 +1897,9 @@ vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)

if ((object = vp->v_object) == NULL)
return;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_object_page_remove(object, start, end, 0);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}

int

@ -129,7 +129,7 @@ nfs_getpages(struct vop_getpages_args *ap)
* allow the pager to zero-out the blanks. Partially valid pages
* can only occur at the file EOF.
*/
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (pages[ap->a_reqpage]->valid != 0) {
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
@ -138,10 +138,10 @@ nfs_getpages(struct vop_getpages_args *ap)
vm_page_unlock(pages[i]);
}
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (0);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);

/*
* We use only the kva address for the buffer, but this is extremely
@ -171,7 +171,7 @@ nfs_getpages(struct vop_getpages_args *ap)

if (error && (uio.uio_resid == count)) {
nfs_printf("nfs_getpages: error %d\n", error);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
vm_page_lock(pages[i]);
@ -179,7 +179,7 @@ nfs_getpages(struct vop_getpages_args *ap)
vm_page_unlock(pages[i]);
}
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_ERROR);
}

@ -190,7 +190,7 @@ nfs_getpages(struct vop_getpages_args *ap)
*/

size = count - uio.uio_resid;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
nextoff = toff + PAGE_SIZE;
@ -226,7 +226,7 @@ nfs_getpages(struct vop_getpages_args *ap)
if (i != ap->a_reqpage)
vm_page_readahead_finish(m);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (0);
}

@ -1297,9 +1297,9 @@ nfs_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
* Now, flush as required.
*/
if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
/*
* If the page clean was interrupted, fail the invalidation.
* Not doing so, we run the risk of losing dirty pages in the

@ -629,9 +629,9 @@ nfs_close(struct vop_close_args *ap)
* mmap'ed writes or via write().
*/
if (nfs_clean_pages_on_close && vp->v_object) {
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
}
mtx_lock(&np->n_mtx);
if (np->n_flag & NMODIFIED) {

@ -3333,9 +3333,9 @@ nfsrv_commit(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
*/
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, MNT_WAIT, curthread);
} else {
@ -3364,10 +3364,10 @@ nfsrv_commit(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,

if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, off, off + cnt,
OBJPC_SYNC);
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
}

bo = &vp->v_bufobj;

@ -140,10 +140,10 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
struct page *page = sg_page(&chunk->page_list[i]);
if (umem->writable && dirty) {
if (object && object != page->object)
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (object != page->object) {
object = page->object;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}
vm_page_dirty(page);
}
@ -151,7 +151,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
kfree(chunk);
}
if (object)
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);

#endif
}

@ -284,14 +284,14 @@ mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
object = vme->object.vm_object;
if (object == NULL)
continue;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
while ((backing_object = object->backing_object) != NULL) {
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_WLOCK(backing_object);
offset += object->backing_object_offset;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
object = backing_object;
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
/*
* At the moment, vm_maps and objects aren't considered by
* the MAC system, so only things with backing by a normal
@ -334,10 +334,10 @@ mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
vm_object_reference(object);
(void) vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_object_page_clean(object, offset, offset +
vme->end - vme->start, OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
VOP_UNLOCK(vp, 0);
vn_finished_write(mp);
vm_object_deallocate(object);

@ -1229,7 +1229,7 @@ pmap_pinit(pmap_t pm)
pm->pm_context[i] = -1;
CPU_ZERO(&pm->pm_active);

VM_OBJECT_LOCK(pm->pm_tsb_obj);
VM_OBJECT_WLOCK(pm->pm_tsb_obj);
for (i = 0; i < TSB_PAGES; i++) {
m = vm_page_grab(pm->pm_tsb_obj, i, VM_ALLOC_NOBUSY |
VM_ALLOC_RETRY | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
@ -1237,7 +1237,7 @@ pmap_pinit(pmap_t pm)
m->md.pmap = pm;
ma[i] = m;
}
VM_OBJECT_UNLOCK(pm->pm_tsb_obj);
VM_OBJECT_WUNLOCK(pm->pm_tsb_obj);
pmap_qenter((vm_offset_t)pm->pm_tsb, ma, TSB_PAGES);

bzero(&pm->pm_stats, sizeof(pm->pm_stats));
@ -1291,7 +1291,7 @@ pmap_release(pmap_t pm)

pmap_qremove((vm_offset_t)pm->pm_tsb, TSB_PAGES);
obj = pm->pm_tsb_obj;
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1"));
while (!TAILQ_EMPTY(&obj->memq)) {
m = TAILQ_FIRST(&obj->memq);
@ -1300,7 +1300,7 @@ pmap_release(pmap_t pm)
atomic_subtract_int(&cnt.v_wire_count, 1);
vm_page_free_zero(m);
}
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
PMAP_LOCK_DESTROY(pm);
}

@ -144,9 +144,9 @@ ffs_rawread_sync(struct vnode *vp)
if ((obj = vp->v_object) != NULL &&
(obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
VI_UNLOCK(vp);
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
} else
VI_UNLOCK(vp);

@ -843,7 +843,7 @@ ffs_getpages(ap)
* user programs might reference data beyond the actual end of file
* occuring within the page. We have to zero that data.
*/
VM_OBJECT_LOCK(mreq->object);
VM_OBJECT_WLOCK(mreq->object);
if (mreq->valid) {
if (mreq->valid != VM_PAGE_BITS_ALL)
vm_page_zero_invalid(mreq, TRUE);
@ -854,10 +854,10 @@ ffs_getpages(ap)
vm_page_unlock(ap->a_m[i]);
}
}
VM_OBJECT_UNLOCK(mreq->object);
VM_OBJECT_WUNLOCK(mreq->object);
return VM_PAGER_OK;
}
VM_OBJECT_UNLOCK(mreq->object);
VM_OBJECT_WUNLOCK(mreq->object);

return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
ap->a_count,

@ -91,10 +91,10 @@ default_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
object = vm_object_allocate(OBJT_DEFAULT,
OFF_TO_IDX(round_page(offset + size)));
if (cred != NULL) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
object->cred = cred;
object->charge = size;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
return (object);
}

@ -236,13 +236,13 @@ dev_pager_dealloc(object)
{
vm_page_t m;

VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
object->un_pager.devp.ops->cdev_pg_dtor(object->un_pager.devp.dev);

mtx_lock(&dev_pager_mtx);
TAILQ_REMOVE(&dev_pager_object_list, object, pager_object_list);
mtx_unlock(&dev_pager_mtx);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);

if (object->type == OBJT_DEVICE) {
/*
@ -305,12 +305,12 @@ old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
pidx = OFF_TO_IDX(offset);
memattr = object->memattr;

VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);

dev = object->handle;
csw = dev_refthread(dev, &ref);
if (csw == NULL) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
return (VM_PAGER_FAIL);
}
td = curthread;
@ -322,7 +322,7 @@ old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
if (ret != 0) {
printf(
"WARNING: dev_pager_getpage: map function returns error %d", ret);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
return (VM_PAGER_FAIL);
}

@ -339,7 +339,7 @@ old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
* the new physical address.
*/
page = *mres;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_page_updatefake(page, paddr, memattr);
} else {
/*
@ -347,7 +347,7 @@ old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
* free up the all of the original pages.
*/
page = vm_page_getfake(paddr, memattr);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_page_lock(*mres);
vm_page_free(*mres);
vm_page_unlock(*mres);

@ -124,11 +124,11 @@ phys_pager_dealloc(vm_object_t object)
{

if (object->handle != NULL) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
mtx_lock(&phys_pager_mtx);
TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
mtx_unlock(&phys_pager_mtx);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}
}

@ -146,7 +146,7 @@ sg_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
sg = object->handle;
memattr = object->memattr;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
offset = m[reqpage]->pindex;

/*
@ -181,7 +181,7 @@ sg_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)

/* Construct a new fake page. */
page = vm_page_getfake(paddr, memattr);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
TAILQ_INSERT_TAIL(&object->un_pager.sgp.sgp_pglist, page, pageq);

/* Free the original pages and insert this fake page into the object. */

@ -623,14 +623,14 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
crhold(cred);
}
object = vm_object_allocate(OBJT_DEFAULT, pindex);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
object->handle = handle;
if (cred != NULL) {
object->cred = cred;
object->charge = size;
}
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
sx_xunlock(&sw_alloc_sx);
mtx_unlock(&Giant);
@ -641,13 +641,13 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
crhold(cred);
}
object = vm_object_allocate(OBJT_DEFAULT, pindex);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (cred != NULL) {
object->cred = cred;
object->charge = size;
}
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
return (object);
}
@ -836,7 +836,7 @@ swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
daddr_t blk = SWAPBLK_NONE;
vm_pindex_t beg = start; /* save start index */

VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
while (size) {
if (n == 0) {
n = BLIST_MAX_ALLOC;
@ -844,7 +844,7 @@ swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
n >>= 1;
if (n == 0) {
swp_pager_meta_free(object, beg, start - beg);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (-1);
}
}
@ -856,7 +856,7 @@ swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
--n;
}
swp_pager_meta_free(object, start, n);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (0);
}

@ -936,11 +936,11 @@ swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
* swp_pager_meta_build() can sleep.
*/
vm_object_pip_add(srcobject, 1);
VM_OBJECT_UNLOCK(srcobject);
VM_OBJECT_WUNLOCK(srcobject);
vm_object_pip_add(dstobject, 1);
swp_pager_meta_build(dstobject, i, srcaddr);
vm_object_pip_wakeup(dstobject);
VM_OBJECT_LOCK(srcobject);
VM_OBJECT_WLOCK(srcobject);
vm_object_pip_wakeup(srcobject);
}
} else {
@ -1149,7 +1149,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
/*
* Getpbuf() can sleep.
*/
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
/*
* Get a swap buffer header to perform the IO
*/
@ -1170,7 +1170,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
bp->b_bufsize = PAGE_SIZE * (j - i);
bp->b_pager.pg_reqpage = reqpage - i;

VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
{
int k;

@ -1189,7 +1189,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
* does not remove it.
*/
vm_object_pip_add(object, bp->b_npages);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);

/*
* perform the I/O. NOTE!!! bp cannot be considered valid after
@ -1210,7 +1210,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
* cleared on completion. If an I/O error occurs, SWAPBLK_NONE
* is set in the meta-data.
*/
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
while ((mreq->oflags & VPO_SWAPINPROG) != 0) {
mreq->oflags |= VPO_WANTED;
PCPU_INC(cnt.v_intrans);
@ -1285,7 +1285,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
*/
if (object->type != OBJT_SWAP)
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);

if (curproc != pageproc)
sync = TRUE;
@ -1380,7 +1380,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
bp->b_bufsize = PAGE_SIZE * n;
bp->b_blkno = blk;

VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (j = 0; j < n; ++j) {
vm_page_t mreq = m[i+j];

@ -1395,7 +1395,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
mreq->oflags |= VPO_SWAPINPROG;
bp->b_pages[j] = mreq;
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
bp->b_npages = n;
/*
* Must set dirty range for NFS to work.
@ -1445,7 +1445,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
*/
swp_pager_async_iodone(bp);
}
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}

/*
@ -1489,7 +1489,7 @@ swp_pager_async_iodone(struct buf *bp)

if (bp->b_npages) {
object = bp->b_pages[0]->object;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}

/*
@ -1613,7 +1613,7 @@ swp_pager_async_iodone(struct buf *bp)
*/
if (object != NULL) {
vm_object_pip_wakeupn(object, bp->b_npages);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}

/*
@ -1748,13 +1748,13 @@ restart:
for (j = 0; j < SWAP_META_PAGES; ++j) {
if (swp_pager_isondev(swap->swb_pages[j], sp)) {
/* avoid deadlock */
if (!VM_OBJECT_TRYLOCK(object)) {
if (!VM_OBJECT_TRYWLOCK(object)) {
break;
} else {
mtx_unlock(&swhash_mtx);
swp_pager_force_pagein(object,
pindex + j);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
mtx_lock(&swhash_mtx);
goto restart;
}
@ -1847,7 +1847,7 @@ retry:
swap = *pswap = uma_zalloc(swap_zone, M_NOWAIT);
if (swap == NULL) {
mtx_unlock(&swhash_mtx);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (uma_zone_exhausted(swap_zone)) {
if (atomic_cmpset_int(&exhausted, 0, 1))
printf("swap zone exhausted, "
@ -1856,7 +1856,7 @@ retry:
pause("swzonex", 10);
} else
VM_WAIT;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
goto retry;
}

@ -2466,14 +2466,14 @@ vmspace_swap_count(struct vmspace *vmspace)
for (cur = map->header.next; cur != &map->header; cur = cur->next) {
if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
(object = cur->object.vm_object) != NULL) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (object->type == OBJT_SWAP &&
object->un_pager.swp.swp_bcount != 0) {
n = (cur->end - cur->start) / PAGE_SIZE;
count += object->un_pager.swp.swp_bcount *
SWAP_META_PAGES * n / object->size + 1;
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
}
return (count);

@ -1046,7 +1046,7 @@ obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
/*
* This looks a little weird since we're getting one page at a time.
*/
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
p = TAILQ_LAST(&object->memq, pglist);
pages = p != NULL ? p->pindex + 1 : 0;
startpages = pages;
@ -1073,7 +1073,7 @@ obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
pages += 1;
}
done:
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
*flags = UMA_SLAB_PRIV;

return ((void *)retkva);

@ -163,14 +163,14 @@ unlock_and_deallocate(struct faultstate *fs)
{

vm_object_pip_wakeup(fs->object);
VM_OBJECT_UNLOCK(fs->object);
VM_OBJECT_WUNLOCK(fs->object);
if (fs->object != fs->first_object) {
VM_OBJECT_LOCK(fs->first_object);
VM_OBJECT_WLOCK(fs->first_object);
vm_page_lock(fs->first_m);
vm_page_free(fs->first_m);
vm_page_unlock(fs->first_m);
vm_object_pip_wakeup(fs->first_object);
VM_OBJECT_UNLOCK(fs->first_object);
VM_OBJECT_WUNLOCK(fs->first_object);
fs->first_m = NULL;
}
vm_object_deallocate(fs->first_object);
@ -290,7 +290,7 @@ RetryFault:;
* truncation operations) during I/O. This must be done after
* obtaining the vnode lock in order to avoid possible deadlocks.
*/
VM_OBJECT_LOCK(fs.first_object);
VM_OBJECT_WLOCK(fs.first_object);
vm_object_reference_locked(fs.first_object);
vm_object_pip_add(fs.first_object, 1);

@ -363,17 +363,17 @@ RetryFault:;
vm_page_aflag_set(fs.m, PGA_REFERENCED);
vm_page_unlock(fs.m);
if (fs.object != fs.first_object) {
if (!VM_OBJECT_TRYLOCK(
if (!VM_OBJECT_TRYWLOCK(
fs.first_object)) {
VM_OBJECT_UNLOCK(fs.object);
VM_OBJECT_LOCK(fs.first_object);
VM_OBJECT_LOCK(fs.object);
VM_OBJECT_WUNLOCK(fs.object);
VM_OBJECT_WLOCK(fs.first_object);
VM_OBJECT_WLOCK(fs.object);
}
vm_page_lock(fs.first_m);
vm_page_free(fs.first_m);
vm_page_unlock(fs.first_m);
vm_object_pip_wakeup(fs.first_object);
VM_OBJECT_UNLOCK(fs.first_object);
VM_OBJECT_WUNLOCK(fs.first_object);
fs.first_m = NULL;
}
unlock_map(&fs);
@ -383,7 +383,7 @@ RetryFault:;
"vmpfw");
}
vm_object_pip_wakeup(fs.object);
VM_OBJECT_UNLOCK(fs.object);
VM_OBJECT_WUNLOCK(fs.object);
PCPU_INC(cnt.v_intrans);
vm_object_deallocate(fs.first_object);
goto RetryFault;
@ -646,12 +646,12 @@ vnode_locked:
*/
if (fs.object != fs.first_object) {
vm_object_pip_wakeup(fs.object);
VM_OBJECT_UNLOCK(fs.object);
VM_OBJECT_WUNLOCK(fs.object);

fs.object = fs.first_object;
fs.pindex = fs.first_pindex;
fs.m = fs.first_m;
VM_OBJECT_LOCK(fs.object);
VM_OBJECT_WLOCK(fs.object);
}
fs.first_m = NULL;

@ -669,11 +669,11 @@ vnode_locked:
} else {
KASSERT(fs.object != next_object,
("object loop %p", next_object));
VM_OBJECT_LOCK(next_object);
VM_OBJECT_WLOCK(next_object);
vm_object_pip_add(next_object, 1);
if (fs.object != fs.first_object)
vm_object_pip_wakeup(fs.object);
VM_OBJECT_UNLOCK(fs.object);
VM_OBJECT_WUNLOCK(fs.object);
fs.object = next_object;
}
}
@ -725,7 +725,7 @@ vnode_locked:
*/
((fs.object->type == OBJT_DEFAULT) ||
(fs.object->type == OBJT_SWAP)) &&
(is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object)) &&
(is_first_object_locked = VM_OBJECT_TRYWLOCK(fs.first_object)) &&
/*
* We don't chase down the shadow chain
*/
@ -774,7 +774,7 @@ vnode_locked:
* conditional
*/
vm_object_pip_wakeup(fs.object);
VM_OBJECT_UNLOCK(fs.object);
VM_OBJECT_WUNLOCK(fs.object);
/*
* Only use the new page below...
*/
@ -782,7 +782,7 @@ vnode_locked:
fs.pindex = fs.first_pindex;
fs.m = fs.first_m;
if (!is_first_object_locked)
VM_OBJECT_LOCK(fs.object);
VM_OBJECT_WLOCK(fs.object);
PCPU_INC(cnt.v_cow_faults);
curthread->td_cow++;
} else {
@ -903,7 +903,7 @@ vnode_locked:
*/
KASSERT(fs.m->valid == VM_PAGE_BITS_ALL,
("vm_fault: page %p partially invalid", fs.m));
VM_OBJECT_UNLOCK(fs.object);
VM_OBJECT_WUNLOCK(fs.object);

/*
* Put this page into the physical map. We had to do the unlock above
@ -914,7 +914,7 @@ vnode_locked:
pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);
if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0)
vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
VM_OBJECT_LOCK(fs.object);
VM_OBJECT_WLOCK(fs.object);
vm_page_lock(fs.m);

/*
@ -963,10 +963,10 @@ vm_fault_cache_behind(const struct faultstate *fs, int distance)
VM_OBJECT_LOCK_ASSERT(object, RA_WLOCKED);
first_object = fs->first_object;
if (first_object != object) {
if (!VM_OBJECT_TRYLOCK(first_object)) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_LOCK(first_object);
VM_OBJECT_LOCK(object);
if (!VM_OBJECT_TRYWLOCK(first_object)) {
VM_OBJECT_WUNLOCK(object);
VM_OBJECT_WLOCK(first_object);
VM_OBJECT_WLOCK(object);
}
}
/* Neither fictitious nor unmanaged pages can be cached. */
@ -999,7 +999,7 @@ vm_fault_cache_behind(const struct faultstate *fs, int distance)
}
}
if (first_object != object)
VM_OBJECT_UNLOCK(first_object);
VM_OBJECT_WUNLOCK(first_object);
}

/*
@ -1044,28 +1044,28 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)

pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
lobject = object;
VM_OBJECT_LOCK(lobject);
VM_OBJECT_WLOCK(lobject);
while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
lobject->type == OBJT_DEFAULT &&
(backing_object = lobject->backing_object) != NULL) {
KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
0, ("vm_fault_prefault: unaligned object offset"));
pindex += lobject->backing_object_offset >> PAGE_SHIFT;
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_UNLOCK(lobject);
VM_OBJECT_WLOCK(backing_object);
VM_OBJECT_WUNLOCK(lobject);
lobject = backing_object;
}
/*
* give-up when a page is not in memory
*/
if (m == NULL) {
VM_OBJECT_UNLOCK(lobject);
VM_OBJECT_WUNLOCK(lobject);
break;
}
if (m->valid == VM_PAGE_BITS_ALL &&
(m->flags & PG_FICTITIOUS) == 0)
pmap_enter_quick(pmap, addr, m, entry->protection);
VM_OBJECT_UNLOCK(lobject);
VM_OBJECT_WUNLOCK(lobject);
}
}

@ -1257,7 +1257,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
dst_object->pg_color = atop(dst_entry->start);
#endif

VM_OBJECT_LOCK(dst_object);
VM_OBJECT_WLOCK(dst_object);
KASSERT(upgrade || dst_entry->object.vm_object == NULL,
("vm_fault_copy_entry: vm_object not NULL"));
dst_entry->object.vm_object = dst_object;
@ -1307,9 +1307,9 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
dst_m = vm_page_alloc(dst_object, dst_pindex,
VM_ALLOC_NORMAL);
if (dst_m == NULL) {
VM_OBJECT_UNLOCK(dst_object);
VM_OBJECT_WUNLOCK(dst_object);
VM_WAIT;
VM_OBJECT_LOCK(dst_object);
VM_OBJECT_WLOCK(dst_object);
}
} while (dst_m == NULL);

@ -1318,7 +1318,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
* (Because the source is wired down, the page will be in
* memory.)
*/
VM_OBJECT_LOCK(src_object);
VM_OBJECT_WLOCK(src_object);
object = src_object;
pindex = src_pindex + dst_pindex;
while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
@ -1327,18 +1327,18 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
/*
* Allow fallback to backing objects if we are reading.
*/
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_WLOCK(backing_object);
pindex += OFF_TO_IDX(object->backing_object_offset);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
object = backing_object;
}
if (src_m == NULL)
panic("vm_fault_copy_wired: page missing");
pmap_copy_page(src_m, dst_m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
dst_m->valid = VM_PAGE_BITS_ALL;
dst_m->dirty = VM_PAGE_BITS_ALL;
VM_OBJECT_UNLOCK(dst_object);
VM_OBJECT_WUNLOCK(dst_object);

/*
* Enter it in the pmap. If a wired, copy-on-write
@ -1350,7 +1350,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
/*
* Mark it no longer busy, and put it on the active list.
*/
VM_OBJECT_LOCK(dst_object);
VM_OBJECT_WLOCK(dst_object);

if (upgrade) {
vm_page_lock(src_m);
@ -1367,7 +1367,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
}
vm_page_wakeup(dst_m);
}
VM_OBJECT_UNLOCK(dst_object);
VM_OBJECT_WUNLOCK(dst_object);
if (upgrade) {
dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
vm_object_deallocate(src_object);

@ -239,7 +239,7 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
vm_pindex_t pindex;
int rv;

VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
pindex = OFF_TO_IDX(offset);
m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
@ -261,7 +261,7 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
vm_page_unlock(m);
vm_page_wakeup(m);
out:
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (m);
}

@ -395,7 +395,7 @@ vm_thread_new(struct thread *td, int pages)
* For the length of the stack, link in a real page of ram for each
* page of stack.
*/
VM_OBJECT_LOCK(ksobj);
VM_OBJECT_WLOCK(ksobj);
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page.
@ -405,7 +405,7 @@ vm_thread_new(struct thread *td, int pages)
ma[i] = m;
m->valid = VM_PAGE_BITS_ALL;
}
VM_OBJECT_UNLOCK(ksobj);
VM_OBJECT_WUNLOCK(ksobj);
pmap_qenter(ks, ma, pages);
return (1);
}
@ -418,7 +418,7 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)

atomic_add_int(&kstacks, -1);
pmap_qremove(ks, pages);
VM_OBJECT_LOCK(ksobj);
VM_OBJECT_WLOCK(ksobj);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
@ -428,7 +428,7 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
vm_page_free(m);
vm_page_unlock(m);
}
VM_OBJECT_UNLOCK(ksobj);
VM_OBJECT_WUNLOCK(ksobj);
vm_object_deallocate(ksobj);
kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
@ -506,7 +506,7 @@ vm_thread_swapout(struct thread *td)
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
pmap_qremove(td->td_kstack, pages);
VM_OBJECT_LOCK(ksobj);
VM_OBJECT_WLOCK(ksobj);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
@ -516,7 +516,7 @@ vm_thread_swapout(struct thread *td)
vm_page_unwire(m, 0);
vm_page_unlock(m);
}
VM_OBJECT_UNLOCK(ksobj);
VM_OBJECT_WUNLOCK(ksobj);
}

/*
@ -531,7 +531,7 @@ vm_thread_swapin(struct thread *td)

pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
VM_OBJECT_LOCK(ksobj);
VM_OBJECT_WLOCK(ksobj);
for (i = 0; i < pages; i++)
ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
VM_ALLOC_WIRED);
@ -558,7 +558,7 @@ vm_thread_swapin(struct thread *td)
} else if (ma[i]->oflags & VPO_BUSY)
vm_page_wakeup(ma[i]);
}
VM_OBJECT_UNLOCK(ksobj);
VM_OBJECT_WUNLOCK(ksobj);
pmap_qenter(td->td_kstack, ma, pages);
cpu_thread_swapin(td);
}

@ -234,7 +234,7 @@ kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
VM_PROT_ALL, 0);
pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
end_offset = offset + size;
for (; offset < end_offset; offset += PAGE_SIZE) {
tries = 0;
@ -242,12 +242,12 @@ retry:
m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags, 1,
low, high, PAGE_SIZE, 0, memattr);
if (m == NULL) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
vm_map_unlock(map);
vm_pageout_grow_cache(tries, low, high);
vm_map_lock(map);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
tries++;
goto retry;
}
@ -266,7 +266,7 @@ retry:
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vm_map_unlock(map);
vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
VM_MAP_WIRE_NOHOLES);
@ -303,18 +303,18 @@ kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
VM_PROT_ALL, 0);
pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
tries = 0;
retry:
m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags,
atop(size), low, high, alignment, boundary, memattr);
if (m == NULL) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
vm_map_unlock(map);
vm_pageout_grow_cache(tries, low, high);
vm_map_lock(map);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
tries++;
goto retry;
}
@ -328,7 +328,7 @@ retry:
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vm_map_unlock(map);
vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
VM_MAP_WIRE_NOHOLES);
@ -488,7 +488,7 @@ kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)

pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;

VM_OBJECT_LOCK(kmem_object);
VM_OBJECT_WLOCK(kmem_object);
for (i = 0; i < size; i += PAGE_SIZE) {
retry:
m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);
@ -500,7 +500,7 @@ retry:
*/
if (m == NULL) {
if ((flags & M_NOWAIT) == 0) {
VM_OBJECT_UNLOCK(kmem_object);
VM_OBJECT_WUNLOCK(kmem_object);
entry->eflags |= MAP_ENTRY_IN_TRANSITION;
vm_map_unlock(map);
VM_WAIT;
@ -510,7 +510,7 @@ retry:
MAP_ENTRY_IN_TRANSITION,
("kmem_back: volatile entry"));
entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
VM_OBJECT_LOCK(kmem_object);
VM_OBJECT_WLOCK(kmem_object);
goto retry;
}
/*
@ -526,7 +526,7 @@ retry:
vm_page_unwire(m, 0);
vm_page_free(m);
}
VM_OBJECT_UNLOCK(kmem_object);
VM_OBJECT_WUNLOCK(kmem_object);
vm_map_delete(map, addr, addr + size);
return (KERN_NO_SPACE);
}
@ -536,7 +536,7 @@ retry:
KASSERT((m->oflags & VPO_UNMANAGED) != 0,
("kmem_malloc: page %p is managed", m));
}
VM_OBJECT_UNLOCK(kmem_object);
VM_OBJECT_WUNLOCK(kmem_object);

/*
* Mark map entry as non-pageable. Repeat the assert.
@ -556,7 +556,7 @@ retry:
/*
* Loop thru pages, entering them in the pmap.
*/
VM_OBJECT_LOCK(kmem_object);
VM_OBJECT_WLOCK(kmem_object);
for (i = 0; i < size; i += PAGE_SIZE) {
m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
/*
@ -566,7 +566,7 @@ retry:
TRUE);
vm_page_wakeup(m);
}
VM_OBJECT_UNLOCK(kmem_object);
VM_OBJECT_WUNLOCK(kmem_object);

return (KERN_SUCCESS);
}

@ -1224,10 +1224,10 @@ charged:
* reference counting is insufficient to recognize
* aliases with precision.)
*/
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (object->ref_count > 1 || object->shadow_count != 0)
vm_object_clear_flag(object, OBJ_ONEMAPPING);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
else if ((prev_entry != &map->header) &&
(prev_entry->eflags == protoeflags) &&
@ -1625,12 +1625,12 @@ _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
} else if (entry->object.vm_object != NULL &&
((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
entry->cred != NULL) {
VM_OBJECT_LOCK(entry->object.vm_object);
VM_OBJECT_WLOCK(entry->object.vm_object);
KASSERT(entry->object.vm_object->cred == NULL,
("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
entry->object.vm_object->cred = entry->cred;
entry->object.vm_object->charge = entry->end - entry->start;
VM_OBJECT_UNLOCK(entry->object.vm_object);
VM_OBJECT_WUNLOCK(entry->object.vm_object);
entry->cred = NULL;
}

@ -1702,12 +1702,12 @@ _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
} else if (entry->object.vm_object != NULL &&
((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
entry->cred != NULL) {
VM_OBJECT_LOCK(entry->object.vm_object);
VM_OBJECT_WLOCK(entry->object.vm_object);
KASSERT(entry->object.vm_object->cred == NULL,
("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
entry->object.vm_object->cred = entry->cred;
entry->object.vm_object->charge = entry->end - entry->start;
VM_OBJECT_UNLOCK(entry->object.vm_object);
VM_OBJECT_WUNLOCK(entry->object.vm_object);
entry->cred = NULL;
}

@ -1807,7 +1807,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,

if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
return;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
pmap_object_init_pt(map->pmap, addr, object, pindex, size);
goto unlock_return;
@ -1858,7 +1858,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
pmap_enter_object(map->pmap, start, addr + ptoa(psize),
p_start, prot);
unlock_return:
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}

/*
@ -1934,9 +1934,9 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
continue;
}

VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
continue;
}

@ -1948,7 +1948,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
KASSERT(obj->charge == 0,
("vm_map_protect: object %p overcharged\n", obj));
if (!swap_reserve(ptoa(obj->size))) {
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
vm_map_unlock(map);
return (KERN_RESOURCE_SHORTAGE);
}
@ -1956,7 +1956,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
crhold(cred);
obj->cred = cred;
obj->charge = ptoa(obj->size);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
}

/*
@ -2719,7 +2719,7 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
count = OFF_TO_IDX(size);
offidxstart = OFF_TO_IDX(entry->offset);
offidxend = offidxstart + count;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (object->ref_count != 1 &&
((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
object == kernel_object || object == kmem_object)) {
@ -2748,7 +2748,7 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
}
}
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
} else
entry->object.vm_object = NULL;
if (map->system_map)
@ -2956,7 +2956,7 @@ vm_map_copy_entry(
*/
size = src_entry->end - src_entry->start;
if ((src_object = src_entry->object.vm_object) != NULL) {
VM_OBJECT_LOCK(src_object);
VM_OBJECT_WLOCK(src_object);
charged = ENTRY_CHARGED(src_entry);
if ((src_object->handle == NULL) &&
(src_object->type == OBJT_DEFAULT ||
@ -2977,7 +2977,7 @@ vm_map_copy_entry(
src_object->cred = src_entry->cred;
src_object->charge = size;
}
VM_OBJECT_UNLOCK(src_object);
VM_OBJECT_WUNLOCK(src_object);
dst_entry->object.vm_object = src_object;
if (charged) {
cred = curthread->td_ucred;
@ -3153,7 +3153,7 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
vm_object_deallocate(object);
object = old_entry->object.vm_object;
}
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_object_clear_flag(object, OBJ_ONEMAPPING);
if (old_entry->cred != NULL) {
KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
@ -3161,7 +3161,7 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
object->charge = old_entry->end - old_entry->start;
old_entry->cred = NULL;
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);

/*
* Clone the entry, referencing the shared object.
@ -3847,10 +3847,10 @@ RetryLookup:;
crfree(entry->cred);
entry->cred = NULL;
} else if (entry->cred != NULL) {
VM_OBJECT_LOCK(eobject);
VM_OBJECT_WLOCK(eobject);
eobject->cred = entry->cred;
eobject->charge = size;
VM_OBJECT_UNLOCK(eobject);
VM_OBJECT_WUNLOCK(eobject);
entry->cred = NULL;
}

@ -3875,10 +3875,10 @@ RetryLookup:;
atop(size));
entry->offset = 0;
if (entry->cred != NULL) {
VM_OBJECT_LOCK(entry->object.vm_object);
VM_OBJECT_WLOCK(entry->object.vm_object);
entry->object.vm_object->cred = entry->cred;
entry->object.vm_object->charge = size;
VM_OBJECT_UNLOCK(entry->object.vm_object);
VM_OBJECT_WUNLOCK(entry->object.vm_object);
entry->cred = NULL;
}
vm_map_lock_downgrade(map);

@ -111,7 +111,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
*/
mtx_lock(&vm_object_list_mtx);
TAILQ_FOREACH(object, &vm_object_list, object_list) {
if (!VM_OBJECT_TRYLOCK(object)) {
if (!VM_OBJECT_TRYWLOCK(object)) {
/*
* Avoid a lock-order reversal. Consequently,
* the reported number of active pages may be
@ -120,7 +120,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
continue;
}
vm_object_clear_flag(object, OBJ_ACTIVE);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
mtx_unlock(&vm_object_list_mtx);
/*
@ -179,10 +179,10 @@ vmtotal(SYSCTL_HANDLER_ARGS)
if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
(object = entry->object.vm_object) == NULL)
continue;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_object_set_flag(object, OBJ_ACTIVE);
paging |= object->paging_in_progress;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
vm_map_unlock_read(map);
vmspace_free(vm);

@ -881,12 +881,12 @@ RestartScan:
m = PHYS_TO_VM_PAGE(locked_pa);
if (m->object != object) {
if (object != NULL)
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
object = m->object;
locked = VM_OBJECT_TRYLOCK(object);
locked = VM_OBJECT_TRYWLOCK(object);
vm_page_unlock(m);
if (!locked) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_page_lock(m);
goto retry;
}
@ -904,9 +904,9 @@ RestartScan:
*/
if (current->object.vm_object != object) {
if (object != NULL)
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
object = current->object.vm_object;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}
if (object->type == OBJT_DEFAULT ||
object->type == OBJT_SWAP ||
@ -943,7 +943,7 @@ RestartScan:
mincoreinfo |= MINCORE_REFERENCED_OTHER;
}
if (object != NULL)
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);

/*
* subyte may page fault. In case it needs to modify

@ -419,9 +419,9 @@ vm_object_reference(vm_object_t object)
|
||||
{
|
||||
if (object == NULL)
|
||||
return;
|
||||
VM_OBJECT_LOCK(object);
|
||||
VM_OBJECT_WLOCK(object);
|
||||
vm_object_reference_locked(object);
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -465,23 +465,23 @@ vm_object_vndeallocate(vm_object_t object)
|
||||
|
||||
if (object->ref_count > 1) {
|
||||
object->ref_count--;
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
/* vrele may need the vnode lock. */
|
||||
vrele(vp);
|
||||
} else {
|
||||
vhold(vp);
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
|
||||
vdrop(vp);
|
||||
VM_OBJECT_LOCK(object);
|
||||
VM_OBJECT_WLOCK(object);
|
||||
object->ref_count--;
|
||||
if (object->type == OBJT_DEAD) {
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
VOP_UNLOCK(vp, 0);
|
||||
} else {
|
||||
if (object->ref_count == 0)
|
||||
VOP_UNSET_TEXT(vp);
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
vput(vp);
|
||||
}
|
||||
}
|
||||
@ -504,7 +504,7 @@ vm_object_deallocate(vm_object_t object)
|
||||
vm_object_t temp;
|
||||
|
||||
while (object != NULL) {
|
||||
VM_OBJECT_LOCK(object);
|
||||
VM_OBJECT_WLOCK(object);
|
||||
if (object->type == OBJT_VNODE) {
|
||||
vm_object_vndeallocate(object);
|
||||
return;
|
||||
@ -521,7 +521,7 @@ vm_object_deallocate(vm_object_t object)
|
||||
*/
|
||||
object->ref_count--;
|
||||
if (object->ref_count > 1) {
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
return;
|
||||
} else if (object->ref_count == 1) {
|
||||
if (object->shadow_count == 0 &&
|
||||
@ -540,12 +540,12 @@ vm_object_deallocate(vm_object_t object)
|
||||
("vm_object_deallocate: ref_count: %d, shadow_count: %d",
|
||||
object->ref_count,
|
||||
object->shadow_count));
|
||||
if (!VM_OBJECT_TRYLOCK(robject)) {
|
||||
if (!VM_OBJECT_TRYWLOCK(robject)) {
|
||||
/*
|
||||
* Avoid a potential deadlock.
|
||||
*/
|
||||
object->ref_count++;
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
/*
|
||||
* More likely than not the thread
|
||||
* holding robject's lock has lower
|
||||
@ -569,27 +569,27 @@ vm_object_deallocate(vm_object_t object)
|
||||
robject->ref_count++;
|
||||
retry:
|
||||
if (robject->paging_in_progress) {
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
vm_object_pip_wait(robject,
|
||||
"objde1");
|
||||
temp = robject->backing_object;
|
||||
if (object == temp) {
|
||||
VM_OBJECT_LOCK(object);
|
||||
VM_OBJECT_WLOCK(object);
|
||||
goto retry;
|
||||
}
|
||||
} else if (object->paging_in_progress) {
|
||||
VM_OBJECT_UNLOCK(robject);
|
||||
VM_OBJECT_WUNLOCK(robject);
|
||||
object->flags |= OBJ_PIPWNT;
|
||||
VM_OBJECT_SLEEP(object, object,
|
||||
PDROP | PVM, "objde2" , 0);
|
||||
VM_OBJECT_LOCK(robject);
|
||||
VM_OBJECT_WLOCK(robject);
|
||||
temp = robject->backing_object;
|
||||
if (object == temp) {
|
||||
VM_OBJECT_LOCK(object);
|
||||
VM_OBJECT_WLOCK(object);
|
||||
goto retry;
|
||||
}
|
||||
} else
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
|
||||
if (robject->ref_count == 1) {
|
||||
robject->ref_count--;
|
||||
@ -598,21 +598,21 @@ retry:
|
||||
}
|
||||
object = robject;
|
||||
vm_object_collapse(object);
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
continue;
|
||||
}
|
||||
VM_OBJECT_UNLOCK(robject);
|
||||
VM_OBJECT_WUNLOCK(robject);
|
||||
}
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
return;
|
||||
}
|
||||
doterm:
|
||||
temp = object->backing_object;
|
||||
if (temp != NULL) {
|
||||
VM_OBJECT_LOCK(temp);
|
||||
VM_OBJECT_WLOCK(temp);
|
||||
LIST_REMOVE(object, shadow_list);
|
||||
temp->shadow_count--;
|
||||
VM_OBJECT_UNLOCK(temp);
|
||||
VM_OBJECT_WUNLOCK(temp);
|
||||
object->backing_object = NULL;
|
||||
}
|
||||
/*
|
||||
@ -623,7 +623,7 @@ doterm:
|
||||
if ((object->flags & OBJ_DEAD) == 0)
|
||||
vm_object_terminate(object);
|
||||
else
|
||||
VM_OBJECT_UNLOCK(object);
|
||||
VM_OBJECT_WUNLOCK(object);
|
||||
object = temp;
|
||||
}
|
||||
}
|
||||
@ -701,11 +701,11 @@ vm_object_terminate(vm_object_t object)
* Clean pages and flush buffers.
*/
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);

vinvalbuf(vp, V_SAVE, 0, 0);

VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}

KASSERT(object->ref_count == 0,
@ -760,7 +760,7 @@ vm_object_terminate(vm_object_t object)
* Let the pager know object is dead.
*/
vm_pager_deallocate(object);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);

vm_object_destroy(object);
}
@ -960,11 +960,11 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
return (TRUE);
res = TRUE;
error = 0;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
while ((backing_object = object->backing_object) != NULL) {
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_WLOCK(backing_object);
offset += object->backing_object_offset;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
object = backing_object;
if (object->size < OFF_TO_IDX(offset + size))
size = IDX_TO_OFF(object->size) - offset;
@ -984,7 +984,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
if (object->type == OBJT_VNODE &&
(object->flags & OBJ_MIGHTBEDIRTY) != 0) {
vp = object->handle;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
(void) vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (syncio && !invalidate && offset == 0 &&
@ -1002,17 +1002,17 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
fsync_after = FALSE;
}
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
res = vm_object_page_clean(object, offset, offset + size,
flags);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (fsync_after)
error = VOP_FSYNC(vp, MNT_WAIT, curthread);
VOP_UNLOCK(vp, 0);
vn_finished_write(mp);
if (error != 0)
res = FALSE;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}
if ((object->type == OBJT_VNODE ||
object->type == OBJT_DEVICE) && invalidate) {
@ -1030,7 +1030,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
vm_object_page_remove(object, OFF_TO_IDX(offset),
OFF_TO_IDX(offset + size + PAGE_MASK), flags);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (res);
}

@ -1065,7 +1065,7 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,

if (object == NULL)
return;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
/*
* Locate and adjust resident pages
*/
@ -1106,10 +1106,10 @@ shadowlookup:
backing_object = tobject->backing_object;
if (backing_object == NULL)
goto unlock_tobject;
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_WLOCK(backing_object);
tpindex += OFF_TO_IDX(tobject->backing_object_offset);
if (tobject != object)
VM_OBJECT_UNLOCK(tobject);
VM_OBJECT_WUNLOCK(tobject);
tobject = backing_object;
goto shadowlookup;
} else if (m->valid != VM_PAGE_BITS_ALL)
@ -1137,10 +1137,10 @@ shadowlookup:
}
vm_page_unlock(m);
if (object != tobject)
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
m->oflags |= VPO_WANTED;
VM_OBJECT_SLEEP(m, tobject, PDROP | PVM, "madvpo" , 0);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
goto relookup;
}
if (advise == MADV_WILLNEED) {
@ -1173,9 +1173,9 @@ shadowlookup:
swap_pager_freespace(tobject, tpindex, 1);
unlock_tobject:
if (tobject != object)
VM_OBJECT_UNLOCK(tobject);
VM_OBJECT_WUNLOCK(tobject);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}

/*
@ -1203,15 +1203,15 @@ vm_object_shadow(
* Don't create the new object if the old object isn't shared.
*/
if (source != NULL) {
VM_OBJECT_LOCK(source);
VM_OBJECT_WLOCK(source);
if (source->ref_count == 1 &&
source->handle == NULL &&
(source->type == OBJT_DEFAULT ||
source->type == OBJT_SWAP)) {
VM_OBJECT_UNLOCK(source);
VM_OBJECT_WUNLOCK(source);
return;
}
VM_OBJECT_UNLOCK(source);
VM_OBJECT_WUNLOCK(source);
}

/*
@ -1236,7 +1236,7 @@ vm_object_shadow(
*/
result->backing_object_offset = *offset;
if (source != NULL) {
VM_OBJECT_LOCK(source);
VM_OBJECT_WLOCK(source);
LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
source->shadow_count++;
#if VM_NRESERVLEVEL > 0
@ -1244,7 +1244,7 @@ vm_object_shadow(
result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
((1 << (VM_NFREEORDER - 1)) - 1);
#endif
VM_OBJECT_UNLOCK(source);
VM_OBJECT_WUNLOCK(source);
}


@ -1275,7 +1275,7 @@ vm_object_split(vm_map_entry_t entry)
return;
if (orig_object->ref_count <= 1)
return;
VM_OBJECT_UNLOCK(orig_object);
VM_OBJECT_WUNLOCK(orig_object);

offidxstart = OFF_TO_IDX(entry->offset);
size = atop(entry->end - entry->start);
@ -1290,17 +1290,17 @@ vm_object_split(vm_map_entry_t entry)
* At this point, the new object is still private, so the order in
* which the original and new objects are locked does not matter.
*/
VM_OBJECT_LOCK(new_object);
VM_OBJECT_LOCK(orig_object);
VM_OBJECT_WLOCK(new_object);
VM_OBJECT_WLOCK(orig_object);
source = orig_object->backing_object;
if (source != NULL) {
VM_OBJECT_LOCK(source);
VM_OBJECT_WLOCK(source);
if ((source->flags & OBJ_DEAD) != 0) {
VM_OBJECT_UNLOCK(source);
VM_OBJECT_UNLOCK(orig_object);
VM_OBJECT_UNLOCK(new_object);
VM_OBJECT_WUNLOCK(source);
VM_OBJECT_WUNLOCK(orig_object);
VM_OBJECT_WUNLOCK(new_object);
vm_object_deallocate(new_object);
VM_OBJECT_LOCK(orig_object);
VM_OBJECT_WLOCK(orig_object);
return;
}
LIST_INSERT_HEAD(&source->shadow_head,
@ -1308,7 +1308,7 @@ vm_object_split(vm_map_entry_t entry)
source->shadow_count++;
vm_object_reference_locked(source); /* for new_object */
vm_object_clear_flag(source, OBJ_ONEMAPPING);
VM_OBJECT_UNLOCK(source);
VM_OBJECT_WUNLOCK(source);
new_object->backing_object_offset =
orig_object->backing_object_offset + entry->offset;
new_object->backing_object = source;
@ -1335,10 +1335,10 @@ retry:
* not be changed by this operation.
*/
if ((m->oflags & VPO_BUSY) || m->busy) {
VM_OBJECT_UNLOCK(new_object);
VM_OBJECT_WUNLOCK(new_object);
m->oflags |= VPO_WANTED;
VM_OBJECT_SLEEP(m, orig_object, PVM, "spltwt" , 0);
VM_OBJECT_LOCK(new_object);
VM_OBJECT_WLOCK(new_object);
goto retry;
}
#if VM_NRESERVLEVEL > 0
@ -1382,14 +1382,14 @@ retry:
vm_page_cache_transfer(orig_object, offidxstart,
new_object);
}
VM_OBJECT_UNLOCK(orig_object);
VM_OBJECT_WUNLOCK(orig_object);
TAILQ_FOREACH(m, &new_object->memq, listq)
vm_page_wakeup(m);
VM_OBJECT_UNLOCK(new_object);
VM_OBJECT_WUNLOCK(new_object);
entry->object.vm_object = new_object;
entry->offset = 0LL;
vm_object_deallocate(orig_object);
VM_OBJECT_LOCK(new_object);
VM_OBJECT_WLOCK(new_object);
}

#define OBSC_TEST_ALL_SHADOWED 0x0001
@ -1493,12 +1493,12 @@ vm_object_backing_scan(vm_object_t object, int op)
}
} else if (op & OBSC_COLLAPSE_WAIT) {
if ((p->oflags & VPO_BUSY) || p->busy) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
p->oflags |= VPO_WANTED;
VM_OBJECT_SLEEP(p, backing_object,
PDROP | PVM, "vmocol", 0);
VM_OBJECT_LOCK(object);
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_WLOCK(object);
VM_OBJECT_WLOCK(backing_object);
/*
* If we slept, anything could have
* happened. Since the object is
@ -1661,7 +1661,7 @@ vm_object_collapse(vm_object_t object)
* we check the backing object first, because it is most likely
* not collapsable.
*/
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_WLOCK(backing_object);
if (backing_object->handle != NULL ||
(backing_object->type != OBJT_DEFAULT &&
backing_object->type != OBJT_SWAP) ||
@ -1670,7 +1670,7 @@ vm_object_collapse(vm_object_t object)
(object->type != OBJT_DEFAULT &&
object->type != OBJT_SWAP) ||
(object->flags & OBJ_DEAD)) {
VM_OBJECT_UNLOCK(backing_object);
VM_OBJECT_WUNLOCK(backing_object);
break;
}

@ -1679,7 +1679,7 @@ vm_object_collapse(vm_object_t object)
backing_object->paging_in_progress != 0
) {
vm_object_qcollapse(object);
VM_OBJECT_UNLOCK(backing_object);
VM_OBJECT_WUNLOCK(backing_object);
break;
}
/*
@ -1739,7 +1739,7 @@ vm_object_collapse(vm_object_t object)
LIST_REMOVE(object, shadow_list);
backing_object->shadow_count--;
if (backing_object->backing_object) {
VM_OBJECT_LOCK(backing_object->backing_object);
VM_OBJECT_WLOCK(backing_object->backing_object);
LIST_REMOVE(backing_object, shadow_list);
LIST_INSERT_HEAD(
&backing_object->backing_object->shadow_head,
@ -1747,7 +1747,7 @@ vm_object_collapse(vm_object_t object)
/*
* The shadow_count has not changed.
*/
VM_OBJECT_UNLOCK(backing_object->backing_object);
VM_OBJECT_WUNLOCK(backing_object->backing_object);
}
object->backing_object = backing_object->backing_object;
object->backing_object_offset +=
@ -1763,7 +1763,7 @@ vm_object_collapse(vm_object_t object)
KASSERT(backing_object->ref_count == 1, (
"backing_object %p was somehow re-referenced during collapse!",
backing_object));
VM_OBJECT_UNLOCK(backing_object);
VM_OBJECT_WUNLOCK(backing_object);
vm_object_destroy(backing_object);

object_collapses++;
@ -1777,7 +1777,7 @@ vm_object_collapse(vm_object_t object)
if (object->resident_page_count != object->size &&
vm_object_backing_scan(object,
OBSC_TEST_ALL_SHADOWED) == 0) {
VM_OBJECT_UNLOCK(backing_object);
VM_OBJECT_WUNLOCK(backing_object);
break;
}

@ -1791,7 +1791,7 @@ vm_object_collapse(vm_object_t object)

new_backing_object = backing_object->backing_object;
if ((object->backing_object = new_backing_object) != NULL) {
VM_OBJECT_LOCK(new_backing_object);
VM_OBJECT_WLOCK(new_backing_object);
LIST_INSERT_HEAD(
&new_backing_object->shadow_head,
object,
@ -1799,7 +1799,7 @@ vm_object_collapse(vm_object_t object)
);
new_backing_object->shadow_count++;
vm_object_reference_locked(new_backing_object);
VM_OBJECT_UNLOCK(new_backing_object);
VM_OBJECT_WUNLOCK(new_backing_object);
object->backing_object_offset +=
backing_object->backing_object_offset;
}
@ -1809,7 +1809,7 @@ vm_object_collapse(vm_object_t object)
* its ref_count was at least 2, it will not vanish.
*/
backing_object->ref_count--;
VM_OBJECT_UNLOCK(backing_object);
VM_OBJECT_WUNLOCK(backing_object);
object_bypasses++;
}

@ -2056,10 +2056,10 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,

if (prev_object == NULL)
return (TRUE);
VM_OBJECT_LOCK(prev_object);
VM_OBJECT_WLOCK(prev_object);
if (prev_object->type != OBJT_DEFAULT &&
prev_object->type != OBJT_SWAP) {
VM_OBJECT_UNLOCK(prev_object);
VM_OBJECT_WUNLOCK(prev_object);
return (FALSE);
}

@ -2074,7 +2074,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
* pages not mapped to prev_entry may be in use anyway)
*/
if (prev_object->backing_object != NULL) {
VM_OBJECT_UNLOCK(prev_object);
VM_OBJECT_WUNLOCK(prev_object);
return (FALSE);
}

@ -2084,7 +2084,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,

if ((prev_object->ref_count > 1) &&
(prev_object->size != next_pindex)) {
VM_OBJECT_UNLOCK(prev_object);
VM_OBJECT_WUNLOCK(prev_object);
return (FALSE);
}

@ -2138,7 +2138,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
if (next_pindex + next_size > prev_object->size)
prev_object->size = next_pindex + next_size;

VM_OBJECT_UNLOCK(prev_object);
VM_OBJECT_WUNLOCK(prev_object);
return (TRUE);
}

@ -204,17 +204,17 @@ extern struct vm_object kmem_object_store;
#define kernel_object (&kernel_object_store)
#define kmem_object (&kmem_object_store)

#define VM_OBJECT_LOCK(object) \
rw_wlock(&(object)->lock)
#define VM_OBJECT_LOCK_ASSERT(object, type) \
rw_assert(&(object)->lock, (type))
#define VM_OBJECT_LOCK_INIT(object, name) \
rw_init_flags(&(object)->lock, (name), RW_DUPOK)
#define VM_OBJECT_SLEEP(wchan, object, pri, wmesg, timo) \
rw_sleep((wchan), &(object)->lock, (pri), (wmesg), (timo))
#define VM_OBJECT_TRYLOCK(object) \
#define VM_OBJECT_TRYWLOCK(object) \
rw_try_wlock(&(object)->lock)
#define VM_OBJECT_UNLOCK(object) \
#define VM_OBJECT_WLOCK(object) \
rw_wlock(&(object)->lock)
#define VM_OBJECT_WUNLOCK(object) \
rw_wunlock(&(object)->lock)

/*
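For orientation, a minimal usage sketch of the renamed vm_object.h macros follows. It is not part of this diff and the helper function is hypothetical; only the macro names change, the write-locking pattern at call sites keeps the same shape.

/*
 * Hypothetical helper, shown only to illustrate the renamed macros.
 * The page is assumed to belong to the object.
 */
static void
vm_object_write_example(vm_object_t object, vm_page_t m)
{

	VM_OBJECT_WLOCK(object);	/* was VM_OBJECT_LOCK() */
	vm_page_undirty(m);		/* mutate state under the write lock */
	VM_OBJECT_WUNLOCK(object);	/* was VM_OBJECT_UNLOCK() */

	/* Non-blocking variant (was VM_OBJECT_TRYLOCK()). */
	if (VM_OBJECT_TRYWLOCK(object)) {
		vm_page_undirty(m);
		VM_OBJECT_WUNLOCK(object);
	}
}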
@ -2577,9 +2577,9 @@ retrylookup:
m = vm_page_alloc(object, pindex, allocflags & ~(VM_ALLOC_RETRY |
VM_ALLOC_IGN_SBUSY));
if (m == NULL) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
VM_WAIT;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
goto retrylookup;
} else if (m->valid != 0)
return (m);
@ -2976,9 +2976,9 @@ vm_page_cowfault(vm_page_t m)
if (mnew == NULL) {
vm_page_insert(m, object, pindex);
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
VM_WAIT;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (m == vm_page_lookup(object, pindex)) {
vm_page_lock(m);
goto retry_alloc;
@ -3035,11 +3035,11 @@ vm_page_cowsetup(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0 ||
(m->oflags & VPO_UNMANAGED) != 0 ||
m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object))
m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYWLOCK(m->object))
return (EBUSY);
m->cow++;
pmap_remove_write(m);
VM_OBJECT_UNLOCK(m->object);
VM_OBJECT_WUNLOCK(m->object);
return (0);
}

@ -249,7 +249,7 @@ vm_pageout_init_marker(vm_page_t marker, u_short queue)
/*
* vm_pageout_fallback_object_lock:
*
* Lock vm object currently associated with `m'. VM_OBJECT_TRYLOCK is
* Lock vm object currently associated with `m'. VM_OBJECT_TRYWLOCK is
* known to have failed and page queue must be either PQ_ACTIVE or
* PQ_INACTIVE. To avoid lock order violation, unlock the page queues
* while locking the vm object. Use marker page to detect page queue
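A condensed sketch of the try-then-fallback pattern this comment describes, mirroring the call sites changed below in vm_pageout_scan() and friends. It is illustrative only and the wrapper name is hypothetical; the fallback always acquires the object lock and returns FALSE when the page's queue association changed while the queues were unlocked.

/* Hypothetical wrapper, not in the tree; shown to illustrate the pattern. */
static boolean_t
pageout_object_trylock(vm_page_t m, vm_page_t *next)
{
	vm_object_t object;

	object = m->object;
	/* Fast path: try the object write lock without sleeping. */
	if (!VM_OBJECT_TRYWLOCK(object) &&
	    !vm_pageout_fallback_object_lock(m, next)) {
		/*
		 * The fallback path took the lock, but the page moved
		 * while the queues were unlocked; the caller drops the
		 * lock and skips this page.
		 */
		VM_OBJECT_WUNLOCK(object);
		return (FALSE);
	}
	return (TRUE);
}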
@ -277,7 +277,7 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, pageq);
vm_pagequeue_unlock(pq);
vm_page_unlock(m);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_page_lock(m);
vm_pagequeue_lock(pq);

@ -596,12 +596,12 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
continue;
}
object = m->object;
if ((!VM_OBJECT_TRYLOCK(object) &&
if ((!VM_OBJECT_TRYWLOCK(object) &&
(!vm_pageout_fallback_object_lock(m, &next) ||
m->hold_count != 0)) || (m->oflags & VPO_BUSY) != 0 ||
m->busy != 0) {
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
continue;
}
vm_page_test_dirty(m);
@ -610,19 +610,19 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
if (m->dirty != 0) {
vm_page_unlock(m);
if (tries == 0 || (object->flags & OBJ_DEAD) != 0) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
continue;
}
if (object->type == OBJT_VNODE) {
vm_pagequeue_unlock(pq);
vp = object->handle;
vm_object_reference_locked(object);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
(void)vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
VOP_UNLOCK(vp, 0);
vm_object_deallocate(object);
vn_finished_write(mp);
@ -633,7 +633,7 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
m_tmp = m;
vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC,
0, NULL, NULL);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (TRUE);
}
} else {
@ -645,7 +645,7 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
vm_page_cache(m);
vm_page_unlock(m);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
vm_pagequeue_unlock(pq);
return (FALSE);
@ -776,13 +776,13 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
}
if ((backing_object = object->backing_object) == NULL)
goto unlock_return;
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_WLOCK(backing_object);
if (object != first_object)
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
unlock_return:
if (object != first_object)
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}

/*
@ -812,15 +812,15 @@ vm_pageout_map_deactivate_pages(map, desired)
while (tmpe != &map->header) {
if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
obj = tmpe->object.vm_object;
if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
if (obj != NULL && VM_OBJECT_TRYWLOCK(obj)) {
if (obj->shadow_count <= 1 &&
(bigobj == NULL ||
bigobj->resident_page_count < obj->resident_page_count)) {
if (bigobj != NULL)
VM_OBJECT_UNLOCK(bigobj);
VM_OBJECT_WUNLOCK(bigobj);
bigobj = obj;
} else
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
}
}
if (tmpe->wired_count > 0)
@ -830,7 +830,7 @@ vm_pageout_map_deactivate_pages(map, desired)

if (bigobj != NULL) {
vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
VM_OBJECT_UNLOCK(bigobj);
VM_OBJECT_WUNLOCK(bigobj);
}
/*
* Next, hunt around for other pages to deactivate. We actually
@ -843,9 +843,9 @@ vm_pageout_map_deactivate_pages(map, desired)
if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
obj = tmpe->object.vm_object;
if (obj != NULL) {
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
}
}
tmpe = tmpe->next;
@ -964,10 +964,10 @@ vm_pageout_scan(int pass)
continue;
}
object = m->object;
if (!VM_OBJECT_TRYLOCK(object) &&
if (!VM_OBJECT_TRYWLOCK(object) &&
!vm_pageout_fallback_object_lock(m, &next)) {
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
continue;
}

@ -980,7 +980,7 @@ vm_pageout_scan(int pass)
*/
if (m->busy != 0 || (m->oflags & VPO_BUSY) != 0) {
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
addl_page_shortage++;
continue;
}
@ -1017,7 +1017,7 @@ vm_pageout_scan(int pass)
vm_page_activate(m);
vm_page_unlock(m);
m->act_count += actcount + ACT_ADVANCE;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
goto relock_queues;
}

@ -1033,13 +1033,13 @@ vm_pageout_scan(int pass)
vm_page_activate(m);
vm_page_unlock(m);
m->act_count += actcount + ACT_ADVANCE + 1;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
goto relock_queues;
}

if (m->hold_count != 0) {
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);

/*
* Held pages are essentially stuck in the
@ -1123,7 +1123,7 @@ vm_pageout_scan(int pass)
if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
vm_pagequeue_lock(pq);
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
queues_locked = TRUE;
vm_page_requeue_locked(m);
goto relock_queues;
@ -1166,17 +1166,17 @@ vm_pageout_scan(int pass)
KASSERT(mp != NULL,
("vp %p with NULL v_mount", vp));
vm_object_reference_locked(object);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK,
curthread)) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
++pageout_lock_miss;
if (object->flags & OBJ_MIGHTBEDIRTY)
vnodes_skipped++;
vp = NULL;
goto unlock_and_continue;
}
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_page_lock(m);
vm_pagequeue_lock(pq);
queues_locked = TRUE;
@ -1237,7 +1237,7 @@ vm_pageout_scan(int pass)
}
unlock_and_continue:
vm_page_lock_assert(m, MA_NOTOWNED);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (mp != NULL) {
if (queues_locked) {
vm_pagequeue_unlock(pq);
@ -1252,7 +1252,7 @@ unlock_and_continue:
goto relock_queues;
}
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
relock_queues:
if (!queues_locked) {
vm_pagequeue_lock(pq);
@ -1300,9 +1300,9 @@ relock_queues:
continue;
}
object = m->object;
if (!VM_OBJECT_TRYLOCK(object) &&
if (!VM_OBJECT_TRYWLOCK(object) &&
!vm_pageout_fallback_object_lock(m, &next)) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vm_page_unlock(m);
m = next;
continue;
@ -1315,7 +1315,7 @@ relock_queues:
(m->oflags & VPO_BUSY) ||
(m->hold_count != 0)) {
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vm_page_requeue_locked(m);
m = next;
continue;
@ -1376,7 +1376,7 @@ relock_queues:
vm_page_requeue_locked(m);
}
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
m = next;
}
vm_pagequeue_unlock(pq);
@ -1572,9 +1572,9 @@ vm_pageout_page_stats(void)
continue;
}
object = m->object;
if (!VM_OBJECT_TRYLOCK(object) &&
if (!VM_OBJECT_TRYWLOCK(object) &&
!vm_pageout_fallback_object_lock(m, &next)) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vm_page_unlock(m);
m = next;
continue;
@ -1587,7 +1587,7 @@ vm_pageout_page_stats(void)
(m->oflags & VPO_BUSY) ||
(m->hold_count != 0)) {
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vm_page_requeue_locked(m);
m = next;
continue;
@ -1626,7 +1626,7 @@ vm_pageout_page_stats(void)
}
}
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
m = next;
}
vm_pagequeue_unlock(pq);

@ -273,13 +273,13 @@ vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)

TAILQ_FOREACH(object, pg_list, pager_object_list) {
if (object->handle == handle) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if ((object->flags & OBJ_DEAD) == 0) {
vm_object_reference_locked(object);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
break;
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
}
return (object);

@ -110,9 +110,9 @@ vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
return (0);

while ((object = vp->v_object) != NULL) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (!(object->flags & OBJ_DEAD)) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (0);
}
VOP_UNLOCK(vp, 0);
@ -136,9 +136,9 @@ vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
* Dereference the reference we just created. This assumes
* that the object is associated with the vp.
*/
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
object->ref_count--;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vrele(vp);

KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));
@ -155,7 +155,7 @@ vnode_destroy_vobject(struct vnode *vp)
if (obj == NULL)
return;
ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
if (obj->ref_count == 0) {
/*
* vclean() may be called twice. The first time
@ -168,13 +168,13 @@ vnode_destroy_vobject(struct vnode *vp)
if ((obj->flags & OBJ_DEAD) == 0)
vm_object_terminate(obj);
else
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
} else {
/*
* Woe to the process that tries to page now :-).
*/
vm_pager_deallocate(obj);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
}
vp->v_object = NULL;
}
@ -207,7 +207,7 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
*/
retry:
while ((object = vp->v_object) != NULL) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if ((object->flags & OBJ_DEAD) == 0)
break;
vm_object_set_flag(object, OBJ_DISCONNECTWNT);
@ -240,7 +240,7 @@ retry:
VI_UNLOCK(vp);
} else {
object->ref_count++;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
vref(vp);
return (object);
@ -279,10 +279,10 @@ vnode_pager_dealloc(object)
}
vp->v_object = NULL;
VOP_UNSET_TEXT(vp);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
while (refs-- > 0)
vunref(vp);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}

static boolean_t
@ -323,9 +323,9 @@ vnode_pager_haspage(object, pindex, before, after)
blocksperpage = (PAGE_SIZE / bsize);
reqblock = pindex * blocksperpage;
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (err)
return TRUE;
if (bn == -1)
@ -380,12 +380,12 @@ vnode_pager_setsize(vp, nsize)
if ((object = vp->v_object) == NULL)
return;
/* ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (nsize == object->un_pager.vnp.vnp_size) {
/*
* Hasn't changed size
*/
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return;
}
nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
@ -446,7 +446,7 @@ vnode_pager_setsize(vp, nsize)
}
object->un_pager.vnp.vnp_size = nsize;
object->size = nobjsize;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}

/*
@ -569,9 +569,9 @@ vnode_pager_input_smlfs(object, m)
bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
KASSERT((m->dirty & bits) == 0,
("vnode_pager_input_smlfs: page %p is dirty", m));
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
m->valid |= bits;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
sf_buf_free(sf);
if (error) {
@ -608,7 +608,7 @@ vnode_pager_input_old(object, m)
if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
vp = object->handle;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);

/*
* Allocate a kernel virtual address and initialize so that
@ -638,7 +638,7 @@ vnode_pager_input_old(object, m)
}
sf_buf_free(sf);

VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}
KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
if (!error)
@ -670,11 +670,11 @@ vnode_pager_getpages(object, m, count, reqpage)
int bytes = count * PAGE_SIZE;

vp = object->handle;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
KASSERT(rtval != EOPNOTSUPP,
("vnode_pager: FS getpages not implemented\n"));
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
return rtval;
}

@ -724,7 +724,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
*/
error = VOP_BMAP(vp, foff / bsize, &bo, &reqblock, NULL, NULL);
if (error == EOPNOTSUPP) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);

for (i = 0; i < count; i++)
if (i != reqpage) {
@ -735,17 +735,17 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
PCPU_INC(cnt.v_vnodein);
PCPU_INC(cnt.v_vnodepgsin);
error = vnode_pager_input_old(object, m[reqpage]);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (error);
} else if (error != 0) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (i = 0; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_ERROR);

/*
@ -755,14 +755,14 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
*/
} else if ((PAGE_SIZE / bsize) > 1 &&
(vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (i = 0; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
PCPU_INC(cnt.v_vnodein);
PCPU_INC(cnt.v_vnodepgsin);
return vnode_pager_input_smlfs(object, m[reqpage]);
@ -773,7 +773,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
* clean up and return. Otherwise we have to re-read the
* media.
*/
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
for (i = 0; i < count; i++)
if (i != reqpage) {
@ -781,7 +781,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return VM_PAGER_OK;
} else if (reqblock == -1) {
pmap_zero_page(m[reqpage]);
@ -794,11 +794,11 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_OK);
}
m[reqpage]->valid = 0;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);

/*
* here on direct device I/O
@ -811,18 +811,18 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
for (first = 0, i = 0; i < count; i = runend) {
if (vnode_pager_addr(vp, IDX_TO_OFF(m[i]->pindex), &firstaddr,
&runpg) != 0) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_ERROR);
}
if (firstaddr == -1) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
panic("vnode_pager_getpages: unexpected missing page: firstaddr: %jd, foff: 0x%jx%08jx, vnp_size: 0x%jx%08jx",
(intmax_t)firstaddr, (uintmax_t)(foff >> 32),
@ -834,29 +834,29 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
runend = i + 1;
first = runend;
continue;
}
runend = i + runpg;
if (runend <= reqpage) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (j = i; j < runend; j++) {
vm_page_lock(m[j]);
vm_page_free(m[j]);
vm_page_unlock(m[j]);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
} else {
if (runpg < (count - first)) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (i = first + runpg; i < count; i++) {
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
count = first + runpg;
}
break;
@ -947,7 +947,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
pbrelbo(bp);
relpbuf(bp, &vnode_pbuf_freecnt);

VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
vm_page_t mt;

@ -984,7 +984,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
if (i != reqpage)
vm_page_readahead_finish(mt);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (error) {
printf("vnode_pager_getpages: I/O read error\n");
}
@ -1030,11 +1030,11 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
* Call device-specific putpages function
*/
vp = object->handle;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
KASSERT(rtval != EOPNOTSUPP,
("vnode_pager: stale FS putpages\n"));
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}


@ -1096,7 +1096,7 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
* We do not under any circumstances truncate the valid bits, as
* this will screw up bogus page replacement.
*/
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
if (object->un_pager.vnp.vnp_size > poffset) {
int pgoff;
@ -1128,7 +1128,7 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
}
}
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);

/*
* pageouts are already clustered, use IO_ASYNC to force a bawrite()
@ -1182,7 +1182,7 @@ vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written)
if (written == 0)
return;
obj = ma[0]->object;
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) {
if (pos < trunc_page(written)) {
rtvals[i] = VM_PAGER_OK;
@ -1193,7 +1193,7 @@ vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written)
vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK);
}
}
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
}

void
@ -1203,9 +1203,9 @@ vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
struct vnode *vp;
vm_ooffset_t old_wm;

VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (object->type != OBJT_VNODE) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return;
}
old_wm = object->un_pager.vnp.writemappings;
@ -1222,7 +1222,7 @@ vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
__func__, vp, vp->v_writecount);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}

void
@ -1233,14 +1233,14 @@ vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
struct mount *mp;
vm_offset_t inc;

VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);

/*
* First, recheck the object type to account for the race when
* the vnode is reclaimed.
*/
if (object->type != OBJT_VNODE) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return;
}

@ -1251,13 +1251,13 @@ vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
inc = end - start;
if (object->un_pager.vnp.writemappings != inc) {
object->un_pager.vnp.writemappings -= inc;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return;
}

vp = object->handle;
vhold(vp);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
mp = NULL;
vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);