Switch the vm_object mutex to an rwlock. This will enable further
optimizations in the future, where the vm_object lock is held in read
mode most of the time that the page cache's resident pool of pages is
accessed for reading.

The change is mostly mechanical, but a few notes are worth reporting:
* The KPI changes as follows:
  - VM_OBJECT_LOCK() -> VM_OBJECT_WLOCK()
  - VM_OBJECT_TRYLOCK() -> VM_OBJECT_TRYWLOCK()
  - VM_OBJECT_UNLOCK() -> VM_OBJECT_WUNLOCK()
  - VM_OBJECT_LOCK_ASSERT(MA_OWNED) -> VM_OBJECT_ASSERT_WLOCKED()
    (the new name avoids exposing implementation details such as MA_OWNED)
  - Read-mode operations are added:
    VM_OBJECT_RLOCK(), VM_OBJECT_TRYRLOCK(), VM_OBJECT_RUNLOCK(),
    VM_OBJECT_ASSERT_RLOCKED(), VM_OBJECT_ASSERT_LOCKED()
* vm/vm_pager.h avoids namespace pollution and thus does not include
  the lock headers itself; consumers had to include sys/mutex.h
  directly to support its inline functions built on VM_OBJECT_LOCK().
  All vm/vm_pager.h consumers must therefore now also include
  sys/rwlock.h (a converted consumer is sketched after this list).
* ZFS requires a rather convoluted fix to pull FreeBSD rwlocks into
  its compat layer, because a name clash between the FreeBSD and
  Solaris versions must be avoided.  To this end, ZFS redefines the
  vm_object locking functions directly, isolating the FreeBSD
  components in dedicated compat stubs.
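As an illustration only (not part of this commit), here is a minimal
sketch of a converted vm/vm_pager.h consumer under the new KPI; the
function name is hypothetical and the bodies are placeholders:

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/rwlock.h>    /* now required by vm/vm_pager.h consumers */

    #include <vm/vm.h>
    #include <vm/vm_object.h>
    #include <vm/vm_pager.h>

    static void
    example_object_sync(vm_object_t object)
    {

        /* Exclusive access: replaces VM_OBJECT_LOCK()/VM_OBJECT_UNLOCK(). */
        VM_OBJECT_WLOCK(object);
        VM_OBJECT_ASSERT_WLOCKED(object);
        /* ... modify the object's resident pages here ... */
        VM_OBJECT_WUNLOCK(object);

        /* Read mode, for the future read-mostly page cache accesses. */
        VM_OBJECT_RLOCK(object);
        VM_OBJECT_ASSERT_LOCKED(object);    /* read or write lock held */
        /* ... read-only inspection of resident pages here ... */
        VM_OBJECT_RUNLOCK(object);
    }

The ZFS stubs added below (zfs_vmobject_wlock() and friends) wrap the
same write-mode macros behind the compat boundary, so code in the
Solaris namespace never sees the FreeBSD rwlock names directly.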

This commit heavily breaks the KPI.  Third-party ports must be updated
accordingly (VirtualBox, for example, comes to mind off-hand).

Sponsored by:	EMC / Isilon storage division
Reviewed by:	jeff
Reviewed by:	pjd (ZFS specific review)
Discussed with:	alc
Tested by:	pho
Attilio Rao 2013-03-09 02:32:23 +00:00
parent c934116100
commit 89f6b8632c
94 changed files with 1000 additions and 783 deletions


@ -80,6 +80,7 @@ __FBSDID("$FreeBSD$");
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#ifdef SMP


@ -3493,7 +3493,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
va >= kmi.clean_eva,
("pmap_enter: managed mapping within the clean submap"));
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
pa = VM_PAGE_TO_PHYS(m);
newpte = (pt_entry_t)(pa | PG_A | PG_V);
if ((access & VM_PROT_WRITE) != 0)
@ -3760,7 +3760,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_page_t m, mpte;
vm_pindex_t diff, psize;
VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m_start->object);
psize = atop(end - start);
mpte = NULL;
m = m_start;
@ -3942,7 +3942,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_page_t p, pdpg;
int pat_mode;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
if ((addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
@ -4556,7 +4556,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
@ -4687,7 +4687,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
@ -4831,7 +4831,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));


@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>


@ -2212,7 +2212,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_pindex_t pindex, vm_size_t size)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
}
@ -3428,7 +3428,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
@ -3475,7 +3475,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) != 0 ||
(m->aflags & PGA_WRITEABLE) != 0)
pmap_clearbit(m, PVF_WRITE);


@ -3006,7 +3006,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_pindex_t pindex, vm_size_t size)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
}
@ -4461,7 +4461,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
@ -4523,7 +4523,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) != 0 ||
(m->aflags & PGA_WRITEABLE) != 0)
pmap_clearbit(m, PVF_WRITE);


@ -0,0 +1,68 @@
/*-
* Copyright (c) 2013 EMC Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/freebsd_rwlock.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
const int zfs_vm_pagerret_bad = VM_PAGER_BAD;
const int zfs_vm_pagerret_error = VM_PAGER_ERROR;
const int zfs_vm_pagerret_ok = VM_PAGER_OK;
void
zfs_vmobject_assert_wlocked(vm_object_t object)
{
/*
* This is not ideal because FILE/LINE used by assertions will not
* be too helpful, but it must be a hard function for
* compatibility reasons.
*/
VM_OBJECT_ASSERT_WLOCKED(object);
}
void
zfs_vmobject_wlock(vm_object_t object)
{
VM_OBJECT_WLOCK(object);
}
void
zfs_vmobject_wunlock(vm_object_t object)
{
VM_OBJECT_WUNLOCK(object);
}


@ -0,0 +1,34 @@
/*-
* Copyright (c) 2013 EMC Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _OPENSOLARIS_SYS_FREEBSD_RWLOCK_H_
#define _OPENSOLARIS_SYS_FREEBSD_RWLOCK_H_
#include_next <sys/rwlock.h>
#endif


@ -0,0 +1,44 @@
/*-
* Copyright (c) 2013 EMC Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _OPENSOLARIS_SYS_VM_H_
#define _OPENSOLARIS_SYS_VM_H_
#ifdef _KERNEL
extern const int zfs_vm_pagerret_bad;
extern const int zfs_vm_pagerret_error;
extern const int zfs_vm_pagerret_ok;
void zfs_vmobject_assert_wlocked(vm_object_t object);
void zfs_vmobject_wlock(vm_object_t object);
void zfs_vmobject_wunlock(vm_object_t object);
#endif /* _KERNEL */
#endif /* _OPENSOLARIS_SYS_VM_H_ */


@ -103,7 +103,6 @@ extern "C" {
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
/* There is a clash: vm_map.h defines the two below and vdev_cache.c uses them. */


@ -33,6 +33,7 @@
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs.h>
#include <sys/vm.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stat.h>
@ -329,7 +330,7 @@ page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
vm_page_t pp;
obj = vp->v_object;
VM_OBJECT_LOCK_ASSERT(obj, MA_OWNED);
zfs_vmobject_assert_wlocked(obj);
for (;;) {
if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
@ -377,7 +378,7 @@ page_hold(vnode_t *vp, int64_t start)
vm_page_t pp;
obj = vp->v_object;
VM_OBJECT_LOCK_ASSERT(obj, MA_OWNED);
zfs_vmobject_assert_wlocked(obj);
for (;;) {
if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
@ -450,7 +451,7 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
ASSERT(obj != NULL);
off = start & PAGEOFFSET;
VM_OBJECT_LOCK(obj);
zfs_vmobject_wlock(obj);
for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
vm_page_t pp;
int nbytes = imin(PAGESIZE - off, len);
@ -467,23 +468,23 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
("zfs update_pages: unbusy page in putpages case"));
KASSERT(!pmap_page_is_write_mapped(pp),
("zfs update_pages: writable page in putpages case"));
VM_OBJECT_UNLOCK(obj);
zfs_vmobject_wunlock(obj);
va = zfs_map_page(pp, &sf);
(void) dmu_write(os, oid, start, nbytes, va, tx);
zfs_unmap_page(sf);
VM_OBJECT_LOCK(obj);
zfs_vmobject_wlock(obj);
vm_page_undirty(pp);
} else if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
VM_OBJECT_UNLOCK(obj);
zfs_vmobject_wunlock(obj);
va = zfs_map_page(pp, &sf);
(void) dmu_read(os, oid, start+off, nbytes,
va+off, DMU_READ_PREFETCH);
zfs_unmap_page(sf);
VM_OBJECT_LOCK(obj);
zfs_vmobject_wlock(obj);
page_unbusy(pp);
}
len -= nbytes;
@ -491,7 +492,7 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
}
if (segflg != UIO_NOCOPY)
vm_object_pip_wakeupn(obj, 0);
VM_OBJECT_UNLOCK(obj);
zfs_vmobject_wunlock(obj);
}
/*
@ -523,7 +524,7 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
ASSERT(obj != NULL);
ASSERT((uio->uio_loffset & PAGEOFFSET) == 0);
VM_OBJECT_LOCK(obj);
zfs_vmobject_wlock(obj);
for (start = uio->uio_loffset; len > 0; start += PAGESIZE) {
int bytes = MIN(PAGESIZE, len);
@ -531,14 +532,14 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_IGN_SBUSY);
if (pp->valid == 0) {
vm_page_io_start(pp);
VM_OBJECT_UNLOCK(obj);
zfs_vmobject_wunlock(obj);
va = zfs_map_page(pp, &sf);
error = dmu_read(os, zp->z_id, start, bytes, va,
DMU_READ_PREFETCH);
if (bytes != PAGESIZE && error == 0)
bzero(va + bytes, PAGESIZE - bytes);
zfs_unmap_page(sf);
VM_OBJECT_LOCK(obj);
zfs_vmobject_wlock(obj);
vm_page_io_finish(pp);
vm_page_lock(pp);
if (error) {
@ -555,7 +556,7 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
uio->uio_offset += bytes;
len -= bytes;
}
VM_OBJECT_UNLOCK(obj);
zfs_vmobject_wunlock(obj);
return (error);
}
@ -587,7 +588,7 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
start = uio->uio_loffset;
off = start & PAGEOFFSET;
VM_OBJECT_LOCK(obj);
zfs_vmobject_wlock(obj);
for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
vm_page_t pp;
uint64_t bytes = MIN(PAGESIZE - off, len);
@ -596,23 +597,23 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
struct sf_buf *sf;
caddr_t va;
VM_OBJECT_UNLOCK(obj);
zfs_vmobject_wunlock(obj);
va = zfs_map_page(pp, &sf);
error = uiomove(va + off, bytes, UIO_READ, uio);
zfs_unmap_page(sf);
VM_OBJECT_LOCK(obj);
zfs_vmobject_wlock(obj);
page_unhold(pp);
} else {
VM_OBJECT_UNLOCK(obj);
zfs_vmobject_wunlock(obj);
error = dmu_read_uio(os, zp->z_id, uio, bytes);
VM_OBJECT_LOCK(obj);
zfs_vmobject_wlock(obj);
}
len -= bytes;
off = 0;
if (error)
break;
}
VM_OBJECT_UNLOCK(obj);
zfs_vmobject_wunlock(obj);
return (error);
}
@ -5684,7 +5685,7 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
mfirst = m[reqstart];
mlast = m[reqstart + reqsize - 1];
VM_OBJECT_LOCK(object);
zfs_vmobject_wlock(object);
for (i = 0; i < reqstart; i++) {
vm_page_lock(m[i]);
@ -5700,9 +5701,9 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
if (mreq->valid && reqsize == 1) {
if (mreq->valid != VM_PAGE_BITS_ALL)
vm_page_zero_invalid(mreq, TRUE);
VM_OBJECT_UNLOCK(object);
zfs_vmobject_wunlock(object);
ZFS_EXIT(zfsvfs);
return (VM_PAGER_OK);
return (zfs_vm_pagerret_ok);
}
PCPU_INC(cnt.v_vnodein);
@ -5716,16 +5717,16 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
vm_page_unlock(m[i]);
}
}
VM_OBJECT_UNLOCK(object);
zfs_vmobject_wunlock(object);
ZFS_EXIT(zfsvfs);
return (VM_PAGER_BAD);
return (zfs_vm_pagerret_bad);
}
lsize = PAGE_SIZE;
if (IDX_TO_OFF(mlast->pindex) + lsize > object->un_pager.vnp.vnp_size)
lsize = object->un_pager.vnp.vnp_size - IDX_TO_OFF(mlast->pindex);
VM_OBJECT_UNLOCK(object);
zfs_vmobject_wunlock(object);
for (i = reqstart; i < reqstart + reqsize; i++) {
size = PAGE_SIZE;
@ -5741,7 +5742,7 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
break;
}
VM_OBJECT_LOCK(object);
zfs_vmobject_wlock(object);
for (i = reqstart; i < reqstart + reqsize; i++) {
if (!error)
@ -5751,11 +5752,11 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
vm_page_readahead_finish(m[i]);
}
VM_OBJECT_UNLOCK(object);
zfs_vmobject_wunlock(object);
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
ZFS_EXIT(zfsvfs);
return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
return (error ? zfs_vm_pagerret_error : zfs_vm_pagerret_ok);
}
static int


@ -1031,9 +1031,9 @@ linprocfs_doprocmaps(PFS_FILL_ARGS)
e_end = entry->end;
obj = entry->object.vm_object;
for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
VM_OBJECT_LOCK(tobj);
VM_OBJECT_WLOCK(tobj);
if (lobj != obj)
VM_OBJECT_UNLOCK(lobj);
VM_OBJECT_WUNLOCK(lobj);
lobj = tobj;
}
last_timestamp = map->timestamp;
@ -1049,11 +1049,11 @@ linprocfs_doprocmaps(PFS_FILL_ARGS)
else
vp = NULL;
if (lobj != obj)
VM_OBJECT_UNLOCK(lobj);
VM_OBJECT_WUNLOCK(lobj);
flags = obj->flags;
ref_count = obj->ref_count;
shadow_count = obj->shadow_count;
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
if (vp) {
vn_fullpath(td, vp, &name, &freename);
vn_lock(vp, LK_SHARED | LK_RETRY);


@ -157,6 +157,7 @@ cddl/compat/opensolaris/kern/opensolaris_sysevent.c optional zfs compile-with
cddl/compat/opensolaris/kern/opensolaris_taskq.c optional zfs compile-with "${ZFS_C}"
cddl/compat/opensolaris/kern/opensolaris_uio.c optional zfs compile-with "${ZFS_C}"
cddl/compat/opensolaris/kern/opensolaris_vfs.c optional zfs compile-with "${ZFS_C}"
cddl/compat/opensolaris/kern/opensolaris_vm.c optional zfs compile-with "${ZFS_C}"
cddl/compat/opensolaris/kern/opensolaris_zone.c optional zfs compile-with "${ZFS_C}"
cddl/contrib/opensolaris/common/acl/acl_common.c optional zfs compile-with "${ZFS_C}"
cddl/contrib/opensolaris/common/avl/avl.c optional zfs compile-with "${ZFS_C}"


@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <dev/agp/agppriv.h>
#include <dev/agp/agpvar.h>
@ -544,7 +545,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
* because vm_page_grab() may sleep and we can't hold a mutex
* while sleeping.
*/
VM_OBJECT_LOCK(mem->am_obj);
VM_OBJECT_WLOCK(mem->am_obj);
for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
/*
* Find a page from the object and wire it
@ -557,14 +558,14 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
AGP_DPF("found page pa=%#jx\n", (uintmax_t)VM_PAGE_TO_PHYS(m));
}
VM_OBJECT_UNLOCK(mem->am_obj);
VM_OBJECT_WUNLOCK(mem->am_obj);
mtx_lock(&sc->as_lock);
if (mem->am_is_bound) {
device_printf(dev, "memory already bound\n");
error = EINVAL;
VM_OBJECT_LOCK(mem->am_obj);
VM_OBJECT_WLOCK(mem->am_obj);
i = 0;
goto bad;
}
@ -573,7 +574,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
* Bind the individual pages and flush the chipset's
* TLB.
*/
VM_OBJECT_LOCK(mem->am_obj);
VM_OBJECT_WLOCK(mem->am_obj);
for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));
@ -601,7 +602,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
}
vm_page_wakeup(m);
}
VM_OBJECT_UNLOCK(mem->am_obj);
VM_OBJECT_WUNLOCK(mem->am_obj);
/*
* Flush the cpu cache since we are providing a new mapping
@ -622,7 +623,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
return 0;
bad:
mtx_unlock(&sc->as_lock);
VM_OBJECT_LOCK_ASSERT(mem->am_obj, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(mem->am_obj);
for (k = 0; k < mem->am_size; k += PAGE_SIZE) {
m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(k));
if (k >= i)
@ -631,7 +632,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
vm_page_unwire(m, 0);
vm_page_unlock(m);
}
VM_OBJECT_UNLOCK(mem->am_obj);
VM_OBJECT_WUNLOCK(mem->am_obj);
return error;
}
@ -658,14 +659,14 @@ agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
*/
for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
AGP_UNBIND_PAGE(dev, mem->am_offset + i);
VM_OBJECT_LOCK(mem->am_obj);
VM_OBJECT_WLOCK(mem->am_obj);
for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
m = vm_page_lookup(mem->am_obj, atop(i));
vm_page_lock(m);
vm_page_unwire(m, 0);
vm_page_unlock(m);
}
VM_OBJECT_UNLOCK(mem->am_obj);
VM_OBJECT_WUNLOCK(mem->am_obj);
agp_flush_cache();
AGP_FLUSH_TLB(dev);


@ -56,6 +56,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <dev/agp/agppriv.h>
#include <dev/agp/agpreg.h>
@ -1967,10 +1968,10 @@ agp_i810_alloc_memory(device_t dev, int type, vm_size_t size)
* Allocate and wire down the page now so that we can
* get its physical address.
*/
VM_OBJECT_LOCK(mem->am_obj);
VM_OBJECT_WLOCK(mem->am_obj);
m = vm_page_grab(mem->am_obj, 0, VM_ALLOC_NOBUSY |
VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
VM_OBJECT_UNLOCK(mem->am_obj);
VM_OBJECT_WUNLOCK(mem->am_obj);
mem->am_physical = VM_PAGE_TO_PHYS(m);
} else {
/* Our allocation is already nicely wired down for us.
@ -2005,12 +2006,12 @@ agp_i810_free_memory(device_t dev, struct agp_memory *mem)
/*
* Unwire the page which we wired in alloc_memory.
*/
VM_OBJECT_LOCK(mem->am_obj);
VM_OBJECT_WLOCK(mem->am_obj);
m = vm_page_lookup(mem->am_obj, 0);
vm_page_lock(m);
vm_page_unwire(m, 0);
vm_page_unlock(m);
VM_OBJECT_UNLOCK(mem->am_obj);
VM_OBJECT_WUNLOCK(mem->am_obj);
} else {
contigfree(sc->argb_cursor, mem->am_size, M_AGP);
sc->argb_cursor = NULL;


@ -59,6 +59,7 @@ struct drm_file;
#include <sys/fcntl.h>
#include <sys/uio.h>
#include <sys/filio.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/queue.h>


@ -58,6 +58,7 @@ struct drm_file;
#include <sys/fcntl.h>
#include <sys/uio.h>
#include <sys/filio.h>
#include <sys/rwlock.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/bus.h>


@ -990,14 +990,14 @@ i915_gem_swap_io(struct drm_device *dev, struct drm_i915_gem_object *obj,
vm_obj = obj->base.vm_obj;
ret = 0;
VM_OBJECT_LOCK(vm_obj);
VM_OBJECT_WLOCK(vm_obj);
vm_object_pip_add(vm_obj, 1);
while (size > 0) {
obj_pi = OFF_TO_IDX(offset);
obj_po = offset & PAGE_MASK;
m = i915_gem_wire_page(vm_obj, obj_pi);
VM_OBJECT_UNLOCK(vm_obj);
VM_OBJECT_WUNLOCK(vm_obj);
sched_pin();
sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
@ -1031,7 +1031,7 @@ i915_gem_swap_io(struct drm_device *dev, struct drm_i915_gem_object *obj,
}
sf_buf_free(sf);
sched_unpin();
VM_OBJECT_LOCK(vm_obj);
VM_OBJECT_WLOCK(vm_obj);
if (rw == UIO_WRITE)
vm_page_dirty(m);
vm_page_reference(m);
@ -1044,7 +1044,7 @@ i915_gem_swap_io(struct drm_device *dev, struct drm_i915_gem_object *obj,
break;
}
vm_object_pip_wakeup(vm_obj);
VM_OBJECT_UNLOCK(vm_obj);
VM_OBJECT_WUNLOCK(vm_obj);
return (ret);
}
@ -1357,7 +1357,7 @@ i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
} else
oldm = NULL;
retry:
VM_OBJECT_UNLOCK(vm_obj);
VM_OBJECT_WUNLOCK(vm_obj);
unlocked_vmobj:
cause = ret = 0;
m = NULL;
@ -1407,7 +1407,7 @@ i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
obj->fault_mappable = true;
VM_OBJECT_LOCK(vm_obj);
VM_OBJECT_WLOCK(vm_obj);
m = vm_phys_fictitious_to_vm_page(dev->agp->base + obj->gtt_offset +
offset);
if (m == NULL) {
@ -1452,7 +1452,7 @@ i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
kern_yield(PRI_USER);
goto unlocked_vmobj;
}
VM_OBJECT_LOCK(vm_obj);
VM_OBJECT_WLOCK(vm_obj);
vm_object_pip_wakeup(vm_obj);
return (VM_PAGER_ERROR);
}
@ -2208,12 +2208,12 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
obj->pages = malloc(page_count * sizeof(vm_page_t), DRM_I915_GEM,
M_WAITOK);
vm_obj = obj->base.vm_obj;
VM_OBJECT_LOCK(vm_obj);
VM_OBJECT_WLOCK(vm_obj);
for (i = 0; i < page_count; i++) {
if ((obj->pages[i] = i915_gem_wire_page(vm_obj, i)) == NULL)
goto failed;
}
VM_OBJECT_UNLOCK(vm_obj);
VM_OBJECT_WUNLOCK(vm_obj);
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
return (0);
@ -2226,7 +2226,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
vm_page_unlock(m);
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
}
VM_OBJECT_UNLOCK(vm_obj);
VM_OBJECT_WUNLOCK(vm_obj);
free(obj->pages, DRM_I915_GEM);
obj->pages = NULL;
return (-EIO);
@ -2272,7 +2272,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
page_count = obj->base.size / PAGE_SIZE;
VM_OBJECT_LOCK(obj->base.vm_obj);
VM_OBJECT_WLOCK(obj->base.vm_obj);
#if GEM_PARANOID_CHECK_GTT
i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
#endif
@ -2287,7 +2287,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
vm_page_unlock(m);
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
}
VM_OBJECT_UNLOCK(obj->base.vm_obj);
VM_OBJECT_WUNLOCK(obj->base.vm_obj);
obj->dirty = 0;
free(obj->pages, DRM_I915_GEM);
obj->pages = NULL;
@ -2309,7 +2309,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
if (devobj != NULL) {
page_count = OFF_TO_IDX(obj->base.size);
VM_OBJECT_LOCK(devobj);
VM_OBJECT_WLOCK(devobj);
retry:
for (i = 0; i < page_count; i++) {
m = vm_page_lookup(devobj, i);
@ -2319,7 +2319,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
goto retry;
cdev_pager_free_page(devobj, m);
}
VM_OBJECT_UNLOCK(devobj);
VM_OBJECT_WUNLOCK(devobj);
vm_object_deallocate(devobj);
}
@ -2437,9 +2437,9 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
vm_object_t vm_obj;
vm_obj = obj->base.vm_obj;
VM_OBJECT_LOCK(vm_obj);
VM_OBJECT_WLOCK(vm_obj);
vm_object_page_remove(vm_obj, 0, 0, false);
VM_OBJECT_UNLOCK(vm_obj);
VM_OBJECT_WUNLOCK(vm_obj);
obj->madv = I915_MADV_PURGED_INTERNAL;
}
@ -2488,7 +2488,7 @@ i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex)
vm_page_t m;
int rv;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
if (vm_pager_has_page(object, pindex, NULL, NULL)) {
@ -3567,13 +3567,13 @@ i915_gem_detach_phys_object(struct drm_device *dev,
vaddr = obj->phys_obj->handle->vaddr;
page_count = obj->base.size / PAGE_SIZE;
VM_OBJECT_LOCK(obj->base.vm_obj);
VM_OBJECT_WLOCK(obj->base.vm_obj);
for (i = 0; i < page_count; i++) {
m = i915_gem_wire_page(obj->base.vm_obj, i);
if (m == NULL)
continue; /* XXX */
VM_OBJECT_UNLOCK(obj->base.vm_obj);
VM_OBJECT_WUNLOCK(obj->base.vm_obj);
sf = sf_buf_alloc(m, 0);
if (sf != NULL) {
dst = (char *)sf_buf_kva(sf);
@ -3582,7 +3582,7 @@ i915_gem_detach_phys_object(struct drm_device *dev,
}
drm_clflush_pages(&m, 1);
VM_OBJECT_LOCK(obj->base.vm_obj);
VM_OBJECT_WLOCK(obj->base.vm_obj);
vm_page_reference(m);
vm_page_lock(m);
vm_page_dirty(m);
@ -3590,7 +3590,7 @@ i915_gem_detach_phys_object(struct drm_device *dev,
vm_page_unlock(m);
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
}
VM_OBJECT_UNLOCK(obj->base.vm_obj);
VM_OBJECT_WUNLOCK(obj->base.vm_obj);
intel_gtt_chipset_flush();
obj->phys_obj->cur_obj = NULL;
@ -3632,7 +3632,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
page_count = obj->base.size / PAGE_SIZE;
VM_OBJECT_LOCK(obj->base.vm_obj);
VM_OBJECT_WLOCK(obj->base.vm_obj);
ret = 0;
for (i = 0; i < page_count; i++) {
m = i915_gem_wire_page(obj->base.vm_obj, i);
@ -3640,14 +3640,14 @@ i915_gem_attach_phys_object(struct drm_device *dev,
ret = -EIO;
break;
}
VM_OBJECT_UNLOCK(obj->base.vm_obj);
VM_OBJECT_WUNLOCK(obj->base.vm_obj);
sf = sf_buf_alloc(m, 0);
src = (char *)sf_buf_kva(sf);
dst = (char *)obj->phys_obj->handle->vaddr + IDX_TO_OFF(i);
memcpy(dst, src, PAGE_SIZE);
sf_buf_free(sf);
VM_OBJECT_LOCK(obj->base.vm_obj);
VM_OBJECT_WLOCK(obj->base.vm_obj);
vm_page_reference(m);
vm_page_lock(m);
@ -3655,7 +3655,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
vm_page_unlock(m);
atomic_add_long(&i915_gem_wired_pages_cnt, -1);
}
VM_OBJECT_UNLOCK(obj->base.vm_obj);
VM_OBJECT_WUNLOCK(obj->base.vm_obj);
return (0);
}


@ -118,7 +118,7 @@ ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
} else
oldm = NULL;
retry:
VM_OBJECT_UNLOCK(vm_obj);
VM_OBJECT_WUNLOCK(vm_obj);
m = NULL;
reserve:
@ -213,7 +213,7 @@ ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
}
VM_OBJECT_LOCK(vm_obj);
VM_OBJECT_WLOCK(vm_obj);
if ((m->flags & VPO_BUSY) != 0) {
vm_page_sleep(m, "ttmpbs");
ttm_mem_io_unlock(man);
@ -241,11 +241,11 @@ ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
return (retval);
out_io_unlock:
VM_OBJECT_LOCK(vm_obj);
VM_OBJECT_WLOCK(vm_obj);
goto out_io_unlock1;
out_unlock:
VM_OBJECT_LOCK(vm_obj);
VM_OBJECT_WLOCK(vm_obj);
goto out_unlock1;
}


@ -285,7 +285,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
obj = ttm->swap_storage;
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
vm_object_pip_add(obj, 1);
for (i = 0; i < ttm->num_pages; ++i) {
from_page = vm_page_grab(obj, i, VM_ALLOC_RETRY);
@ -312,7 +312,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
vm_page_wakeup(from_page);
}
vm_object_pip_wakeup(obj);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
vm_object_deallocate(obj);
@ -322,7 +322,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
err_ret:
vm_object_pip_wakeup(obj);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
return (ret);
}
@ -346,7 +346,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
} else
obj = persistent_swap_storage;
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
vm_object_pip_add(obj, 1);
for (i = 0; i < ttm->num_pages; ++i) {
from_page = ttm->pages[i];
@ -359,7 +359,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
vm_page_wakeup(to_page);
}
vm_object_pip_wakeup(obj);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
ttm->bdev->driver->ttm_tt_unpopulate(ttm);
ttm->swap_storage = obj;


@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
@ -1671,7 +1672,7 @@ pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
}
obj = entry->object.vm_object;
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
/*
* Walk the backing_object list to find the base
@ -1679,9 +1680,9 @@ pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
*/
for (lobj = tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
if (tobj != obj)
VM_OBJECT_LOCK(tobj);
VM_OBJECT_WLOCK(tobj);
if (lobj != obj)
VM_OBJECT_UNLOCK(lobj);
VM_OBJECT_WUNLOCK(lobj);
lobj = tobj;
}
@ -1691,14 +1692,14 @@ pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
if (lobj == NULL) {
PMCDBG(LOG,OPS,2, "hwpmc: lobj unexpectedly NULL! pid=%d "
"vm_map=%p vm_obj=%p\n", p->p_pid, map, obj);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
continue;
}
if (lobj->type != OBJT_VNODE || lobj->handle == NULL) {
if (lobj != obj)
VM_OBJECT_UNLOCK(lobj);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(lobj);
VM_OBJECT_WUNLOCK(obj);
continue;
}
@ -1710,8 +1711,8 @@ pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
if (entry->start == last_end && lobj->handle == last_vp) {
last_end = entry->end;
if (lobj != obj)
VM_OBJECT_UNLOCK(lobj);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(lobj);
VM_OBJECT_WUNLOCK(obj);
continue;
}
@ -1733,9 +1734,9 @@ pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
vp = lobj->handle;
vref(vp);
if (lobj != obj)
VM_OBJECT_UNLOCK(lobj);
VM_OBJECT_WUNLOCK(lobj);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
freepath = NULL;
pmc_getfilename(vp, &fullpath, &freepath);


@ -75,6 +75,7 @@
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
@ -657,17 +658,17 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
rv = VM_PAGER_OK;
VM_OBJECT_LOCK(sc->object);
VM_OBJECT_WLOCK(sc->object);
vm_object_pip_add(sc->object, 1);
for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
m = vm_page_grab(sc->object, i,
VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
VM_OBJECT_UNLOCK(sc->object);
VM_OBJECT_WUNLOCK(sc->object);
sched_pin();
sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
VM_OBJECT_LOCK(sc->object);
VM_OBJECT_WLOCK(sc->object);
if (bp->bio_cmd == BIO_READ) {
if (m->valid != VM_PAGE_BITS_ALL)
rv = vm_pager_get_pages(sc->object, &m, 1, 0);
@ -732,7 +733,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
offs = 0;
}
vm_object_pip_subtract(sc->object, 1);
VM_OBJECT_UNLOCK(sc->object);
VM_OBJECT_WUNLOCK(sc->object);
return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}
@ -1068,7 +1069,7 @@ mdresize(struct md_s *sc, struct md_ioctl *mdio)
oldpages = OFF_TO_IDX(round_page(sc->mediasize));
newpages = OFF_TO_IDX(round_page(mdio->md_mediasize));
if (newpages < oldpages) {
VM_OBJECT_LOCK(sc->object);
VM_OBJECT_WLOCK(sc->object);
vm_object_page_remove(sc->object, newpages, 0, 0);
swap_pager_freespace(sc->object, newpages,
oldpages - newpages);
@ -1076,7 +1077,7 @@ mdresize(struct md_s *sc, struct md_ioctl *mdio)
newpages), sc->cred);
sc->object->charge = IDX_TO_OFF(newpages);
sc->object->size = newpages;
VM_OBJECT_UNLOCK(sc->object);
VM_OBJECT_WUNLOCK(sc->object);
} else if (newpages > oldpages) {
res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
oldpages), sc->cred);
@ -1093,10 +1094,10 @@ mdresize(struct md_s *sc, struct md_ioctl *mdio)
return (EDOM);
}
}
VM_OBJECT_LOCK(sc->object);
VM_OBJECT_WLOCK(sc->object);
sc->object->charge = IDX_TO_OFF(newpages);
sc->object->size = newpages;
VM_OBJECT_UNLOCK(sc->object);
VM_OBJECT_WUNLOCK(sc->object);
}
break;
default:


@ -81,6 +81,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mman.h> /* PROT_EXEC */
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <vm/vm.h> /* vtophys */
#include <vm/pmap.h> /* vtophys */
#include <sys/socket.h> /* sockaddrs */


@ -32,6 +32,8 @@
#include <dev/sound/pcm/sound.h>
#include <sys/ctype.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/sysent.h>
#include <vm/vm.h>


@ -69,6 +69,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
@ -784,9 +785,9 @@ fuse_io_invalbuf(struct vnode *vp, struct thread *td)
fvdat->flag |= FN_FLUSHINPROG;
if (vp->v_bufobj.bo_object != NULL) {
VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
}
error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
while (error) {


@ -67,7 +67,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/proc.h>
#include <sys/mount.h>
@ -1758,7 +1758,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
* can only occur at the file EOF.
*/
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
fuse_vm_page_lock_queues();
if (pages[ap->a_reqpage]->valid != 0) {
for (i = 0; i < npages; ++i) {
@ -1769,11 +1769,11 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
}
}
fuse_vm_page_unlock_queues();
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
return 0;
}
fuse_vm_page_unlock_queues();
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
/*
* We use only the kva address for the buffer, but this is extremely
@ -1803,7 +1803,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
if (error && (uio.uio_resid == count)) {
FS_DEBUG("error %d\n", error);
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
fuse_vm_page_lock_queues();
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
@ -1813,7 +1813,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
}
}
fuse_vm_page_unlock_queues();
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
return VM_PAGER_ERROR;
}
/*
@ -1823,7 +1823,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
*/
size = count - uio.uio_resid;
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
fuse_vm_page_lock_queues();
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
@ -1886,7 +1886,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
}
}
fuse_vm_page_unlock_queues();
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
return 0;
}
@ -1975,9 +1975,9 @@ fuse_vnop_putpages(struct vop_putpages_args *ap)
for (i = 0; i < nwritten; i++) {
rtvals[i] = VM_PAGER_OK;
VM_OBJECT_LOCK(pages[i]->object);
VM_OBJECT_WLOCK(pages[i]->object);
vm_page_undirty(pages[i]);
VM_OBJECT_UNLOCK(pages[i]->object);
VM_OBJECT_WUNLOCK(pages[i]->object);
}
}
return rtvals[0];


@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
@ -134,7 +135,7 @@ ncl_getpages(struct vop_getpages_args *ap)
* allow the pager to zero-out the blanks. Partially valid pages
* can only occur at the file EOF.
*/
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (pages[ap->a_reqpage]->valid != 0) {
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
@ -143,10 +144,10 @@ ncl_getpages(struct vop_getpages_args *ap)
vm_page_unlock(pages[i]);
}
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (0);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
/*
* We use only the kva address for the buffer, but this is extremely
@ -176,7 +177,7 @@ ncl_getpages(struct vop_getpages_args *ap)
if (error && (uio.uio_resid == count)) {
ncl_printf("nfs_getpages: error %d\n", error);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
vm_page_lock(pages[i]);
@ -184,7 +185,7 @@ ncl_getpages(struct vop_getpages_args *ap)
vm_page_unlock(pages[i]);
}
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_ERROR);
}
@ -195,7 +196,7 @@ ncl_getpages(struct vop_getpages_args *ap)
*/
size = count - uio.uio_resid;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
nextoff = toff + PAGE_SIZE;
@ -231,7 +232,7 @@ ncl_getpages(struct vop_getpages_args *ap)
if (i != ap->a_reqpage)
vm_page_readahead_finish(m);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (0);
}
@ -1353,9 +1354,9 @@ ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
* Now, flush as required.
*/
if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
/*
* If the page clean was interrupted, fail the invalidation.
* Not doing so, we run the risk of losing dirty pages in the


@ -216,10 +216,10 @@ ncl_inactive(struct vop_inactive_args *ap)
* stateid is available for the writes.
*/
if (vp->v_object != NULL) {
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
retv = vm_object_page_clean(vp->v_object, 0, 0,
OBJPC_SYNC);
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
} else
retv = TRUE;
if (retv == TRUE) {


@ -697,9 +697,9 @@ nfs_close(struct vop_close_args *ap)
* mmap'ed writes or via write().
*/
if (nfs_clean_pages_on_close && vp->v_object) {
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
}
mtx_lock(&np->n_mtx);
if (np->n_flag & NMODIFIED) {


@ -1267,9 +1267,9 @@ nfsvno_fsync(struct vnode *vp, u_int64_t off, int cnt, struct ucred *cred,
*/
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, MNT_WAIT, td);
} else {
@ -1298,10 +1298,10 @@ nfsvno_fsync(struct vnode *vp, u_int64_t off, int cnt, struct ucred *cred,
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, off, off + cnt,
OBJPC_SYNC);
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
}
bo = &vp->v_bufobj;


@ -43,9 +43,9 @@
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#ifdef COMPAT_FREEBSD32
#include <sys/sysent.h>
@ -132,7 +132,7 @@ procfs_doprocmap(PFS_FILL_ARGS)
privateresident = 0;
obj = entry->object.vm_object;
if (obj != NULL) {
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
if (obj->shadow_count == 1)
privateresident = obj->resident_page_count;
}
@ -148,9 +148,9 @@ procfs_doprocmap(PFS_FILL_ARGS)
for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
if (tobj != obj)
VM_OBJECT_LOCK(tobj);
VM_OBJECT_WLOCK(tobj);
if (lobj != obj)
VM_OBJECT_UNLOCK(lobj);
VM_OBJECT_WUNLOCK(lobj);
lobj = tobj;
}
last_timestamp = map->timestamp;
@ -181,12 +181,12 @@ procfs_doprocmap(PFS_FILL_ARGS)
break;
}
if (lobj != obj)
VM_OBJECT_UNLOCK(lobj);
VM_OBJECT_WUNLOCK(lobj);
flags = obj->flags;
ref_count = obj->ref_count;
shadow_count = obj->shadow_count;
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
if (vp != NULL) {
vn_fullpath(td, vp, &fullpath, &freepath);
vrele(vp);


@ -38,9 +38,11 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/fnv_hash.h>
#include <sys/lock.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
@ -1270,7 +1272,7 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
tmpfs_pages_check_avail(tmp, newpages - oldpages) == 0)
return (ENOSPC);
VM_OBJECT_LOCK(uobj);
VM_OBJECT_WLOCK(uobj);
if (newsize < oldsize) {
/*
* Zero the truncated part of the last page.
@ -1290,9 +1292,9 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
} else if (vm_pager_has_page(uobj, idx, NULL, NULL)) {
m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL);
if (m == NULL) {
VM_OBJECT_UNLOCK(uobj);
VM_OBJECT_WUNLOCK(uobj);
VM_WAIT;
VM_OBJECT_LOCK(uobj);
VM_OBJECT_WLOCK(uobj);
goto retry;
} else if (m->valid != VM_PAGE_BITS_ALL) {
ma[0] = m;
@ -1312,7 +1314,7 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
if (ignerr)
m = NULL;
else {
VM_OBJECT_UNLOCK(uobj);
VM_OBJECT_WUNLOCK(uobj);
return (EIO);
}
}
@ -1334,7 +1336,7 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
}
}
uobj->size = newpages;
VM_OBJECT_UNLOCK(uobj);
VM_OBJECT_WUNLOCK(uobj);
TMPFS_LOCK(tmp);
tmp->tm_pages_used += (newpages - oldpages);


@ -39,9 +39,11 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/lock.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/stat.h>
@ -445,7 +447,7 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
vm_page_t m;
int error, rv;
VM_OBJECT_LOCK(tobj);
VM_OBJECT_WLOCK(tobj);
m = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
@ -455,20 +457,20 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
vm_page_lock(m);
vm_page_free(m);
vm_page_unlock(m);
VM_OBJECT_UNLOCK(tobj);
VM_OBJECT_WUNLOCK(tobj);
return (EIO);
}
} else
vm_page_zero_invalid(m, TRUE);
}
VM_OBJECT_UNLOCK(tobj);
VM_OBJECT_WUNLOCK(tobj);
error = uiomove_fromphys(&m, offset, tlen, uio);
VM_OBJECT_LOCK(tobj);
VM_OBJECT_WLOCK(tobj);
vm_page_lock(m);
vm_page_unwire(m, TRUE);
vm_page_unlock(m);
vm_page_wakeup(m);
VM_OBJECT_UNLOCK(tobj);
VM_OBJECT_WUNLOCK(tobj);
return (error);
}
@ -511,7 +513,7 @@ tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio
offset = addr & PAGE_MASK;
tlen = MIN(PAGE_SIZE - offset, len);
VM_OBJECT_LOCK(vobj);
VM_OBJECT_WLOCK(vobj);
lookupvpg:
if (((m = vm_page_lookup(vobj, idx)) != NULL) &&
vm_page_is_valid(m, offset, tlen)) {
@ -525,11 +527,11 @@ tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio
goto lookupvpg;
}
vm_page_busy(m);
VM_OBJECT_UNLOCK(vobj);
VM_OBJECT_WUNLOCK(vobj);
error = uiomove_fromphys(&m, offset, tlen, uio);
VM_OBJECT_LOCK(vobj);
VM_OBJECT_WLOCK(vobj);
vm_page_wakeup(m);
VM_OBJECT_UNLOCK(vobj);
VM_OBJECT_WUNLOCK(vobj);
return (error);
} else if (m != NULL && uio->uio_segflg == UIO_NOCOPY) {
KASSERT(offset == 0,
@ -544,7 +546,7 @@ tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio
goto lookupvpg;
}
vm_page_busy(m);
VM_OBJECT_UNLOCK(vobj);
VM_OBJECT_WUNLOCK(vobj);
sched_pin();
sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
ma = (char *)sf_buf_kva(sf);
@ -557,14 +559,14 @@ tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio
}
sf_buf_free(sf);
sched_unpin();
VM_OBJECT_LOCK(vobj);
VM_OBJECT_WLOCK(vobj);
if (error == 0)
m->valid = VM_PAGE_BITS_ALL;
vm_page_wakeup(m);
VM_OBJECT_UNLOCK(vobj);
VM_OBJECT_WUNLOCK(vobj);
return (error);
}
VM_OBJECT_UNLOCK(vobj);
VM_OBJECT_WUNLOCK(vobj);
error = tmpfs_nocacheread(tobj, idx, offset, tlen, uio);
return (error);
@ -634,7 +636,7 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *ui
offset = addr & PAGE_MASK;
tlen = MIN(PAGE_SIZE - offset, len);
VM_OBJECT_LOCK(vobj);
VM_OBJECT_WLOCK(vobj);
lookupvpg:
if (((vpg = vm_page_lookup(vobj, idx)) != NULL) &&
vm_page_is_valid(vpg, offset, tlen)) {
@ -649,15 +651,15 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *ui
}
vm_page_busy(vpg);
vm_page_undirty(vpg);
VM_OBJECT_UNLOCK(vobj);
VM_OBJECT_WUNLOCK(vobj);
error = uiomove_fromphys(&vpg, offset, tlen, uio);
} else {
if (vm_page_is_cached(vobj, idx))
vm_page_cache_free(vobj, idx, idx + 1);
VM_OBJECT_UNLOCK(vobj);
VM_OBJECT_WUNLOCK(vobj);
vpg = NULL;
}
VM_OBJECT_LOCK(tobj);
VM_OBJECT_WLOCK(tobj);
tpg = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (tpg->valid != VM_PAGE_BITS_ALL) {
@ -673,14 +675,14 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *ui
} else
vm_page_zero_invalid(tpg, TRUE);
}
VM_OBJECT_UNLOCK(tobj);
VM_OBJECT_WUNLOCK(tobj);
if (vpg == NULL)
error = uiomove_fromphys(&tpg, offset, tlen, uio);
else {
KASSERT(vpg->valid == VM_PAGE_BITS_ALL, ("parts of vpg invalid"));
pmap_copy_page(vpg, tpg);
}
VM_OBJECT_LOCK(tobj);
VM_OBJECT_WLOCK(tobj);
if (error == 0) {
KASSERT(tpg->valid == VM_PAGE_BITS_ALL,
("parts of tpg invalid"));
@ -691,11 +693,11 @@ tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *ui
vm_page_unlock(tpg);
vm_page_wakeup(tpg);
out:
VM_OBJECT_UNLOCK(tobj);
VM_OBJECT_WUNLOCK(tobj);
if (vpg != NULL) {
VM_OBJECT_LOCK(vobj);
VM_OBJECT_WLOCK(vobj);
vm_page_wakeup(vpg);
VM_OBJECT_UNLOCK(vobj);
VM_OBJECT_WUNLOCK(vobj);
}
return (error);


@ -81,6 +81,7 @@ __FBSDID("$FreeBSD$");
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#ifdef SMP


@ -3457,7 +3457,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
va));
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
mpte = NULL;
@ -3711,7 +3711,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_page_t m, mpte;
vm_pindex_t diff, psize;
VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m_start->object);
psize = atop(end - start);
mpte = NULL;
m = m_start;
@ -3889,7 +3889,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_page_t p;
int pat_mode;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
if (pseflag &&
@ -4508,7 +4508,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
@ -4643,7 +4643,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
@ -4795,7 +4795,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));


@ -2667,7 +2667,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
va));
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
mpte = NULL;
@ -2870,7 +2870,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
multicall_entry_t *mclp = mcl;
int error, count = 0;
VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m_start->object);
psize = atop(end - start);
mpte = NULL;
m = m_start;
@ -3110,7 +3110,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_page_t p;
int pat_mode;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
if (pseflag &&
@ -3656,7 +3656,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PG_M set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
@ -3787,7 +3787,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
@ -3888,7 +3888,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));


@ -55,6 +55,7 @@ __FBSDID("$FreeBSD$");
#include <sys/ptrace.h>
#include <sys/random.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>


@ -1802,7 +1802,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_page_t m;
vm_pindex_t diff, psize;
VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m_start->object);
psize = atop(end - start);
m = m_start;
rw_wlock(&pvh_global_lock);
@ -1893,7 +1893,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_size_t size)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
}
@ -2211,7 +2211,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can be dirty.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
@ -2295,7 +2295,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
@ -2373,7 +2373,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;


@ -45,7 +45,6 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
@ -53,6 +52,7 @@ __FBSDID("$FreeBSD$");
#include <sys/procfs.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/systm.h>
@ -1278,15 +1278,15 @@ each_writable_segment(td, func, closure)
continue;
/* Ignore memory-mapped devices and such things. */
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
while ((backing_object = object->backing_object) != NULL) {
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WLOCK(backing_object);
VM_OBJECT_WUNLOCK(object);
object = backing_object;
}
ignore_entry = object->type != OBJT_DEFAULT &&
object->type != OBJT_SWAP && object->type != OBJT_VNODE;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (ignore_entry)
continue;


@ -57,6 +57,7 @@ __FBSDID("$FreeBSD$");
#include <sys/pioctl.h>
#include <sys/namei.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/sf_buf.h>
@ -929,7 +930,7 @@ exec_map_first_page(imgp)
object = imgp->vp->v_object;
if (object == NULL)
return (EACCES);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
#if VM_NRESERVLEVEL > 0
if ((object->flags & OBJ_COLORED) == 0) {
object->flags |= OBJ_COLORED;
@ -964,7 +965,7 @@ exec_map_first_page(imgp)
vm_page_free(ma[0]);
vm_page_unlock(ma[0]);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (EIO);
}
}
@ -972,7 +973,7 @@ exec_map_first_page(imgp)
vm_page_hold(ma[0]);
vm_page_unlock(ma[0]);
vm_page_wakeup(ma[0]);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
imgp->firstpage = sf_buf_alloc(ma[0], 0);
imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);


@ -55,6 +55,7 @@ __FBSDID("$FreeBSD$");
#include <sys/ptrace.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysent.h>
#include <sys/sched.h>
@ -1994,7 +1995,7 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
kve->kve_private_resident = 0;
obj = entry->object.vm_object;
if (obj != NULL) {
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
if (obj->shadow_count == 1)
kve->kve_private_resident =
obj->resident_page_count;
@ -2009,9 +2010,9 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
if (tobj != obj)
VM_OBJECT_LOCK(tobj);
VM_OBJECT_WLOCK(tobj);
if (lobj != obj)
VM_OBJECT_UNLOCK(lobj);
VM_OBJECT_WUNLOCK(lobj);
lobj = tobj;
}
@ -2071,11 +2072,11 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
break;
}
if (lobj != obj)
VM_OBJECT_UNLOCK(lobj);
VM_OBJECT_WUNLOCK(lobj);
kve->kve_ref_count = obj->ref_count;
kve->kve_shadow_count = obj->shadow_count;
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
if (vp != NULL) {
vn_fullpath(curthread, vp, &fullpath,
&freepath);
@ -2161,7 +2162,7 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
kve->kve_private_resident = 0;
obj = entry->object.vm_object;
if (obj != NULL) {
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
if (obj->shadow_count == 1)
kve->kve_private_resident =
obj->resident_page_count;
@ -2182,9 +2183,9 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
if (tobj != obj)
VM_OBJECT_LOCK(tobj);
VM_OBJECT_WLOCK(tobj);
if (lobj != obj)
VM_OBJECT_UNLOCK(lobj);
VM_OBJECT_WUNLOCK(lobj);
lobj = tobj;
}
@ -2246,11 +2247,11 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
break;
}
if (lobj != obj)
VM_OBJECT_UNLOCK(lobj);
VM_OBJECT_WUNLOCK(lobj);
kve->kve_ref_count = obj->ref_count;
kve->kve_shadow_count = obj->shadow_count;
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
if (vp != NULL) {
vn_fullpath(curthread, vp, &fullpath,
&freepath);

View File

@ -34,7 +34,7 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vdso.h>
@ -107,11 +107,11 @@ shared_page_init(void *dummy __unused)
sx_init(&shared_page_alloc_sx, "shpsx");
shared_page_obj = vm_pager_allocate(OBJT_PHYS, 0, PAGE_SIZE,
VM_PROT_DEFAULT, 0, NULL);
VM_OBJECT_LOCK(shared_page_obj);
VM_OBJECT_WLOCK(shared_page_obj);
m = vm_page_grab(shared_page_obj, 0, VM_ALLOC_RETRY | VM_ALLOC_NOBUSY |
VM_ALLOC_ZERO);
m->valid = VM_PAGE_BITS_ALL;
VM_OBJECT_UNLOCK(shared_page_obj);
VM_OBJECT_WUNLOCK(shared_page_obj);
addr = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
pmap_qenter(addr, &m, 1);
shared_page_mapping = (char *)addr;

View File

@ -62,6 +62,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

View File

@ -45,9 +45,9 @@ __FBSDID("$FreeBSD$");
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
@ -104,7 +104,7 @@ vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
&upindex, &prot, &wired)) != KERN_SUCCESS) {
return(EFAULT);
}
VM_OBJECT_LOCK(uobject);
VM_OBJECT_WLOCK(uobject);
retry:
if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
@ -124,7 +124,7 @@ vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
}
vm_page_insert(kern_pg, uobject, upindex);
vm_page_dirty(kern_pg);
VM_OBJECT_UNLOCK(uobject);
VM_OBJECT_WUNLOCK(uobject);
vm_map_lookup_done(map, entry);
return(KERN_SUCCESS);
}

View File

@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
@ -381,7 +382,7 @@ ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
obj = entry->object.vm_object;
if (obj != NULL)
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
} while (0);
vm_map_unlock_read(map);
@ -394,9 +395,9 @@ ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
lobj = obj;
for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
if (tobj != obj)
VM_OBJECT_LOCK(tobj);
VM_OBJECT_WLOCK(tobj);
if (lobj != obj)
VM_OBJECT_UNLOCK(lobj);
VM_OBJECT_WUNLOCK(lobj);
lobj = tobj;
pve->pve_offset += tobj->backing_object_offset;
}
@ -404,8 +405,8 @@ ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
if (vp != NULL)
vref(vp);
if (lobj != obj)
VM_OBJECT_UNLOCK(lobj);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(lobj);
VM_OBJECT_WUNLOCK(obj);
if (vp != NULL) {
freepath = NULL;

View File

@ -79,6 +79,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
@ -707,10 +708,10 @@ shmget_allocate_segment(td, uap, mode)
#endif
return (ENOMEM);
}
VM_OBJECT_LOCK(shm_object);
VM_OBJECT_WLOCK(shm_object);
vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
vm_object_set_flag(shm_object, OBJ_NOSPLIT);
VM_OBJECT_UNLOCK(shm_object);
VM_OBJECT_WUNLOCK(shm_object);
shmseg->object = shm_object;
shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = cred->cr_uid;

View File

@ -61,6 +61,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
@ -253,9 +254,9 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
int base, rv;
object = shmfd->shm_object;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (length == shmfd->shm_size) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (0);
}
nobjsize = OFF_TO_IDX(length + PAGE_MASK);
@ -267,7 +268,7 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
* object is mapped into the kernel.
*/
if (shmfd->shm_kmappings > 0) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (EBUSY);
}
@ -288,9 +289,9 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
if (m == NULL) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
VM_WAIT;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
goto retry;
} else if (m->valid != VM_PAGE_BITS_ALL) {
ma[0] = m;
@ -308,7 +309,7 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
} else {
vm_page_free(m);
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (EIO);
}
}
@ -338,7 +339,7 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
/* Attempt to reserve the swap */
delta = ptoa(nobjsize - object->size);
if (!swap_reserve_by_cred(delta, object->cred)) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (ENOMEM);
}
object->charge += delta;
@ -349,7 +350,7 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
shmfd->shm_mtime = shmfd->shm_ctime;
mtx_unlock(&shm_timestamp_lock);
object->size = nobjsize;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (0);
}
@ -370,10 +371,10 @@ shm_alloc(struct ucred *ucred, mode_t mode)
shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
VM_OBJECT_LOCK(shmfd->shm_object);
VM_OBJECT_WLOCK(shmfd->shm_object);
vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
vm_object_set_flag(shmfd->shm_object, OBJ_NOSPLIT);
VM_OBJECT_UNLOCK(shmfd->shm_object);
VM_OBJECT_WUNLOCK(shmfd->shm_object);
vfs_timestamp(&shmfd->shm_birthtime);
shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
shmfd->shm_birthtime;
@ -761,20 +762,20 @@ shm_map(struct file *fp, size_t size, off_t offset, void **memp)
return (EINVAL);
shmfd = fp->f_data;
obj = shmfd->shm_object;
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
/*
* XXXRW: This validation is probably insufficient, and subject to
* sign errors. It should be fixed.
*/
if (offset >= shmfd->shm_size ||
offset + size > round_page(shmfd->shm_size)) {
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
return (EINVAL);
}
shmfd->shm_kmappings++;
vm_object_reference_locked(obj);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
/* Map the object into the kernel_map and wire it. */
kva = vm_map_min(kernel_map);
@ -796,9 +797,9 @@ shm_map(struct file *fp, size_t size, off_t offset, void **memp)
vm_object_deallocate(obj);
/* On failure, drop our mapping reference. */
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
shmfd->shm_kmappings--;
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
return (vm_mmap_to_errno(rv));
}
@ -840,10 +841,10 @@ shm_unmap(struct file *fp, void *mem, size_t size)
if (obj != shmfd->shm_object)
return (EINVAL);
vm_map_remove(map, kva, kva + size);
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
shmfd->shm_kmappings--;
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
return (0);
}
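
The shm_dotruncate() hunks above keep the usual sleep-and-retry shape, with only the lock calls renamed: the object write lock must be dropped before VM_WAIT can sleep, and the allocation is retried after it is retaken. A hedged sketch of that idiom (the helper name and label are illustrative):

/*
 * Sketch: allocate a page of an object, releasing the object
 * write lock across VM_WAIT, which may sleep.
 */
static vm_page_t
object_page_alloc_retry(vm_object_t object, vm_pindex_t idx)	/* hypothetical */
{
	vm_page_t m;

	VM_OBJECT_ASSERT_WLOCKED(object);
retry:
	m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);	/* never sleep holding the lock */
		VM_WAIT;			/* wait for free pages */
		VM_OBJECT_WLOCK(object);
		goto retry;
	}
	return (m);
}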

View File

@ -60,6 +60,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mount.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/sysent.h>
#include <sys/socket.h>
@ -1907,12 +1908,12 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
* reclamation of its vnode does not
* immediately destroy it.
*/
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
if ((obj->flags & OBJ_DEAD) == 0) {
vm_object_reference_locked(obj);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
} else {
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
obj = NULL;
}
}
@ -2089,7 +2090,7 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
vm_offset_t pgoff;
struct mbuf *m0;
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
/*
* Calculate the amount to transfer.
* Not to exceed a page, the EOF,
@ -2107,7 +2108,7 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
xfsize = omin(rem, xfsize);
xfsize = omin(space - loopbytes, xfsize);
if (xfsize <= 0) {
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
done = 1; /* all data sent */
break;
}
@ -2128,7 +2129,7 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
* block.
*/
if (pg->valid && vm_page_is_valid(pg, pgoff, xfsize))
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
else if (m != NULL)
error = EAGAIN; /* send what we already got */
else if (uap->flags & SF_NODISKIO)
@ -2142,7 +2143,7 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
* when the I/O completes.
*/
vm_page_io_start(pg);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
/*
* Get the page from backing store.
@ -2164,10 +2165,10 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
td->td_ucred, NOCRED, &resid, td);
VOP_UNLOCK(vp, 0);
after_read:
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
vm_page_io_finish(pg);
if (!error)
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
mbstat.sf_iocnt++;
}
if (error) {
@ -2182,7 +2183,7 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
pg->busy == 0 && !(pg->oflags & VPO_BUSY))
vm_page_free(pg);
vm_page_unlock(pg);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
if (error == EAGAIN)
error = 0; /* not a real error */
break;
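
The sendfile hunks above preserve another recurring shape: the page is marked busy under the write lock, the lock is dropped across the blocking read, and it is retaken to complete the I/O. A sketch under that assumption (do_read() stands in for the vn_rdwr() path and is not a real routine):

/*
 * Sketch: read into an object page without holding the object
 * lock across a sleeping I/O operation.
 */
static int
object_page_read(vm_object_t obj, vm_page_t pg)	/* hypothetical helper */
{
	int error;

	VM_OBJECT_WLOCK(obj);
	vm_page_io_start(pg);		/* mark I/O in progress */
	VM_OBJECT_WUNLOCK(obj);		/* drop before blocking */
	error = do_read(pg);		/* stand-in for the vn_rdwr() call */
	VM_OBJECT_WLOCK(obj);
	vm_page_io_finish(pg);		/* clear the in-progress state */
	VM_OBJECT_WUNLOCK(obj);
	return (error);
}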

View File

@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/protosw.h>
#include <sys/rwlock.h>
#include <sys/sema.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
@ -841,9 +842,9 @@ aio_fsync_vnode(struct thread *td, struct vnode *vp)
goto drop;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (vp->v_object != NULL) {
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, MNT_WAIT, td);

View File

@ -58,6 +58,7 @@ __FBSDID("$FreeBSD$");
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
@ -458,7 +459,7 @@ vfs_buf_test_cache(struct buf *bp,
vm_page_t m)
{
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if (bp->b_flags & B_CACHE) {
int base = (foff + off) & PAGE_MASK;
if (vm_page_is_valid(m, base, size) == 0)
@ -1388,7 +1389,7 @@ brelse(struct buf *bp)
*/
resid = bp->b_bufsize;
foff = bp->b_offset;
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
for (i = 0; i < bp->b_npages; i++) {
int had_bogus = 0;
@ -1436,7 +1437,7 @@ brelse(struct buf *bp)
resid -= PAGE_SIZE - (foff & PAGE_MASK);
foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
}
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
if (bp->b_flags & (B_INVAL | B_RELBUF))
vfs_vmio_release(bp);
@ -1658,7 +1659,7 @@ vfs_vmio_release(struct buf *bp)
vm_page_t m;
pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
bp->b_pages[i] = NULL;
@ -1690,7 +1691,7 @@ vfs_vmio_release(struct buf *bp)
}
vm_page_unlock(m);
}
VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
if (bp->b_bufsize) {
bufspacewakeup();
@ -2467,7 +2468,7 @@ inmem(struct vnode * vp, daddr_t blkno)
size = vp->v_mount->mnt_stat.f_iosize;
off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
if (!m)
@ -2479,11 +2480,11 @@ inmem(struct vnode * vp, daddr_t blkno)
(vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
goto notinmem;
}
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
return 1;
notinmem:
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
return (0);
}
@ -2513,7 +2514,7 @@ vfs_clean_pages_dirty_buf(struct buf *bp)
KASSERT(bp->b_offset != NOOFFSET,
("vfs_clean_pages_dirty_buf: no buffer offset"));
VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
vfs_drain_busy_pages(bp);
vfs_setdirty_locked_object(bp);
for (i = 0; i < bp->b_npages; i++) {
@ -2526,7 +2527,7 @@ vfs_clean_pages_dirty_buf(struct buf *bp)
/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
foff = noff;
}
VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
}
static void
@ -2536,7 +2537,7 @@ vfs_setdirty_locked_object(struct buf *bp)
int i;
object = bp->b_bufobj->bo_object;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
/*
* We qualify the scan for modified pages on whether the
@ -3042,7 +3043,7 @@ allocbuf(struct buf *bp, int size)
(vm_offset_t)bp->b_data) +
(desiredpages << PAGE_SHIFT),
(bp->b_npages - desiredpages));
VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
for (i = desiredpages; i < bp->b_npages; i++) {
/*
* the page is not freed here -- it
@ -3061,7 +3062,7 @@ allocbuf(struct buf *bp, int size)
vm_page_unwire(m, 0);
vm_page_unlock(m);
}
VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
bp->b_npages = desiredpages;
}
} else if (size > bp->b_bcount) {
@ -3082,7 +3083,7 @@ allocbuf(struct buf *bp, int size)
obj = bp->b_bufobj->bo_object;
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
while (bp->b_npages < desiredpages) {
vm_page_t m;
@ -3144,7 +3145,7 @@ allocbuf(struct buf *bp, int size)
toff += tinc;
tinc = PAGE_SIZE;
}
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
/*
* Step 3, fixup the KVM pmap. Remember that
@ -3399,7 +3400,7 @@ bufdone_finish(struct buf *bp)
bp->b_flags |= B_CACHE;
}
bogus = 0;
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
for (i = 0; i < bp->b_npages; i++) {
int bogusflag = 0;
int resid;
@ -3441,7 +3442,7 @@ bufdone_finish(struct buf *bp)
iosize -= resid;
}
vm_object_pip_wakeupn(obj, 0);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
if (bogus)
pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
bp->b_pages, bp->b_npages);
@ -3479,7 +3480,7 @@ vfs_unbusy_pages(struct buf *bp)
return;
obj = bp->b_bufobj->bo_object;
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
if (m == bogus_page) {
@ -3494,7 +3495,7 @@ vfs_unbusy_pages(struct buf *bp)
vm_page_io_finish(m);
}
vm_object_pip_wakeupn(obj, 0);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
}
/*
@ -3573,7 +3574,7 @@ vfs_drain_busy_pages(struct buf *bp)
vm_page_t m;
int i, last_busied;
VM_OBJECT_LOCK_ASSERT(bp->b_bufobj->bo_object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
last_busied = 0;
for (i = 0; i < bp->b_npages; i++) {
m = bp->b_pages[i];
@ -3615,7 +3616,7 @@ vfs_busy_pages(struct buf *bp, int clear_modify)
foff = bp->b_offset;
KASSERT(bp->b_offset != NOOFFSET,
("vfs_busy_pages: no buffer offset"));
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
vfs_drain_busy_pages(bp);
if (bp->b_bufsize != 0)
vfs_setdirty_locked_object(bp);
@ -3652,7 +3653,7 @@ vfs_busy_pages(struct buf *bp, int clear_modify)
}
foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
}
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
if (bogus)
pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
bp->b_pages, bp->b_npages);
@ -3683,7 +3684,7 @@ vfs_bio_set_valid(struct buf *bp, int base, int size)
base += (bp->b_offset & PAGE_MASK);
n = PAGE_SIZE - (base & PAGE_MASK);
VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
m = bp->b_pages[i];
if (n > size)
@ -3693,7 +3694,7 @@ vfs_bio_set_valid(struct buf *bp, int base, int size)
size -= n;
n = PAGE_SIZE;
}
VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
}
/*
@ -3720,13 +3721,13 @@ vfs_bio_clrbuf(struct buf *bp)
}
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
(bp->b_offset & PAGE_MASK) == 0) {
if (bp->b_pages[0] == bogus_page)
goto unlock;
mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
VM_OBJECT_LOCK_ASSERT(bp->b_pages[0]->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[0]->object);
if ((bp->b_pages[0]->valid & mask) == mask)
goto unlock;
if ((bp->b_pages[0]->valid & mask) == 0) {
@ -3745,7 +3746,7 @@ vfs_bio_clrbuf(struct buf *bp)
continue;
j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
VM_OBJECT_LOCK_ASSERT(bp->b_pages[i]->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[i]->object);
if ((bp->b_pages[i]->valid & mask) == mask)
continue;
if ((bp->b_pages[i]->valid & mask) == 0)
@ -3759,7 +3760,7 @@ vfs_bio_clrbuf(struct buf *bp)
bp->b_pages[i]->valid |= mask;
}
unlock:
VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
bp->b_resid = 0;
}

View File

@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
@ -406,21 +407,20 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
*/
off = tbp->b_offset;
tsize = size;
VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
for (j = 0; tsize > 0; j++) {
toff = off & PAGE_MASK;
tinc = tsize;
if (toff + tinc > PAGE_SIZE)
tinc = PAGE_SIZE - toff;
VM_OBJECT_LOCK_ASSERT(tbp->b_pages[j]->object,
MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(tbp->b_pages[j]->object);
if ((tbp->b_pages[j]->valid &
vm_page_bits(toff, tinc)) != 0)
break;
off += tinc;
tsize -= tinc;
}
VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
if (tsize > 0) {
bqrelse(tbp);
break;
@ -455,7 +455,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
BUF_KERNPROC(tbp);
TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
tbp, b_cluster.cluster_entry);
VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
for (j = 0; j < tbp->b_npages; j += 1) {
vm_page_t m;
m = tbp->b_pages[j];
@ -469,7 +469,7 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
if (m->valid == VM_PAGE_BITS_ALL)
tbp->b_pages[j] = bogus_page;
}
VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
/*
* Don't inherit tbp->b_bufsize as it may be larger due to
* a non-page-aligned size. Instead just aggregate using
@ -487,13 +487,13 @@ cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
* Fully valid pages in the cluster are already good and do not need
* to be re-read from disk. Replace the page with bogus_page
*/
VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
for (j = 0; j < bp->b_npages; j++) {
VM_OBJECT_LOCK_ASSERT(bp->b_pages[j]->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[j]->object);
if (bp->b_pages[j]->valid == VM_PAGE_BITS_ALL)
bp->b_pages[j] = bogus_page;
}
VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
if (bp->b_bufsize > bp->b_kvasize)
panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
bp->b_bufsize, bp->b_kvasize);
@ -918,12 +918,12 @@ cluster_wbuild(vp, size, start_lbn, len)
if (tbp->b_flags & B_VMIO) {
vm_page_t m;
VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
if (i != 0) { /* if not first buffer */
for (j = 0; j < tbp->b_npages; j += 1) {
m = tbp->b_pages[j];
if (m->oflags & VPO_BUSY) {
VM_OBJECT_UNLOCK(
VM_OBJECT_WUNLOCK(
tbp->b_object);
bqrelse(tbp);
goto finishcluster;
@ -940,7 +940,7 @@ cluster_wbuild(vp, size, start_lbn, len)
bp->b_npages++;
}
}
VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
}
bp->b_bcount += size;
bp->b_bufsize += size;

View File

@ -47,8 +47,8 @@ __FBSDID("$FreeBSD$");
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/rwlock.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
@ -1047,10 +1047,10 @@ vop_stdadvise(struct vop_advise_args *ap)
if (vp->v_object != NULL) {
start = trunc_page(ap->a_start);
end = round_page(ap->a_end);
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_cache(vp->v_object, OFF_TO_IDX(start),
OFF_TO_IDX(end));
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
}
VOP_UNLOCK(vp, 0);
break;

View File

@ -67,6 +67,7 @@ __FBSDID("$FreeBSD$");
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
@ -1244,9 +1245,9 @@ bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
bufobj_wwait(bo, 0, 0);
BO_UNLOCK(bo);
if (bo->bo_object != NULL) {
VM_OBJECT_LOCK(bo->bo_object);
VM_OBJECT_WLOCK(bo->bo_object);
vm_object_pip_wait(bo->bo_object, "bovlbx");
VM_OBJECT_UNLOCK(bo->bo_object);
VM_OBJECT_WUNLOCK(bo->bo_object);
}
BO_LOCK(bo);
} while (bo->bo_numoutput > 0);
@ -1257,10 +1258,10 @@ bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
*/
if (bo->bo_object != NULL &&
(flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0) {
VM_OBJECT_LOCK(bo->bo_object);
VM_OBJECT_WLOCK(bo->bo_object);
vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
OBJPR_CLEANONLY : 0);
VM_OBJECT_UNLOCK(bo->bo_object);
VM_OBJECT_WUNLOCK(bo->bo_object);
}
#ifdef INVARIANTS
@ -2520,9 +2521,9 @@ vinactive(struct vnode *vp, struct thread *td)
*/
obj = vp->v_object;
if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, OBJPC_NOSYNC);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
}
VOP_INACTIVE(vp, td);
VI_LOCK(vp);
@ -2603,9 +2604,9 @@ vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
*/
if (flags & WRITECLOSE) {
if (vp->v_object != NULL) {
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, MNT_WAIT, td);
if (error != 0) {
@ -3503,11 +3504,11 @@ vfs_msync(struct mount *mp, int flags)
obj = vp->v_object;
if (obj != NULL) {
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0,
flags == MNT_WAIT ?
OBJPC_SYNC : OBJPC_NOSYNC);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
}
vput(vp);
}

View File

@ -61,6 +61,7 @@ __FBSDID("$FreeBSD$");
#include <sys/filio.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/stat.h>
#include <sys/sx.h>
@ -3437,9 +3438,9 @@ sys_fsync(td, uap)
vn_lock(vp, lock_flags | LK_RETRY);
AUDIT_ARG_VNODE1(vp);
if (vp->v_object != NULL) {
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, MNT_WAIT, td);

View File

@ -55,6 +55,7 @@ __FBSDID("$FreeBSD$");
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/ttycom.h>
@ -1892,9 +1893,9 @@ vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)
if ((object = vp->v_object) == NULL)
return;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_object_page_remove(object, start, end, 0);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
int

View File

@ -58,6 +58,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>

View File

@ -2399,7 +2399,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_page_t m, mpte;
vm_pindex_t diff, psize;
VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m_start->object);
psize = atop(end - start);
mpte = NULL;
m = m_start;
@ -2423,7 +2423,7 @@ void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
}
@ -2768,7 +2768,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
@ -2834,7 +2834,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PTE_D set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
@ -2882,7 +2882,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));

View File

@ -28,6 +28,7 @@ SRCS+= opensolaris_sysevent.c
SRCS+= opensolaris_taskq.c
SRCS+= opensolaris_uio.c
SRCS+= opensolaris_vfs.c
SRCS+= opensolaris_vm.c
SRCS+= opensolaris_zone.c
_A=${.CURDIR}/../../cddl/contrib/opensolaris/common/atomic

View File

@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
@ -128,7 +129,7 @@ nfs_getpages(struct vop_getpages_args *ap)
* allow the pager to zero-out the blanks. Partially valid pages
* can only occur at the file EOF.
*/
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (pages[ap->a_reqpage]->valid != 0) {
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
@ -137,10 +138,10 @@ nfs_getpages(struct vop_getpages_args *ap)
vm_page_unlock(pages[i]);
}
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (0);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
/*
* We use only the kva address for the buffer, but this is extremely
@ -170,7 +171,7 @@ nfs_getpages(struct vop_getpages_args *ap)
if (error && (uio.uio_resid == count)) {
nfs_printf("nfs_getpages: error %d\n", error);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
vm_page_lock(pages[i]);
@ -178,7 +179,7 @@ nfs_getpages(struct vop_getpages_args *ap)
vm_page_unlock(pages[i]);
}
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_ERROR);
}
@ -189,7 +190,7 @@ nfs_getpages(struct vop_getpages_args *ap)
*/
size = count - uio.uio_resid;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
nextoff = toff + PAGE_SIZE;
@ -225,7 +226,7 @@ nfs_getpages(struct vop_getpages_args *ap)
if (i != ap->a_reqpage)
vm_page_readahead_finish(m);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (0);
}
@ -1296,9 +1297,9 @@ nfs_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
* Now, flush as required.
*/
if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
/*
* If the page clean was interrupted, fail the invalidation.
* Not doing so, we run the risk of losing dirty pages in the

View File

@ -629,9 +629,9 @@ nfs_close(struct vop_close_args *ap)
* mmap'ed writes or via write().
*/
if (nfs_clean_pages_on_close && vp->v_object) {
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
}
mtx_lock(&np->n_mtx);
if (np->n_flag & NMODIFIED) {

View File

@ -87,6 +87,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
@ -3332,9 +3333,9 @@ nfsrv_commit(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
*/
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, MNT_WAIT, curthread);
} else {
@ -3363,10 +3364,10 @@ nfsrv_commit(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
VM_OBJECT_LOCK(vp->v_object);
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, off, off + cnt,
OBJPC_SYNC);
VM_OBJECT_UNLOCK(vp->v_object);
VM_OBJECT_WUNLOCK(vp->v_object);
}
bo = &vp->v_bufobj;

View File

@ -140,10 +140,10 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
struct page *page = sg_page(&chunk->page_list[i]);
if (umem->writable && dirty) {
if (object && object != page->object)
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (object != page->object) {
object = page->object;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}
vm_page_dirty(page);
}
@ -151,7 +151,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
kfree(chunk);
}
if (object)
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
#endif
}

View File

@ -37,6 +37,7 @@
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/pmap.h>

View File

@ -80,6 +80,7 @@ __FBSDID("$FreeBSD$");
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#ifdef SMP

View File

@ -84,6 +84,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>

View File

@ -1122,7 +1122,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
rw_assert(&pvh_global_lock, RA_WLOCKED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
/* XXX change the pvo head for fake pages */
if ((m->oflags & VPO_UNMANAGED) != 0) {
@ -1291,7 +1291,7 @@ moea_is_modified(mmu_t mmu, vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have PTE_CHG set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
@ -1331,7 +1331,7 @@ moea_clear_modify(mmu_t mmu, vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("moea_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("moea_clear_modify: page %p is busy", m));
@ -1366,7 +1366,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;

View File

@ -1184,7 +1184,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
}
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
/* XXX change the pvo head for fake pages */
if ((m->oflags & VPO_UNMANAGED) != 0) {
@ -1447,7 +1447,7 @@ moea64_is_modified(mmu_t mmu, vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can have LPTE_CHG set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (FALSE);
@ -1482,7 +1482,7 @@ moea64_clear_modify(mmu_t mmu, vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("moea64_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("moea64_clear_modify: page %p is busy", m));
@ -1515,7 +1515,7 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;

View File

@ -101,6 +101,7 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/exec.h>
#include <sys/ktr.h>

View File

@ -1561,7 +1561,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
("mmu_booke_enter_locked: user pmap, non user va"));
}
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@ -1958,7 +1958,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;
@ -2173,7 +2173,7 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no PTEs can be modified.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
@ -2245,7 +2245,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("mmu_booke_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("mmu_booke_clear_modify: page %p is busy", m));
@ -2660,7 +2660,7 @@ mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("mmu_booke_object_init_pt: non-device object"));
}

View File

@ -54,9 +54,9 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mac.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sdt.h>
#include <sys/systm.h>
@ -284,14 +284,14 @@ mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
object = vme->object.vm_object;
if (object == NULL)
continue;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
while ((backing_object = object->backing_object) != NULL) {
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_WLOCK(backing_object);
offset += object->backing_object_offset;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
object = backing_object;
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
/*
* At the moment, vm_maps and objects aren't considered by
* the MAC system, so only things with backing by a normal
@ -334,10 +334,10 @@ mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
vm_object_reference(object);
(void) vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_object_page_clean(object, offset, offset +
vme->end - vme->start, OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
VOP_UNLOCK(vp, 0);
vn_finished_write(mp);
vm_object_deallocate(object);

View File

@ -65,6 +65,7 @@ __FBSDID("$FreeBSD$");
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>

View File

@ -1229,7 +1229,7 @@ pmap_pinit(pmap_t pm)
pm->pm_context[i] = -1;
CPU_ZERO(&pm->pm_active);
VM_OBJECT_LOCK(pm->pm_tsb_obj);
VM_OBJECT_WLOCK(pm->pm_tsb_obj);
for (i = 0; i < TSB_PAGES; i++) {
m = vm_page_grab(pm->pm_tsb_obj, i, VM_ALLOC_NOBUSY |
VM_ALLOC_RETRY | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
@ -1237,7 +1237,7 @@ pmap_pinit(pmap_t pm)
m->md.pmap = pm;
ma[i] = m;
}
VM_OBJECT_UNLOCK(pm->pm_tsb_obj);
VM_OBJECT_WUNLOCK(pm->pm_tsb_obj);
pmap_qenter((vm_offset_t)pm->pm_tsb, ma, TSB_PAGES);
bzero(&pm->pm_stats, sizeof(pm->pm_stats));
@ -1291,7 +1291,7 @@ pmap_release(pmap_t pm)
pmap_qremove((vm_offset_t)pm->pm_tsb, TSB_PAGES);
obj = pm->pm_tsb_obj;
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1"));
while (!TAILQ_EMPTY(&obj->memq)) {
m = TAILQ_FIRST(&obj->memq);
@ -1300,7 +1300,7 @@ pmap_release(pmap_t pm)
atomic_subtract_int(&cnt.v_wire_count, 1);
vm_page_free_zero(m);
}
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
PMAP_LOCK_DESTROY(pm);
}
@ -1495,7 +1495,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
rw_assert(&tte_list_global_lock, RA_WLOCKED);
PMAP_LOCK_ASSERT(pm, MA_OWNED);
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
PMAP_STATS_INC(pmap_nenter);
pa = VM_PAGE_TO_PHYS(m);
@ -1662,7 +1662,7 @@ pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
vm_pindex_t pindex, vm_size_t size)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
("pmap_object_init_pt: non-device object"));
}
@ -2060,7 +2060,7 @@ pmap_is_modified(vm_page_t m)
* concurrently set while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no TTEs can have TD_W set.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return (rv);
@ -2128,7 +2128,7 @@ pmap_clear_modify(vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_modify: page %p is not managed", m));
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("pmap_clear_modify: page %p is busy", m));
@ -2183,7 +2183,7 @@ pmap_remove_write(vm_page_t m)
* another thread while the object is locked. Thus, if PGA_WRITEABLE
* is clear, no page table entries need updating.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 &&
(m->aflags & PGA_WRITEABLE) == 0)
return;

View File

@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
#include <sys/ttycom.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/rwlock.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
@ -143,9 +144,9 @@ ffs_rawread_sync(struct vnode *vp)
if ((obj = vp->v_object) != NULL &&
(obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
VI_UNLOCK(vp);
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
} else
VI_UNLOCK(vp);

View File

@ -75,6 +75,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
@ -842,7 +843,7 @@ ffs_getpages(ap)
* user programs might reference data beyond the actual end of file
* occurring within the page. We have to zero that data.
*/
VM_OBJECT_LOCK(mreq->object);
VM_OBJECT_WLOCK(mreq->object);
if (mreq->valid) {
if (mreq->valid != VM_PAGE_BITS_ALL)
vm_page_zero_invalid(mreq, TRUE);
@ -853,10 +854,10 @@ ffs_getpages(ap)
vm_page_unlock(ap->a_m[i]);
}
}
VM_OBJECT_UNLOCK(mreq->object);
VM_OBJECT_WUNLOCK(mreq->object);
return VM_PAGER_OK;
}
VM_OBJECT_UNLOCK(mreq->object);
VM_OBJECT_WUNLOCK(mreq->object);
return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
ap->a_count,

View File

@ -45,7 +45,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
@ -91,10 +91,10 @@ default_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
object = vm_object_allocate(OBJT_DEFAULT,
OFF_TO_IDX(round_page(offset + size)));
if (cred != NULL) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
object->cred = cred;
object->charge = size;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
return (object);
}

View File

@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <vm/vm.h>
@ -206,7 +207,7 @@ void
cdev_pager_free_page(vm_object_t object, vm_page_t m)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type == OBJT_MGTDEVICE) {
KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("unmanaged %p", m));
pmap_remove_all(m);
@ -221,7 +222,7 @@ static void
dev_pager_free_page(vm_object_t object, vm_page_t m)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->type == OBJT_DEVICE &&
(m->oflags & VPO_UNMANAGED) != 0),
("Managed device or page obj %p m %p", object, m));
@ -235,13 +236,13 @@ dev_pager_dealloc(object)
{
vm_page_t m;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
object->un_pager.devp.ops->cdev_pg_dtor(object->un_pager.devp.dev);
mtx_lock(&dev_pager_mtx);
TAILQ_REMOVE(&dev_pager_object_list, object, pager_object_list);
mtx_unlock(&dev_pager_mtx);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (object->type == OBJT_DEVICE) {
/*
@ -258,11 +259,11 @@ dev_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int reqpage)
{
int error, i;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
error = object->un_pager.devp.ops->cdev_pg_fault(object,
IDX_TO_OFF(ma[reqpage]->pindex), PROT_READ, &ma[reqpage]);
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
for (i = 0; i < count; i++) {
if (i != reqpage) {
@ -304,12 +305,12 @@ old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
pidx = OFF_TO_IDX(offset);
memattr = object->memattr;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
dev = object->handle;
csw = dev_refthread(dev, &ref);
if (csw == NULL) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
return (VM_PAGER_FAIL);
}
td = curthread;
@ -321,7 +322,7 @@ old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
if (ret != 0) {
printf(
"WARNING: dev_pager_getpage: map function returns error %d", ret);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
return (VM_PAGER_FAIL);
}
@ -338,7 +339,7 @@ old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
* the new physical address.
*/
page = *mres;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_page_updatefake(page, paddr, memattr);
} else {
/*
@ -346,7 +347,7 @@ old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
* free up the all of the original pages.
*/
page = vm_page_getfake(paddr, memattr);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_page_lock(*mres);
vm_page_free(*mres);
vm_page_unlock(*mres);

View File

@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
@ -123,11 +124,11 @@ phys_pager_dealloc(vm_object_t object)
{
if (object->handle != NULL) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
mtx_lock(&phys_pager_mtx);
TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
mtx_unlock(&phys_pager_mtx);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}
}
@ -139,7 +140,7 @@ phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
int i;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
for (i = 0; i < count; i++) {
if (m[i]->valid == 0) {
if ((m[i]->flags & PG_ZERO) == 0)

View File

@ -36,6 +36,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
@ -142,10 +143,10 @@ sg_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
size_t space;
int i;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
sg = object->handle;
memattr = object->memattr;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
offset = m[reqpage]->pindex;
/*
@ -180,7 +181,7 @@ sg_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
/* Construct a new fake page. */
page = vm_page_getfake(paddr, memattr);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
TAILQ_INSERT_TAIL(&object->un_pager.sgp.sgp_pglist, page, pageq);
/* Free the original pages and insert this fake page into the object. */

View File

@ -89,6 +89,7 @@ __FBSDID("$FreeBSD$");
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/blist.h>
@ -621,14 +622,14 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
crhold(cred);
}
object = vm_object_allocate(OBJT_DEFAULT, pindex);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
object->handle = handle;
if (cred != NULL) {
object->cred = cred;
object->charge = size;
}
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
sx_xunlock(&sw_alloc_sx);
mtx_unlock(&Giant);
@ -639,13 +640,13 @@ swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
crhold(cred);
}
object = vm_object_allocate(OBJT_DEFAULT, pindex);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (cred != NULL) {
object->cred = cred;
object->charge = size;
}
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
return (object);
}
@ -674,7 +675,7 @@ swap_pager_dealloc(vm_object_t object)
mtx_unlock(&sw_alloc_mtx);
}
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
vm_object_pip_wait(object, "swpdea");
/*
@ -815,7 +816,7 @@ void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
swp_pager_meta_free(object, start, size);
}
@ -834,7 +835,7 @@ swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
daddr_t blk = SWAPBLK_NONE;
vm_pindex_t beg = start; /* save start index */
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
while (size) {
if (n == 0) {
n = BLIST_MAX_ALLOC;
@ -842,7 +843,7 @@ swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
n >>= 1;
if (n == 0) {
swp_pager_meta_free(object, beg, start - beg);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (-1);
}
}
@ -854,7 +855,7 @@ swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
--n;
}
swp_pager_meta_free(object, start, n);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (0);
}
@ -883,8 +884,8 @@ swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
{
vm_pindex_t i;
VM_OBJECT_LOCK_ASSERT(srcobject, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(dstobject, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(srcobject);
VM_OBJECT_ASSERT_WLOCKED(dstobject);
/*
* If destroysource is set, we remove the source object from the
@ -934,11 +935,11 @@ swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
* swp_pager_meta_build() can sleep.
*/
vm_object_pip_add(srcobject, 1);
VM_OBJECT_UNLOCK(srcobject);
VM_OBJECT_WUNLOCK(srcobject);
vm_object_pip_add(dstobject, 1);
swp_pager_meta_build(dstobject, i, srcaddr);
vm_object_pip_wakeup(dstobject);
VM_OBJECT_LOCK(srcobject);
VM_OBJECT_WLOCK(srcobject);
vm_object_pip_wakeup(srcobject);
}
} else {
@ -987,7 +988,7 @@ swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *aft
{
daddr_t blk0;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
/*
* do we have good backing store at the requested index?
*/
@ -1058,7 +1059,7 @@ static void
swap_pager_unswapped(vm_page_t m)
{
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}
@ -1147,7 +1148,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
/*
* Getpbuf() can sleep.
*/
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
/*
* Get a swap buffer header to perform the IO
*/
@ -1168,7 +1169,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
bp->b_bufsize = PAGE_SIZE * (j - i);
bp->b_pager.pg_reqpage = reqpage - i;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
{
int k;
@ -1187,7 +1188,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
* does not remove it.
*/
vm_object_pip_add(object, bp->b_npages);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
/*
* perform the I/O. NOTE!!! bp cannot be considered valid after
@ -1208,7 +1209,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
* cleared on completion. If an I/O error occurs, SWAPBLK_NONE
* is set in the meta-data.
*/
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
while ((mreq->oflags & VPO_SWAPINPROG) != 0) {
mreq->oflags |= VPO_WANTED;
PCPU_INC(cnt.v_intrans);
@ -1283,7 +1284,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
*/
if (object->type != OBJT_SWAP)
swp_pager_meta_build(object, 0, SWAPBLK_NONE);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (curproc != pageproc)
sync = TRUE;
@ -1378,7 +1379,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
bp->b_bufsize = PAGE_SIZE * n;
bp->b_blkno = blk;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (j = 0; j < n; ++j) {
vm_page_t mreq = m[i+j];
@ -1393,7 +1394,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
mreq->oflags |= VPO_SWAPINPROG;
bp->b_pages[j] = mreq;
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
bp->b_npages = n;
/*
* Must set dirty range for NFS to work.
@ -1443,7 +1444,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
*/
swp_pager_async_iodone(bp);
}
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}
/*
@ -1487,7 +1488,7 @@ swp_pager_async_iodone(struct buf *bp)
if (bp->b_npages) {
object = bp->b_pages[0]->object;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}
/*
@ -1611,7 +1612,7 @@ swp_pager_async_iodone(struct buf *bp)
*/
if (object != NULL) {
vm_object_pip_wakeupn(object, bp->b_npages);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
/*
@ -1652,7 +1653,7 @@ swap_pager_isswapped(vm_object_t object, struct swdevt *sp)
int bcount;
int i;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type != OBJT_SWAP)
return (0);
@ -1746,13 +1747,13 @@ swap_pager_swapoff(struct swdevt *sp)
for (j = 0; j < SWAP_META_PAGES; ++j) {
if (swp_pager_isondev(swap->swb_pages[j], sp)) {
/* avoid deadlock */
if (!VM_OBJECT_TRYLOCK(object)) {
if (!VM_OBJECT_TRYWLOCK(object)) {
break;
} else {
mtx_unlock(&swhash_mtx);
swp_pager_force_pagein(object,
pindex + j);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
mtx_lock(&swhash_mtx);
goto restart;
}
@ -1808,7 +1809,7 @@ swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
struct swblock **pswap;
int idx;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Convert default object to swap object if necessary
*/
@ -1845,7 +1846,7 @@ swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
swap = *pswap = uma_zalloc(swap_zone, M_NOWAIT);
if (swap == NULL) {
mtx_unlock(&swhash_mtx);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (uma_zone_exhausted(swap_zone)) {
if (atomic_cmpset_int(&exhausted, 0, 1))
printf("swap zone exhausted, "
@ -1854,7 +1855,7 @@ swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
pause("swzonex", 10);
} else
VM_WAIT;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
goto retry;
}
@ -1906,7 +1907,7 @@ static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type != OBJT_SWAP)
return;
@ -1952,7 +1953,7 @@ swp_pager_meta_free_all(vm_object_t object)
{
daddr_t index = 0;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type != OBJT_SWAP)
return;
@ -2011,7 +2012,7 @@ swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
daddr_t r1;
int idx;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
/*
* The meta data only exists if the object is OBJT_SWAP
* and even then might not be allocated yet.
@ -2464,14 +2465,14 @@ vmspace_swap_count(struct vmspace *vmspace)
for (cur = map->header.next; cur != &map->header; cur = cur->next) {
if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
(object = cur->object.vm_object) != NULL) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (object->type == OBJT_SWAP &&
object->un_pager.swp.swp_bcount != 0) {
n = (cur->end - cur->start) / PAGE_SIZE;
count += object->un_pager.swp.swp_bcount *
SWAP_META_PAGES * n / object->size + 1;
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
}
return (count);
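
swap_pager_swapoff() above is one of the few call sites converted to the new try-lock: VM_OBJECT_TRYWLOCK() replaces VM_OBJECT_TRYLOCK() where blocking on the object lock while holding swhash_mtx could deadlock. A sketch of that shape (every name other than the lock KPI is illustrative):

/*
 * Sketch: avoid a lock-order reversal between a hash mutex and
 * the object write lock by trying the latter and backing off.
 */
static int
object_op_trylocked(struct mtx *hash_mtx, vm_object_t object)	/* hypothetical */
{

	mtx_lock(hash_mtx);
	if (!VM_OBJECT_TRYWLOCK(object)) {
		/* Sleeping here would invert the lock order. */
		mtx_unlock(hash_mtx);
		return (EAGAIN);	/* caller restarts the scan */
	}
	mtx_unlock(hash_mtx);
	/* ... operate on the write-locked object ... */
	VM_OBJECT_WUNLOCK(object);
	return (0);
}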

View File

@ -72,6 +72,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

View File

@ -81,9 +81,9 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
@ -163,14 +163,14 @@ unlock_and_deallocate(struct faultstate *fs)
{
vm_object_pip_wakeup(fs->object);
VM_OBJECT_UNLOCK(fs->object);
VM_OBJECT_WUNLOCK(fs->object);
if (fs->object != fs->first_object) {
VM_OBJECT_LOCK(fs->first_object);
VM_OBJECT_WLOCK(fs->first_object);
vm_page_lock(fs->first_m);
vm_page_free(fs->first_m);
vm_page_unlock(fs->first_m);
vm_object_pip_wakeup(fs->first_object);
VM_OBJECT_UNLOCK(fs->first_object);
VM_OBJECT_WUNLOCK(fs->first_object);
fs->first_m = NULL;
}
vm_object_deallocate(fs->first_object);
@ -290,7 +290,7 @@ RetryFault:;
* truncation operations) during I/O. This must be done after
* obtaining the vnode lock in order to avoid possible deadlocks.
*/
VM_OBJECT_LOCK(fs.first_object);
VM_OBJECT_WLOCK(fs.first_object);
vm_object_reference_locked(fs.first_object);
vm_object_pip_add(fs.first_object, 1);
@ -363,17 +363,17 @@ RetryFault:;
vm_page_aflag_set(fs.m, PGA_REFERENCED);
vm_page_unlock(fs.m);
if (fs.object != fs.first_object) {
if (!VM_OBJECT_TRYLOCK(
if (!VM_OBJECT_TRYWLOCK(
fs.first_object)) {
VM_OBJECT_UNLOCK(fs.object);
VM_OBJECT_LOCK(fs.first_object);
VM_OBJECT_LOCK(fs.object);
VM_OBJECT_WUNLOCK(fs.object);
VM_OBJECT_WLOCK(fs.first_object);
VM_OBJECT_WLOCK(fs.object);
}
vm_page_lock(fs.first_m);
vm_page_free(fs.first_m);
vm_page_unlock(fs.first_m);
vm_object_pip_wakeup(fs.first_object);
VM_OBJECT_UNLOCK(fs.first_object);
VM_OBJECT_WUNLOCK(fs.first_object);
fs.first_m = NULL;
}
unlock_map(&fs);
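
The TRYWLOCK fallback above is the standard deadlock-avoidance move, unchanged except for its spelling: fs.first_object orders before fs.object, so when the opportunistic try-lock fails, both locks are dropped and retaken in the canonical order. Schematically (a sketch, not the full fault path):

	if (!VM_OBJECT_TRYWLOCK(fs.first_object)) {
		VM_OBJECT_WUNLOCK(fs.object);		/* out-of-order lock goes first */
		VM_OBJECT_WLOCK(fs.first_object);	/* retake in canonical order */
		VM_OBJECT_WLOCK(fs.object);
		/* anything read from fs.object before the drop is now stale */
	}
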
@ -383,7 +383,7 @@ RetryFault:;
"vmpfw");
}
vm_object_pip_wakeup(fs.object);
VM_OBJECT_UNLOCK(fs.object);
VM_OBJECT_WUNLOCK(fs.object);
PCPU_INC(cnt.v_intrans);
vm_object_deallocate(fs.first_object);
goto RetryFault;
@ -646,12 +646,12 @@ RetryFault:;
*/
if (fs.object != fs.first_object) {
vm_object_pip_wakeup(fs.object);
VM_OBJECT_UNLOCK(fs.object);
VM_OBJECT_WUNLOCK(fs.object);
fs.object = fs.first_object;
fs.pindex = fs.first_pindex;
fs.m = fs.first_m;
VM_OBJECT_LOCK(fs.object);
VM_OBJECT_WLOCK(fs.object);
}
fs.first_m = NULL;
@ -669,11 +669,11 @@ RetryFault:;
} else {
KASSERT(fs.object != next_object,
("object loop %p", next_object));
VM_OBJECT_LOCK(next_object);
VM_OBJECT_WLOCK(next_object);
vm_object_pip_add(next_object, 1);
if (fs.object != fs.first_object)
vm_object_pip_wakeup(fs.object);
VM_OBJECT_UNLOCK(fs.object);
VM_OBJECT_WUNLOCK(fs.object);
fs.object = next_object;
}
}
@ -725,7 +725,7 @@ RetryFault:;
*/
((fs.object->type == OBJT_DEFAULT) ||
(fs.object->type == OBJT_SWAP)) &&
(is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object)) &&
(is_first_object_locked = VM_OBJECT_TRYWLOCK(fs.first_object)) &&
/*
* We don't chase down the shadow chain
*/
@ -774,7 +774,7 @@ RetryFault:;
* conditional
*/
vm_object_pip_wakeup(fs.object);
VM_OBJECT_UNLOCK(fs.object);
VM_OBJECT_WUNLOCK(fs.object);
/*
* Only use the new page below...
*/
@ -782,7 +782,7 @@ RetryFault:;
fs.pindex = fs.first_pindex;
fs.m = fs.first_m;
if (!is_first_object_locked)
VM_OBJECT_LOCK(fs.object);
VM_OBJECT_WLOCK(fs.object);
PCPU_INC(cnt.v_cow_faults);
curthread->td_cow++;
} else {
@ -903,7 +903,7 @@ RetryFault:;
*/
KASSERT(fs.m->valid == VM_PAGE_BITS_ALL,
("vm_fault: page %p partially invalid", fs.m));
VM_OBJECT_UNLOCK(fs.object);
VM_OBJECT_WUNLOCK(fs.object);
/*
* Put this page into the physical map. We had to do the unlock above
@ -914,7 +914,7 @@ RetryFault:;
pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);
if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0)
vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
VM_OBJECT_LOCK(fs.object);
VM_OBJECT_WLOCK(fs.object);
vm_page_lock(fs.m);
/*
@ -960,13 +960,13 @@ vm_fault_cache_behind(const struct faultstate *fs, int distance)
vm_pindex_t pindex;
object = fs->object;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
first_object = fs->first_object;
if (first_object != object) {
if (!VM_OBJECT_TRYLOCK(first_object)) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_LOCK(first_object);
VM_OBJECT_LOCK(object);
if (!VM_OBJECT_TRYWLOCK(first_object)) {
VM_OBJECT_WUNLOCK(object);
VM_OBJECT_WLOCK(first_object);
VM_OBJECT_WLOCK(object);
}
}
/* Neither fictitious nor unmanaged pages can be cached. */
@ -999,7 +999,7 @@ vm_fault_cache_behind(const struct faultstate *fs, int distance)
}
}
if (first_object != object)
VM_OBJECT_UNLOCK(first_object);
VM_OBJECT_WUNLOCK(first_object);
}
/*
@ -1044,28 +1044,28 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
lobject = object;
VM_OBJECT_LOCK(lobject);
VM_OBJECT_WLOCK(lobject);
while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
lobject->type == OBJT_DEFAULT &&
(backing_object = lobject->backing_object) != NULL) {
KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
0, ("vm_fault_prefault: unaligned object offset"));
pindex += lobject->backing_object_offset >> PAGE_SHIFT;
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_UNLOCK(lobject);
VM_OBJECT_WLOCK(backing_object);
VM_OBJECT_WUNLOCK(lobject);
lobject = backing_object;
}
/*
* give up when a page is not in memory
*/
if (m == NULL) {
VM_OBJECT_UNLOCK(lobject);
VM_OBJECT_WUNLOCK(lobject);
break;
}
if (m->valid == VM_PAGE_BITS_ALL &&
(m->flags & PG_FICTITIOUS) == 0)
pmap_enter_quick(pmap, addr, m, entry->protection);
VM_OBJECT_UNLOCK(lobject);
VM_OBJECT_WUNLOCK(lobject);
}
}
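
vm_fault_prefault() walks the shadow chain with hand-over-hand locking, still in write mode after this change: each backing object is locked before the object above it is released, so the chain cannot be collapsed out from under the walker. The descent, reduced to its essentials:

	VM_OBJECT_WLOCK(lobject);
	while (vm_page_lookup(lobject, pindex) == NULL &&
	    (backing_object = lobject->backing_object) != NULL) {
		/* pindex adjustment by backing_object_offset omitted */
		VM_OBJECT_WLOCK(backing_object);	/* child first... */
		VM_OBJECT_WUNLOCK(lobject);		/* ...then drop the parent */
		lobject = backing_object;
	}
	VM_OBJECT_WUNLOCK(lobject);
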
@ -1257,7 +1257,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
dst_object->pg_color = atop(dst_entry->start);
#endif
VM_OBJECT_LOCK(dst_object);
VM_OBJECT_WLOCK(dst_object);
KASSERT(upgrade || dst_entry->object.vm_object == NULL,
("vm_fault_copy_entry: vm_object not NULL"));
dst_entry->object.vm_object = dst_object;
@ -1307,9 +1307,9 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
dst_m = vm_page_alloc(dst_object, dst_pindex,
VM_ALLOC_NORMAL);
if (dst_m == NULL) {
VM_OBJECT_UNLOCK(dst_object);
VM_OBJECT_WUNLOCK(dst_object);
VM_WAIT;
VM_OBJECT_LOCK(dst_object);
VM_OBJECT_WLOCK(dst_object);
}
} while (dst_m == NULL);
@ -1318,7 +1318,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
* (Because the source is wired down, the page will be in
* memory.)
*/
VM_OBJECT_LOCK(src_object);
VM_OBJECT_WLOCK(src_object);
object = src_object;
pindex = src_pindex + dst_pindex;
while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
@ -1327,18 +1327,18 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
/*
* Allow fallback to backing objects if we are reading.
*/
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_WLOCK(backing_object);
pindex += OFF_TO_IDX(object->backing_object_offset);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
object = backing_object;
}
if (src_m == NULL)
panic("vm_fault_copy_wired: page missing");
pmap_copy_page(src_m, dst_m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
dst_m->valid = VM_PAGE_BITS_ALL;
dst_m->dirty = VM_PAGE_BITS_ALL;
VM_OBJECT_UNLOCK(dst_object);
VM_OBJECT_WUNLOCK(dst_object);
/*
* Enter it in the pmap. If a wired, copy-on-write
@ -1350,7 +1350,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
/*
* Mark it no longer busy, and put it on the active list.
*/
VM_OBJECT_LOCK(dst_object);
VM_OBJECT_WLOCK(dst_object);
if (upgrade) {
vm_page_lock(src_m);
@ -1367,7 +1367,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
}
vm_page_wakeup(dst_m);
}
VM_OBJECT_UNLOCK(dst_object);
VM_OBJECT_WUNLOCK(dst_object);
if (upgrade) {
dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
vm_object_deallocate(src_object);
@ -1403,7 +1403,7 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
vm_page_t rtm;
int cbehind, cahead;
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
object = m->object;
pindex = m->pindex;

@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
@ -238,7 +239,7 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
vm_pindex_t pindex;
int rv;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
pindex = OFF_TO_IDX(offset);
m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
@ -260,7 +261,7 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
vm_page_unlock(m);
vm_page_wakeup(m);
out:
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (m);
}
@ -394,7 +395,7 @@ vm_thread_new(struct thread *td, int pages)
* For the length of the stack, link in a real page of ram for each
* page of stack.
*/
VM_OBJECT_LOCK(ksobj);
VM_OBJECT_WLOCK(ksobj);
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page.
@ -404,7 +405,7 @@ vm_thread_new(struct thread *td, int pages)
ma[i] = m;
m->valid = VM_PAGE_BITS_ALL;
}
VM_OBJECT_UNLOCK(ksobj);
VM_OBJECT_WUNLOCK(ksobj);
pmap_qenter(ks, ma, pages);
return (1);
}
@ -417,7 +418,7 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
atomic_add_int(&kstacks, -1);
pmap_qremove(ks, pages);
VM_OBJECT_LOCK(ksobj);
VM_OBJECT_WLOCK(ksobj);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
@ -427,7 +428,7 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
vm_page_free(m);
vm_page_unlock(m);
}
VM_OBJECT_UNLOCK(ksobj);
VM_OBJECT_WUNLOCK(ksobj);
vm_object_deallocate(ksobj);
kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
@ -505,7 +506,7 @@ vm_thread_swapout(struct thread *td)
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
pmap_qremove(td->td_kstack, pages);
VM_OBJECT_LOCK(ksobj);
VM_OBJECT_WLOCK(ksobj);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
@ -515,7 +516,7 @@ vm_thread_swapout(struct thread *td)
vm_page_unwire(m, 0);
vm_page_unlock(m);
}
VM_OBJECT_UNLOCK(ksobj);
VM_OBJECT_WUNLOCK(ksobj);
}
/*
@ -530,7 +531,7 @@ vm_thread_swapin(struct thread *td)
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
VM_OBJECT_LOCK(ksobj);
VM_OBJECT_WLOCK(ksobj);
for (i = 0; i < pages; i++)
ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
VM_ALLOC_WIRED);
@ -557,7 +558,7 @@ vm_thread_swapin(struct thread *td)
} else if (ma[i]->oflags & VPO_BUSY)
vm_page_wakeup(ma[i]);
}
VM_OBJECT_UNLOCK(ksobj);
VM_OBJECT_WUNLOCK(ksobj);
pmap_qenter(td->td_kstack, ma, pages);
cpu_thread_swapin(td);
}

@ -68,8 +68,8 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/selinfo.h>

@ -70,9 +70,9 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h> /* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
@ -234,7 +234,7 @@ kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
VM_PROT_ALL, 0);
pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
end_offset = offset + size;
for (; offset < end_offset; offset += PAGE_SIZE) {
tries = 0;
@ -242,12 +242,12 @@ kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags, 1,
low, high, PAGE_SIZE, 0, memattr);
if (m == NULL) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
vm_map_unlock(map);
vm_pageout_grow_cache(tries, low, high);
vm_map_lock(map);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
tries++;
goto retry;
}
@ -266,7 +266,7 @@ kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vm_map_unlock(map);
vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
VM_MAP_WIRE_NOHOLES);
@ -303,18 +303,18 @@ kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
vm_map_insert(map, object, offset, addr, addr + size, VM_PROT_ALL,
VM_PROT_ALL, 0);
pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
tries = 0;
retry:
m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags,
atop(size), low, high, alignment, boundary, memattr);
if (m == NULL) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
vm_map_unlock(map);
vm_pageout_grow_cache(tries, low, high);
vm_map_lock(map);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
tries++;
goto retry;
}
@ -328,7 +328,7 @@ kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vm_map_unlock(map);
vm_map_wire(map, addr, addr + size, VM_MAP_WIRE_SYSTEM |
VM_MAP_WIRE_NOHOLES);
@ -488,7 +488,7 @@ kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
VM_OBJECT_LOCK(kmem_object);
VM_OBJECT_WLOCK(kmem_object);
for (i = 0; i < size; i += PAGE_SIZE) {
retry:
m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);
@ -500,7 +500,7 @@ kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
*/
if (m == NULL) {
if ((flags & M_NOWAIT) == 0) {
VM_OBJECT_UNLOCK(kmem_object);
VM_OBJECT_WUNLOCK(kmem_object);
entry->eflags |= MAP_ENTRY_IN_TRANSITION;
vm_map_unlock(map);
VM_WAIT;
@ -510,7 +510,7 @@ kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
MAP_ENTRY_IN_TRANSITION,
("kmem_back: volatile entry"));
entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
VM_OBJECT_LOCK(kmem_object);
VM_OBJECT_WLOCK(kmem_object);
goto retry;
}
/*
@ -526,7 +526,7 @@ kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
vm_page_unwire(m, 0);
vm_page_free(m);
}
VM_OBJECT_UNLOCK(kmem_object);
VM_OBJECT_WUNLOCK(kmem_object);
vm_map_delete(map, addr, addr + size);
return (KERN_NO_SPACE);
}
@ -536,7 +536,7 @@ kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
KASSERT((m->oflags & VPO_UNMANAGED) != 0,
("kmem_malloc: page %p is managed", m));
}
VM_OBJECT_UNLOCK(kmem_object);
VM_OBJECT_WUNLOCK(kmem_object);
/*
* Mark map entry as non-pageable. Repeat the assert.
@ -556,7 +556,7 @@ kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
/*
* Loop through pages, entering them in the pmap.
*/
VM_OBJECT_LOCK(kmem_object);
VM_OBJECT_WLOCK(kmem_object);
for (i = 0; i < size; i += PAGE_SIZE) {
m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
/*
@ -566,7 +566,7 @@ kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
TRUE);
vm_page_wakeup(m);
}
VM_OBJECT_UNLOCK(kmem_object);
VM_OBJECT_WUNLOCK(kmem_object);
return (KERN_SUCCESS);
}

@ -77,6 +77,7 @@ __FBSDID("$FreeBSD$");
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
@ -1222,10 +1223,10 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
* reference counting is insufficient to recognize
* aliases with precision.)
*/
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (object->ref_count > 1 || object->shadow_count != 0)
vm_object_clear_flag(object, OBJ_ONEMAPPING);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
else if ((prev_entry != &map->header) &&
(prev_entry->eflags == protoeflags) &&
@ -1623,12 +1624,12 @@ _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
} else if (entry->object.vm_object != NULL &&
((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
entry->cred != NULL) {
VM_OBJECT_LOCK(entry->object.vm_object);
VM_OBJECT_WLOCK(entry->object.vm_object);
KASSERT(entry->object.vm_object->cred == NULL,
("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
entry->object.vm_object->cred = entry->cred;
entry->object.vm_object->charge = entry->end - entry->start;
VM_OBJECT_UNLOCK(entry->object.vm_object);
VM_OBJECT_WUNLOCK(entry->object.vm_object);
entry->cred = NULL;
}
@ -1700,12 +1701,12 @@ _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
} else if (entry->object.vm_object != NULL &&
((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
entry->cred != NULL) {
VM_OBJECT_LOCK(entry->object.vm_object);
VM_OBJECT_WLOCK(entry->object.vm_object);
KASSERT(entry->object.vm_object->cred == NULL,
("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
entry->object.vm_object->cred = entry->cred;
entry->object.vm_object->charge = entry->end - entry->start;
VM_OBJECT_UNLOCK(entry->object.vm_object);
VM_OBJECT_WUNLOCK(entry->object.vm_object);
entry->cred = NULL;
}
@ -1805,7 +1806,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
return;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
pmap_object_init_pt(map->pmap, addr, object, pindex, size);
goto unlock_return;
@ -1856,7 +1857,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
pmap_enter_object(map->pmap, start, addr + ptoa(psize),
p_start, prot);
unlock_return:
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
/*
@ -1932,9 +1933,9 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
continue;
}
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
continue;
}
@ -1946,7 +1947,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
KASSERT(obj->charge == 0,
("vm_map_protect: object %p overcharged\n", obj));
if (!swap_reserve(ptoa(obj->size))) {
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
vm_map_unlock(map);
return (KERN_RESOURCE_SHORTAGE);
}
@ -1954,7 +1955,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
crhold(cred);
obj->cred = cred;
obj->charge = ptoa(obj->size);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
}
/*
@ -2717,7 +2718,7 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
count = OFF_TO_IDX(size);
offidxstart = OFF_TO_IDX(entry->offset);
offidxend = offidxstart + count;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (object->ref_count != 1 &&
((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
object == kernel_object || object == kmem_object)) {
@ -2746,7 +2747,7 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
}
}
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
} else
entry->object.vm_object = NULL;
if (map->system_map)
@ -2954,7 +2955,7 @@ vm_map_copy_entry(
*/
size = src_entry->end - src_entry->start;
if ((src_object = src_entry->object.vm_object) != NULL) {
VM_OBJECT_LOCK(src_object);
VM_OBJECT_WLOCK(src_object);
charged = ENTRY_CHARGED(src_entry);
if ((src_object->handle == NULL) &&
(src_object->type == OBJT_DEFAULT ||
@ -2975,7 +2976,7 @@ vm_map_copy_entry(
src_object->cred = src_entry->cred;
src_object->charge = size;
}
VM_OBJECT_UNLOCK(src_object);
VM_OBJECT_WUNLOCK(src_object);
dst_entry->object.vm_object = src_object;
if (charged) {
cred = curthread->td_ucred;
@ -3151,7 +3152,7 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
vm_object_deallocate(object);
object = old_entry->object.vm_object;
}
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_object_clear_flag(object, OBJ_ONEMAPPING);
if (old_entry->cred != NULL) {
KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
@ -3159,7 +3160,7 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
object->charge = old_entry->end - old_entry->start;
old_entry->cred = NULL;
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
/*
* Clone the entry, referencing the shared object.
@ -3845,10 +3846,10 @@ RetryLookup:;
crfree(entry->cred);
entry->cred = NULL;
} else if (entry->cred != NULL) {
VM_OBJECT_LOCK(eobject);
VM_OBJECT_WLOCK(eobject);
eobject->cred = entry->cred;
eobject->charge = size;
VM_OBJECT_UNLOCK(eobject);
VM_OBJECT_WUNLOCK(eobject);
entry->cred = NULL;
}
@ -3873,10 +3874,10 @@ RetryLookup:;
atop(size));
entry->offset = 0;
if (entry->cred != NULL) {
VM_OBJECT_LOCK(entry->object.vm_object);
VM_OBJECT_WLOCK(entry->object.vm_object);
entry->object.vm_object->cred = entry->cred;
entry->object.vm_object->charge = size;
VM_OBJECT_UNLOCK(entry->object.vm_object);
VM_OBJECT_WUNLOCK(entry->object.vm_object);
entry->cred = NULL;
}
vm_map_lock_downgrade(map);

@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>
@ -110,7 +111,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
*/
mtx_lock(&vm_object_list_mtx);
TAILQ_FOREACH(object, &vm_object_list, object_list) {
if (!VM_OBJECT_TRYLOCK(object)) {
if (!VM_OBJECT_TRYWLOCK(object)) {
/*
* Avoid a lock-order reversal. Consequently,
* the reported number of active pages may be
@ -119,7 +120,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
continue;
}
vm_object_clear_flag(object, OBJ_ACTIVE);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
mtx_unlock(&vm_object_list_mtx);
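
vmtotal() holds vm_object_list_mtx for the whole walk, so each object lock can only be taken opportunistically; blocking would invert the established lock order. A failed try-lock simply skips the object for this sample, at the cost of slightly stale statistics:

	mtx_lock(&vm_object_list_mtx);
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		if (!VM_OBJECT_TRYWLOCK(object))
			continue;	/* blocking here would invert lock order */
		vm_object_clear_flag(object, OBJ_ACTIVE);
		VM_OBJECT_WUNLOCK(object);
	}
	mtx_unlock(&vm_object_list_mtx);
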
/*
@ -178,10 +179,10 @@ vmtotal(SYSCTL_HANDLER_ARGS)
if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) ||
(object = entry->object.vm_object) == NULL)
continue;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_object_set_flag(object, OBJ_ACTIVE);
paging |= object->paging_in_progress;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
vm_map_unlock_read(map);
vmspace_free(vm);

@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
@ -880,12 +881,12 @@ sys_mincore(td, uap)
m = PHYS_TO_VM_PAGE(locked_pa);
if (m->object != object) {
if (object != NULL)
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
object = m->object;
locked = VM_OBJECT_TRYLOCK(object);
locked = VM_OBJECT_TRYWLOCK(object);
vm_page_unlock(m);
if (!locked) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_page_lock(m);
goto retry;
}
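
sys_mincore() has the inverse problem: it already holds the page lock, but object locks order before page locks. The conversion keeps the try-lock dance intact; on failure the page lock is dropped, the object lock is taken blocking, and the lookup restarts:

	locked = VM_OBJECT_TRYWLOCK(object);
	vm_page_unlock(m);			/* cannot block while holding it */
	if (!locked) {
		VM_OBJECT_WLOCK(object);	/* block in the legal order */
		vm_page_lock(m);
		goto retry;			/* the mapping may have changed */
	}
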
@ -903,9 +904,9 @@ sys_mincore(td, uap)
*/
if (current->object.vm_object != object) {
if (object != NULL)
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
object = current->object.vm_object;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}
if (object->type == OBJT_DEFAULT ||
object->type == OBJT_SWAP ||
@ -942,7 +943,7 @@ sys_mincore(td, uap)
mincoreinfo |= MINCORE_REFERENCED_OTHER;
}
if (object != NULL)
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
/*
* subyte may page fault. In case it needs to modify

@ -78,6 +78,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h> /* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
@ -194,8 +195,8 @@ vm_object_zinit(void *mem, int size, int flags)
vm_object_t object;
object = (vm_object_t)mem;
bzero(&object->mtx, sizeof(object->mtx));
mtx_init(&object->mtx, "vm object", NULL, MTX_DEF | MTX_DUPOK);
bzero(&object->lock, sizeof(object->lock));
rw_init_flags(&object->lock, "vm object", RW_DUPOK);
/* These are true for any object that has been freed */
object->root = NULL;
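
Note the flag mapping in the hunk above: MTX_DEF | MTX_DUPOK becomes plain RW_DUPOK (rwlocks have no MTX_DEF analogue), and the four-argument mtx_init() becomes rw_init_flags(). DUPOK remains essential because the shadow-chain code legitimately holds several locks of the same "vm object" class at once, which WITNESS would otherwise report; a sketch of the situation it permits:

	/* Two "vm object" locks held together; tolerated only with DUPOK. */
	VM_OBJECT_WLOCK(object);
	VM_OBJECT_WLOCK(object->backing_object);
	/* ... collapse or scan work ... */
	VM_OBJECT_WUNLOCK(object->backing_object);
	VM_OBJECT_WUNLOCK(object);
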
@ -266,7 +267,7 @@ vm_object_init(void)
TAILQ_INIT(&vm_object_list);
mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
mtx_init(&kernel_object->mtx, "vm object", "kernel object", MTX_DEF);
rw_init(&kernel_object->lock, "kernel vm object");
_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kernel_object);
#if VM_NRESERVLEVEL > 0
@ -274,7 +275,7 @@ vm_object_init(void)
kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif
mtx_init(&kmem_object->mtx, "vm object", "kmem object", MTX_DEF);
rw_init(&kmem_object->lock, "kmem vm object");
_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kmem_object);
#if VM_NRESERVLEVEL > 0
@ -300,7 +301,7 @@ void
vm_object_clear_flag(vm_object_t object, u_short bits)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
object->flags &= ~bits;
}
@ -317,7 +318,7 @@ int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
switch (object->type) {
case OBJT_DEFAULT:
case OBJT_DEVICE:
@ -343,7 +344,7 @@ void
vm_object_pip_add(vm_object_t object, short i)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
object->paging_in_progress += i;
}
@ -351,7 +352,7 @@ void
vm_object_pip_subtract(vm_object_t object, short i)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
object->paging_in_progress -= i;
}
@ -359,7 +360,7 @@ void
vm_object_pip_wakeup(vm_object_t object)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
object->paging_in_progress--;
if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
vm_object_clear_flag(object, OBJ_PIPWNT);
@ -371,7 +372,7 @@ void
vm_object_pip_wakeupn(vm_object_t object, short i)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
if (i)
object->paging_in_progress -= i;
if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
@ -384,7 +385,7 @@ void
vm_object_pip_wait(vm_object_t object, char *waitid)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
while (object->paging_in_progress) {
object->flags |= OBJ_PIPWNT;
VM_OBJECT_SLEEP(object, object, PVM, waitid, 0);
@ -418,9 +419,9 @@ vm_object_reference(vm_object_t object)
{
if (object == NULL)
return;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_object_reference_locked(object);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
/*
@ -435,7 +436,7 @@ vm_object_reference_locked(vm_object_t object)
{
struct vnode *vp;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
object->ref_count++;
if (object->type == OBJT_VNODE) {
vp = object->handle;
@ -451,7 +452,7 @@ vm_object_vndeallocate(vm_object_t object)
{
struct vnode *vp = (struct vnode *) object->handle;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_VNODE,
("vm_object_vndeallocate: not a vnode object"));
KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
@ -464,23 +465,23 @@ vm_object_vndeallocate(vm_object_t object)
if (object->ref_count > 1) {
object->ref_count--;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
/* vrele may need the vnode lock. */
vrele(vp);
} else {
vhold(vp);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vdrop(vp);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
object->ref_count--;
if (object->type == OBJT_DEAD) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
VOP_UNLOCK(vp, 0);
} else {
if (object->ref_count == 0)
VOP_UNSET_TEXT(vp);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vput(vp);
}
}
@ -503,7 +504,7 @@ vm_object_deallocate(vm_object_t object)
vm_object_t temp;
while (object != NULL) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (object->type == OBJT_VNODE) {
vm_object_vndeallocate(object);
return;
@ -520,7 +521,7 @@ vm_object_deallocate(vm_object_t object)
*/
object->ref_count--;
if (object->ref_count > 1) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return;
} else if (object->ref_count == 1) {
if (object->shadow_count == 0 &&
@ -539,12 +540,12 @@ vm_object_deallocate(vm_object_t object)
("vm_object_deallocate: ref_count: %d, shadow_count: %d",
object->ref_count,
object->shadow_count));
if (!VM_OBJECT_TRYLOCK(robject)) {
if (!VM_OBJECT_TRYWLOCK(robject)) {
/*
* Avoid a potential deadlock.
*/
object->ref_count++;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
/*
* More likely than not the thread
* holding robject's lock has lower
@ -568,27 +569,27 @@ vm_object_deallocate(vm_object_t object)
robject->ref_count++;
retry:
if (robject->paging_in_progress) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vm_object_pip_wait(robject,
"objde1");
temp = robject->backing_object;
if (object == temp) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
goto retry;
}
} else if (object->paging_in_progress) {
VM_OBJECT_UNLOCK(robject);
VM_OBJECT_WUNLOCK(robject);
object->flags |= OBJ_PIPWNT;
VM_OBJECT_SLEEP(object, object,
PDROP | PVM, "objde2", 0);
VM_OBJECT_LOCK(robject);
VM_OBJECT_WLOCK(robject);
temp = robject->backing_object;
if (object == temp) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
goto retry;
}
} else
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (robject->ref_count == 1) {
robject->ref_count--;
@ -597,21 +598,21 @@ vm_object_deallocate(vm_object_t object)
}
object = robject;
vm_object_collapse(object);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
continue;
}
VM_OBJECT_UNLOCK(robject);
VM_OBJECT_WUNLOCK(robject);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return;
}
doterm:
temp = object->backing_object;
if (temp != NULL) {
VM_OBJECT_LOCK(temp);
VM_OBJECT_WLOCK(temp);
LIST_REMOVE(object, shadow_list);
temp->shadow_count--;
VM_OBJECT_UNLOCK(temp);
VM_OBJECT_WUNLOCK(temp);
object->backing_object = NULL;
}
/*
@ -622,7 +623,7 @@ vm_object_deallocate(vm_object_t object)
if ((object->flags & OBJ_DEAD) == 0)
vm_object_terminate(object);
else
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
object = temp;
}
}
@ -674,7 +675,7 @@ vm_object_terminate(vm_object_t object)
{
vm_page_t p, p_next;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Make sure no one uses us.
@ -700,11 +701,11 @@ vm_object_terminate(vm_object_t object)
* Clean pages and flush buffers.
*/
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vinvalbuf(vp, V_SAVE, 0, 0);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}
KASSERT(object->ref_count == 0,
@ -759,7 +760,7 @@ vm_object_terminate(vm_object_t object)
* Let the pager know object is dead.
*/
vm_pager_deallocate(object);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vm_object_destroy(object);
}
@ -815,7 +816,7 @@ vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
int curgeneration, n, pagerflags;
boolean_t clearobjflags, eio, res;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_VNODE, ("Not a vnode object"));
if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
object->resident_page_count == 0)
@ -901,7 +902,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
int count, i, mreq, runlen;
vm_page_lock_assert(p, MA_NOTOWNED);
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
count = 1;
mreq = 0;
@ -959,11 +960,11 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
return (TRUE);
res = TRUE;
error = 0;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
while ((backing_object = object->backing_object) != NULL) {
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_WLOCK(backing_object);
offset += object->backing_object_offset;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
object = backing_object;
if (object->size < OFF_TO_IDX(offset + size))
size = IDX_TO_OFF(object->size) - offset;
@ -983,7 +984,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
if (object->type == OBJT_VNODE &&
(object->flags & OBJ_MIGHTBEDIRTY) != 0) {
vp = object->handle;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
(void) vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (syncio && !invalidate && offset == 0 &&
@ -1001,17 +1002,17 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
fsync_after = FALSE;
}
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
res = vm_object_page_clean(object, offset, offset + size,
flags);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (fsync_after)
error = VOP_FSYNC(vp, MNT_WAIT, curthread);
VOP_UNLOCK(vp, 0);
vn_finished_write(mp);
if (error != 0)
res = FALSE;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}
if ((object->type == OBJT_VNODE ||
object->type == OBJT_DEVICE) && invalidate) {
@ -1029,7 +1030,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
vm_object_page_remove(object, OFF_TO_IDX(offset),
OFF_TO_IDX(offset + size + PAGE_MASK), flags);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (res);
}
@ -1064,7 +1065,7 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
if (object == NULL)
return;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
/*
* Locate and adjust resident pages
*/
@ -1105,10 +1106,10 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
backing_object = tobject->backing_object;
if (backing_object == NULL)
goto unlock_tobject;
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_WLOCK(backing_object);
tpindex += OFF_TO_IDX(tobject->backing_object_offset);
if (tobject != object)
VM_OBJECT_UNLOCK(tobject);
VM_OBJECT_WUNLOCK(tobject);
tobject = backing_object;
goto shadowlookup;
} else if (m->valid != VM_PAGE_BITS_ALL)
@ -1136,10 +1137,10 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
}
vm_page_unlock(m);
if (object != tobject)
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
m->oflags |= VPO_WANTED;
VM_OBJECT_SLEEP(tobject, m, PDROP | PVM, "madvpo", 0);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
goto relookup;
}
if (advise == MADV_WILLNEED) {
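
The VM_OBJECT_SLEEP() above now expands to rw_sleep(), which atomically releases the write lock while the thread waits on the busy page and, unless PDROP is passed as in this madvise path, reacquires it before returning. The recurring shape at such call sites is roughly this sketch (the "objwt" wmesg is illustrative; each site uses its own label):

	if ((m->oflags & VPO_BUSY) != 0 || m->busy != 0) {
		m->oflags |= VPO_WANTED;	/* request a wakeup(m) */
		VM_OBJECT_SLEEP(object, m, PVM, "objwt", 0);
		goto relookup;			/* the page may be gone now */
	}
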
@ -1172,9 +1173,9 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
swap_pager_freespace(tobject, tpindex, 1);
unlock_tobject:
if (tobject != object)
VM_OBJECT_UNLOCK(tobject);
VM_OBJECT_WUNLOCK(tobject);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
/*
@ -1202,15 +1203,15 @@ vm_object_shadow(
* Don't create the new object if the old object isn't shared.
*/
if (source != NULL) {
VM_OBJECT_LOCK(source);
VM_OBJECT_WLOCK(source);
if (source->ref_count == 1 &&
source->handle == NULL &&
(source->type == OBJT_DEFAULT ||
source->type == OBJT_SWAP)) {
VM_OBJECT_UNLOCK(source);
VM_OBJECT_WUNLOCK(source);
return;
}
VM_OBJECT_UNLOCK(source);
VM_OBJECT_WUNLOCK(source);
}
/*
@ -1235,7 +1236,7 @@ vm_object_shadow(
*/
result->backing_object_offset = *offset;
if (source != NULL) {
VM_OBJECT_LOCK(source);
VM_OBJECT_WLOCK(source);
LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
source->shadow_count++;
#if VM_NRESERVLEVEL > 0
@ -1243,7 +1244,7 @@ vm_object_shadow(
result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
((1 << (VM_NFREEORDER - 1)) - 1);
#endif
VM_OBJECT_UNLOCK(source);
VM_OBJECT_WUNLOCK(source);
}
@ -1274,7 +1275,7 @@ vm_object_split(vm_map_entry_t entry)
return;
if (orig_object->ref_count <= 1)
return;
VM_OBJECT_UNLOCK(orig_object);
VM_OBJECT_WUNLOCK(orig_object);
offidxstart = OFF_TO_IDX(entry->offset);
size = atop(entry->end - entry->start);
@ -1289,17 +1290,17 @@ vm_object_split(vm_map_entry_t entry)
* At this point, the new object is still private, so the order in
* which the original and new objects are locked does not matter.
*/
VM_OBJECT_LOCK(new_object);
VM_OBJECT_LOCK(orig_object);
VM_OBJECT_WLOCK(new_object);
VM_OBJECT_WLOCK(orig_object);
source = orig_object->backing_object;
if (source != NULL) {
VM_OBJECT_LOCK(source);
VM_OBJECT_WLOCK(source);
if ((source->flags & OBJ_DEAD) != 0) {
VM_OBJECT_UNLOCK(source);
VM_OBJECT_UNLOCK(orig_object);
VM_OBJECT_UNLOCK(new_object);
VM_OBJECT_WUNLOCK(source);
VM_OBJECT_WUNLOCK(orig_object);
VM_OBJECT_WUNLOCK(new_object);
vm_object_deallocate(new_object);
VM_OBJECT_LOCK(orig_object);
VM_OBJECT_WLOCK(orig_object);
return;
}
LIST_INSERT_HEAD(&source->shadow_head,
@ -1307,7 +1308,7 @@ vm_object_split(vm_map_entry_t entry)
source->shadow_count++;
vm_object_reference_locked(source); /* for new_object */
vm_object_clear_flag(source, OBJ_ONEMAPPING);
VM_OBJECT_UNLOCK(source);
VM_OBJECT_WUNLOCK(source);
new_object->backing_object_offset =
orig_object->backing_object_offset + entry->offset;
new_object->backing_object = source;
@ -1334,10 +1335,10 @@ vm_object_split(vm_map_entry_t entry)
* not be changed by this operation.
*/
if ((m->oflags & VPO_BUSY) || m->busy) {
VM_OBJECT_UNLOCK(new_object);
VM_OBJECT_WUNLOCK(new_object);
m->oflags |= VPO_WANTED;
VM_OBJECT_SLEEP(orig_object, m, PVM, "spltwt", 0);
VM_OBJECT_LOCK(new_object);
VM_OBJECT_WLOCK(new_object);
goto retry;
}
#if VM_NRESERVLEVEL > 0
@ -1381,14 +1382,14 @@ vm_object_split(vm_map_entry_t entry)
vm_page_cache_transfer(orig_object, offidxstart,
new_object);
}
VM_OBJECT_UNLOCK(orig_object);
VM_OBJECT_WUNLOCK(orig_object);
TAILQ_FOREACH(m, &new_object->memq, listq)
vm_page_wakeup(m);
VM_OBJECT_UNLOCK(new_object);
VM_OBJECT_WUNLOCK(new_object);
entry->object.vm_object = new_object;
entry->offset = 0LL;
vm_object_deallocate(orig_object);
VM_OBJECT_LOCK(new_object);
VM_OBJECT_WLOCK(new_object);
}
#define OBSC_TEST_ALL_SHADOWED 0x0001
@ -1403,8 +1404,8 @@ vm_object_backing_scan(vm_object_t object, int op)
vm_object_t backing_object;
vm_pindex_t backing_offset_index;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
VM_OBJECT_ASSERT_WLOCKED(object->backing_object);
backing_object = object->backing_object;
backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
@ -1492,12 +1493,12 @@ vm_object_backing_scan(vm_object_t object, int op)
}
} else if (op & OBSC_COLLAPSE_WAIT) {
if ((p->oflags & VPO_BUSY) || p->busy) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
p->oflags |= VPO_WANTED;
VM_OBJECT_SLEEP(backing_object, p,
PDROP | PVM, "vmocol", 0);
VM_OBJECT_LOCK(object);
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_WLOCK(object);
VM_OBJECT_WLOCK(backing_object);
/*
* If we slept, anything could have
* happened. Since the object is
@ -1624,8 +1625,8 @@ vm_object_qcollapse(vm_object_t object)
{
vm_object_t backing_object = object->backing_object;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
VM_OBJECT_ASSERT_WLOCKED(backing_object);
if (backing_object->ref_count != 1)
return;
@ -1643,7 +1644,7 @@ vm_object_qcollapse(vm_object_t object)
void
vm_object_collapse(vm_object_t object)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
while (TRUE) {
vm_object_t backing_object;
@ -1660,7 +1661,7 @@ vm_object_collapse(vm_object_t object)
* we check the backing object first, because it is most likely
* not collapsable.
*/
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_WLOCK(backing_object);
if (backing_object->handle != NULL ||
(backing_object->type != OBJT_DEFAULT &&
backing_object->type != OBJT_SWAP) ||
@ -1669,7 +1670,7 @@ vm_object_collapse(vm_object_t object)
(object->type != OBJT_DEFAULT &&
object->type != OBJT_SWAP) ||
(object->flags & OBJ_DEAD)) {
VM_OBJECT_UNLOCK(backing_object);
VM_OBJECT_WUNLOCK(backing_object);
break;
}
@ -1678,7 +1679,7 @@ vm_object_collapse(vm_object_t object)
backing_object->paging_in_progress != 0
) {
vm_object_qcollapse(object);
VM_OBJECT_UNLOCK(backing_object);
VM_OBJECT_WUNLOCK(backing_object);
break;
}
/*
@ -1739,7 +1740,7 @@ vm_object_collapse(vm_object_t object)
LIST_REMOVE(object, shadow_list);
backing_object->shadow_count--;
if (backing_object->backing_object) {
VM_OBJECT_LOCK(backing_object->backing_object);
VM_OBJECT_WLOCK(backing_object->backing_object);
LIST_REMOVE(backing_object, shadow_list);
LIST_INSERT_HEAD(
&backing_object->backing_object->shadow_head,
@ -1747,7 +1748,7 @@ vm_object_collapse(vm_object_t object)
/*
* The shadow_count has not changed.
*/
VM_OBJECT_UNLOCK(backing_object->backing_object);
VM_OBJECT_WUNLOCK(backing_object->backing_object);
}
object->backing_object = backing_object->backing_object;
object->backing_object_offset +=
@ -1763,7 +1764,7 @@ vm_object_collapse(vm_object_t object)
KASSERT(backing_object->ref_count == 1, (
"backing_object %p was somehow re-referenced during collapse!",
backing_object));
VM_OBJECT_UNLOCK(backing_object);
VM_OBJECT_WUNLOCK(backing_object);
vm_object_destroy(backing_object);
object_collapses++;
@ -1777,7 +1778,7 @@ vm_object_collapse(vm_object_t object)
if (object->resident_page_count != object->size &&
vm_object_backing_scan(object,
OBSC_TEST_ALL_SHADOWED) == 0) {
VM_OBJECT_UNLOCK(backing_object);
VM_OBJECT_WUNLOCK(backing_object);
break;
}
@ -1791,7 +1792,7 @@ vm_object_collapse(vm_object_t object)
new_backing_object = backing_object->backing_object;
if ((object->backing_object = new_backing_object) != NULL) {
VM_OBJECT_LOCK(new_backing_object);
VM_OBJECT_WLOCK(new_backing_object);
LIST_INSERT_HEAD(
&new_backing_object->shadow_head,
object,
@ -1799,7 +1800,7 @@ vm_object_collapse(vm_object_t object)
);
new_backing_object->shadow_count++;
vm_object_reference_locked(new_backing_object);
VM_OBJECT_UNLOCK(new_backing_object);
VM_OBJECT_WUNLOCK(new_backing_object);
object->backing_object_offset +=
backing_object->backing_object_offset;
}
@ -1809,7 +1810,7 @@ vm_object_collapse(vm_object_t object)
* its ref_count was at least 2, it will not vanish.
*/
backing_object->ref_count--;
VM_OBJECT_UNLOCK(backing_object);
VM_OBJECT_WUNLOCK(backing_object);
object_bypasses++;
}
@ -1852,7 +1853,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
vm_page_t p, next;
int wirings;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
(options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
("vm_object_page_remove: illegal options for object %p", object));
@ -1947,7 +1948,7 @@ vm_object_page_cache(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
struct mtx *mtx, *new_mtx;
vm_page_t p, next;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
("vm_object_page_cache: illegal object %p", object));
if (object->resident_page_count == 0)
@ -1995,7 +1996,7 @@ vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
vm_pindex_t pindex;
int rv;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
for (pindex = start; pindex < end; pindex++) {
m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL |
VM_ALLOC_RETRY);
@ -2056,10 +2057,10 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
if (prev_object == NULL)
return (TRUE);
VM_OBJECT_LOCK(prev_object);
VM_OBJECT_WLOCK(prev_object);
if (prev_object->type != OBJT_DEFAULT &&
prev_object->type != OBJT_SWAP) {
VM_OBJECT_UNLOCK(prev_object);
VM_OBJECT_WUNLOCK(prev_object);
return (FALSE);
}
@ -2074,7 +2075,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
* pages not mapped to prev_entry may be in use anyway)
*/
if (prev_object->backing_object != NULL) {
VM_OBJECT_UNLOCK(prev_object);
VM_OBJECT_WUNLOCK(prev_object);
return (FALSE);
}
@ -2084,7 +2085,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
if ((prev_object->ref_count > 1) &&
(prev_object->size != next_pindex)) {
VM_OBJECT_UNLOCK(prev_object);
VM_OBJECT_WUNLOCK(prev_object);
return (FALSE);
}
@ -2138,7 +2139,7 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
if (next_pindex + next_size > prev_object->size)
prev_object->size = next_pindex + next_size;
VM_OBJECT_UNLOCK(prev_object);
VM_OBJECT_WUNLOCK(prev_object);
return (TRUE);
}
@ -2146,7 +2147,7 @@ void
vm_object_set_writeable_dirty(vm_object_t object)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
if (object->type != OBJT_VNODE)
return;
object->generation++;

@ -70,15 +70,16 @@
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/_rwlock.h>
/*
* Types defined:
*
* vm_object_t Virtual memory object.
*
* The root of cached pages pool is protected by both the per-object mutex
* The root of cached pages pool is protected by both the per-object lock
* and the free pages queue mutex.
* On insert in the cache splay tree, the per-object mutex is expected
* On insert in the cache splay tree, the per-object lock is expected
* to be already held and the free pages queue mutex will be
* acquired during the operation too.
* On remove and lookup from the cache splay tree, only the free
@ -89,13 +90,13 @@
*
* List of locks
* (c) const until freed
* (o) per-object mutex
* (o) per-object lock
* (f) free pages queue mutex
*
*/
struct vm_object {
struct mtx mtx;
struct rwlock lock;
TAILQ_ENTRY(vm_object) object_list; /* list of all objects */
LIST_HEAD(, vm_object) shadow_head; /* objects that this is a shadow for */
LIST_ENTRY(vm_object) shadow_list; /* chain of shadow objects */
@ -203,14 +204,26 @@ extern struct vm_object kmem_object_store;
#define kernel_object (&kernel_object_store)
#define kmem_object (&kmem_object_store)
#define VM_OBJECT_LOCK(object) mtx_lock(&(object)->mtx)
#define VM_OBJECT_LOCK_ASSERT(object, type) \
mtx_assert(&(object)->mtx, (type))
#define VM_OBJECT_ASSERT_LOCKED(object) \
rw_assert(&(object)->lock, RA_LOCKED)
#define VM_OBJECT_ASSERT_RLOCKED(object) \
rw_assert(&(object)->lock, RA_RLOCKED)
#define VM_OBJECT_ASSERT_WLOCKED(object) \
rw_assert(&(object)->lock, RA_WLOCKED)
#define VM_OBJECT_RLOCK(object) \
rw_rlock(&(object)->lock)
#define VM_OBJECT_RUNLOCK(object) \
rw_runlock(&(object)->lock)
#define VM_OBJECT_SLEEP(object, wchan, pri, wmesg, timo) \
msleep((wchan), &(object)->mtx, (pri), \
(wmesg), (timo))
#define VM_OBJECT_TRYLOCK(object) mtx_trylock(&(object)->mtx)
#define VM_OBJECT_UNLOCK(object) mtx_unlock(&(object)->mtx)
rw_sleep((wchan), &(object)->lock, (pri), (wmesg), (timo))
#define VM_OBJECT_TRYRLOCK(object) \
rw_try_rlock(&(object)->lock)
#define VM_OBJECT_TRYWLOCK(object) \
rw_try_wlock(&(object)->lock)
#define VM_OBJECT_WLOCK(object) \
rw_wlock(&(object)->lock)
#define VM_OBJECT_WUNLOCK(object) \
rw_wunlock(&(object)->lock)
/*
* The object must be locked or thread private.
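
This header hunk is the heart of the commit: struct mtx becomes struct rwlock, and the whole KPI is respelled around it, with rw_sleep() replacing msleep() inside VM_OBJECT_SLEEP(). Nothing takes the lock in shared mode yet; the R-flavored macros exist so the page-cache read paths can be converted incrementally. A sketch of the intended split, once those paths stop asserting write ownership:

	/* Reader (future work): inspect resident pages under the shared lock. */
	VM_OBJECT_RLOCK(object);
	m = vm_page_lookup(object, pindex);	/* today this still asserts WLOCKED */
	VM_OBJECT_RUNLOCK(object);

	/* Writer: unchanged semantics, new spelling. */
	VM_OBJECT_WLOCK(object);
	vm_page_insert(m, object, pindex);
	VM_OBJECT_WUNLOCK(object);
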

@ -96,6 +96,7 @@ __FBSDID("$FreeBSD$");
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
@ -468,7 +469,7 @@ void
vm_page_busy(vm_page_t m)
{
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("vm_page_busy: page already busy!!!"));
m->oflags |= VPO_BUSY;
@ -483,7 +484,7 @@ void
vm_page_flash(vm_page_t m)
{
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if (m->oflags & VPO_WANTED) {
m->oflags &= ~VPO_WANTED;
wakeup(m);
@ -501,7 +502,7 @@ void
vm_page_wakeup(vm_page_t m)
{
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!"));
m->oflags &= ~VPO_BUSY;
vm_page_flash(m);
@ -511,7 +512,7 @@ void
vm_page_io_start(vm_page_t m)
{
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
m->busy++;
}
@ -519,7 +520,7 @@ void
vm_page_io_finish(vm_page_t m)
{
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT(m->busy > 0, ("vm_page_io_finish: page %p is not busy", m));
m->busy--;
if (m->busy == 0)
@ -751,7 +752,7 @@ void
vm_page_sleep(vm_page_t m, const char *msg)
{
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if (mtx_owned(vm_page_lockptr(m)))
vm_page_unlock(m);
@ -866,7 +867,7 @@ vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
vm_page_t root;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
if (m->object != NULL)
panic("vm_page_insert: page already inserted");
@ -942,7 +943,7 @@ vm_page_remove(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
if ((object = m->object) == NULL)
return;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
if (m->oflags & VPO_BUSY) {
m->oflags &= ~VPO_BUSY;
vm_page_flash(m);
@ -1016,7 +1017,7 @@ vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
vm_page_t m;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
if ((m = object->root) != NULL && m->pindex != pindex) {
m = vm_page_splay(pindex, m);
if ((object->root = m)->pindex != pindex)
@ -1038,7 +1039,7 @@ vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
{
vm_page_t m;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
if ((m = TAILQ_FIRST(&object->memq)) != NULL) {
if (m->pindex < pindex) {
m = vm_page_splay(pindex, object->root);
@ -1060,7 +1061,7 @@ vm_page_next(vm_page_t m)
{
vm_page_t next;
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((next = TAILQ_NEXT(m, listq)) != NULL &&
next->pindex != m->pindex + 1)
next = NULL;
@ -1078,7 +1079,7 @@ vm_page_prev(vm_page_t m)
{
vm_page_t prev;
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
prev->pindex != m->pindex - 1)
prev = NULL;
@ -1256,7 +1257,7 @@ vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
* requires the object to be locked. In contrast, removal does
* not.
*/
VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(new_object);
KASSERT(vm_object_cache_is_empty(new_object),
("vm_page_cache_transfer: object %p has cached pages",
new_object));
@ -1326,7 +1327,7 @@ vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
* page queues lock in order to prove that the specified page doesn't
* exist.
*/
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
if (__predict_true(vm_object_cache_is_empty(object)))
return (FALSE);
mtx_lock(&vm_page_queue_free_mtx);
@ -1375,7 +1376,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
("vm_page_alloc: inconsistent object/req"));
if (object != NULL)
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
req_class = req & VM_ALLOC_CLASS_MASK;
@ -1583,7 +1584,7 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
("vm_page_alloc_contig: inconsistent object/req"));
if (object != NULL) {
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_PHYS,
("vm_page_alloc_contig: object %p isn't OBJT_PHYS",
object));
@ -1994,7 +1995,7 @@ vm_page_activate(vm_page_t m)
int queue;
vm_page_lock_assert(m, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((queue = m->queue) != PQ_ACTIVE) {
if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
if (m->act_count < ACT_INIT)
@ -2278,7 +2279,7 @@ vm_page_try_to_cache(vm_page_t m)
{
vm_page_lock_assert(m, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if (m->dirty || m->hold_count || m->busy || m->wire_count ||
(m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
return (0);
@ -2301,7 +2302,7 @@ vm_page_try_to_free(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
if (m->object != NULL)
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if (m->dirty || m->hold_count || m->busy || m->wire_count ||
(m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
return (0);
@ -2327,7 +2328,7 @@ vm_page_cache(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
object = m->object;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) || m->busy ||
m->hold_count || m->wire_count)
panic("vm_page_cache: attempting to cache busy page");
@ -2484,7 +2485,7 @@ vm_page_dontneed(vm_page_t m)
int head;
vm_page_lock_assert(m, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
dnw = PCPU_GET(dnweight);
PCPU_INC(dnweight);
@ -2549,7 +2550,7 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
vm_page_t m;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT((allocflags & VM_ALLOC_RETRY) != 0,
("vm_page_grab: VM_ALLOC_RETRY is required"));
retrylookup:
@ -2578,9 +2579,9 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
m = vm_page_alloc(object, pindex, allocflags & ~(VM_ALLOC_RETRY |
VM_ALLOC_IGN_SBUSY));
if (m == NULL) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
VM_WAIT;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
goto retrylookup;
} else if (m->valid != 0)
return (m);
@ -2630,7 +2631,7 @@ vm_page_set_valid_range(vm_page_t m, int base, int size)
{
int endoff, frag;
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if (size == 0) /* handle degenerate case */
return;
@ -2683,7 +2684,7 @@ vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
* write mapped, then the page's dirty field cannot possibly be
* set by a concurrent pmap operation.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if ((m->oflags & VPO_BUSY) == 0 && !pmap_page_is_write_mapped(m))
m->dirty &= ~pagebits;
else {
@ -2737,7 +2738,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
vm_page_bits_t oldvalid, pagebits;
int endoff, frag;
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if (size == 0) /* handle degenerate case */
return;
@ -2827,7 +2828,7 @@ vm_page_set_invalid(vm_page_t m, int base, int size)
{
vm_page_bits_t bits;
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
KASSERT((m->oflags & VPO_BUSY) == 0,
("vm_page_set_invalid: page %p is busy", m));
bits = vm_page_bits(base, size);
@ -2856,7 +2857,7 @@ vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
int b;
int i;
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
/*
* Scan the valid bits looking for invalid sections that
* must be zeroed. Invalid sub-DEV_BSIZE'd areas (where the
@ -2895,7 +2896,7 @@ vm_page_is_valid(vm_page_t m, int base, int size)
{
vm_page_bits_t bits;
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
bits = vm_page_bits(base, size);
if (m->valid && ((m->valid & bits) == bits))
return 1;
@ -2910,7 +2911,7 @@ void
vm_page_test_dirty(vm_page_t m)
{
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
vm_page_dirty(m);
}
@ -2964,7 +2965,7 @@ vm_page_cowfault(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
object = m->object;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->paging_in_progress != 0,
("vm_page_cowfault: object %p's paging-in-progress count is zero.",
object));
@ -2977,9 +2978,9 @@ vm_page_cowfault(vm_page_t m)
if (mnew == NULL) {
vm_page_insert(m, object, pindex);
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
VM_WAIT;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (m == vm_page_lookup(object, pindex)) {
vm_page_lock(m);
goto retry_alloc;
@ -3036,11 +3037,11 @@ vm_page_cowsetup(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0 ||
(m->oflags & VPO_UNMANAGED) != 0 ||
m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object))
m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYWLOCK(m->object))
return (EBUSY);
m->cow++;
pmap_remove_write(m);
VM_OBJECT_UNLOCK(m->object);
VM_OBJECT_WUNLOCK(m->object);
return (0);
}
@ -3057,7 +3058,7 @@ vm_page_object_lock_assert(vm_page_t m)
* here.
*/
if (m->object != NULL && (m->oflags & VPO_BUSY) == 0)
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
}
#endif

@ -92,6 +92,7 @@ __FBSDID("$FreeBSD$");
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
@ -248,7 +249,7 @@ vm_pageout_init_marker(vm_page_t marker, u_short queue)
/*
* vm_pageout_fallback_object_lock:
*
* Lock vm object currently associated with `m'. VM_OBJECT_TRYLOCK is
* Lock vm object currently associated with `m'. VM_OBJECT_TRYWLOCK is
* known to have failed and page queue must be either PQ_ACTIVE or
* PQ_INACTIVE. To avoid lock order violation, unlock the page queues
* while locking the vm object. Use marker page to detect page queue
@ -276,7 +277,7 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, pageq);
vm_pagequeue_unlock(pq);
vm_page_unlock(m);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_page_lock(m);
vm_pagequeue_lock(pq);
@ -346,7 +347,7 @@ vm_pageout_clean(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
object = m->object;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
/*
* It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
@ -484,7 +485,7 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
int numpagedout = 0;
int i, runlen;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Initiate I/O. Bump the vm_page_t->busy counter and
@ -595,12 +596,12 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
continue;
}
object = m->object;
if ((!VM_OBJECT_TRYLOCK(object) &&
if ((!VM_OBJECT_TRYWLOCK(object) &&
(!vm_pageout_fallback_object_lock(m, &next) ||
m->hold_count != 0)) || (m->oflags & VPO_BUSY) != 0 ||
m->busy != 0) {
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
continue;
}
vm_page_test_dirty(m);
@ -609,19 +610,19 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
if (m->dirty != 0) {
vm_page_unlock(m);
if (tries == 0 || (object->flags & OBJ_DEAD) != 0) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
continue;
}
if (object->type == OBJT_VNODE) {
vm_pagequeue_unlock(pq);
vp = object->handle;
vm_object_reference_locked(object);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
(void)vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
VOP_UNLOCK(vp, 0);
vm_object_deallocate(object);
vn_finished_write(mp);
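The vnode branch above follows the ordering the rest of this diff depends on: the object is referenced while write-locked so it cannot be torn down, the object lock is dropped, the vnode lock is taken, and only then is the object lock reacquired for the clean. A self-contained sketch of that skeleton, assuming the same helpers used in the hunk:

    static void
    object_sync_to_vnode(vm_object_t object)
    {
            struct mount *mp;
            struct vnode *vp;

            VM_OBJECT_ASSERT_WLOCKED(object);
            vp = object->handle;
            vm_object_reference_locked(object);     /* pin across unlock */
            VM_OBJECT_WUNLOCK(object);              /* vnode lock comes first */
            (void)vn_start_write(vp, &mp, V_WAIT);
            vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
            VM_OBJECT_WLOCK(object);
            vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
            VM_OBJECT_WUNLOCK(object);
            VOP_UNLOCK(vp, 0);
            vm_object_deallocate(object);           /* drop our reference */
            vn_finished_write(mp);
    }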
@ -632,7 +633,7 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
m_tmp = m;
vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC,
0, NULL, NULL);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (TRUE);
}
} else {
@ -644,7 +645,7 @@ vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
vm_page_cache(m);
vm_page_unlock(m);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
vm_pagequeue_unlock(pq);
return (FALSE);
@ -713,13 +714,13 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
vm_page_t p;
int actcount, remove_mode;
VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(first_object);
if ((first_object->flags & OBJ_FICTITIOUS) != 0)
return;
for (object = first_object;; object = backing_object) {
if (pmap_resident_count(pmap) <= desired)
goto unlock_return;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
if ((object->flags & OBJ_UNMANAGED) != 0 ||
object->paging_in_progress != 0)
goto unlock_return;
@ -775,13 +776,13 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
}
if ((backing_object = object->backing_object) == NULL)
goto unlock_return;
VM_OBJECT_LOCK(backing_object);
VM_OBJECT_WLOCK(backing_object);
if (object != first_object)
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
unlock_return:
if (object != first_object)
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
/*
@ -811,15 +812,15 @@ vm_pageout_map_deactivate_pages(map, desired)
while (tmpe != &map->header) {
if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
obj = tmpe->object.vm_object;
if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
if (obj != NULL && VM_OBJECT_TRYWLOCK(obj)) {
if (obj->shadow_count <= 1 &&
(bigobj == NULL ||
bigobj->resident_page_count < obj->resident_page_count)) {
if (bigobj != NULL)
VM_OBJECT_UNLOCK(bigobj);
VM_OBJECT_WUNLOCK(bigobj);
bigobj = obj;
} else
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
}
}
if (tmpe->wired_count > 0)
@ -829,7 +830,7 @@ vm_pageout_map_deactivate_pages(map, desired)
if (bigobj != NULL) {
vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
VM_OBJECT_UNLOCK(bigobj);
VM_OBJECT_WUNLOCK(bigobj);
}
/*
* Next, hunt around for other pages to deactivate. We actually
@ -842,9 +843,9 @@ vm_pageout_map_deactivate_pages(map, desired)
if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
obj = tmpe->object.vm_object;
if (obj != NULL) {
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
}
}
tmpe = tmpe->next;
@ -963,10 +964,10 @@ vm_pageout_scan(int pass)
continue;
}
object = m->object;
if (!VM_OBJECT_TRYLOCK(object) &&
if (!VM_OBJECT_TRYWLOCK(object) &&
!vm_pageout_fallback_object_lock(m, &next)) {
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
continue;
}
@ -979,7 +980,7 @@ vm_pageout_scan(int pass)
*/
if (m->busy != 0 || (m->oflags & VPO_BUSY) != 0) {
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
addl_page_shortage++;
continue;
}
@ -1016,7 +1017,7 @@ vm_pageout_scan(int pass)
vm_page_activate(m);
vm_page_unlock(m);
m->act_count += actcount + ACT_ADVANCE;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
goto relock_queues;
}
@ -1032,13 +1033,13 @@ vm_pageout_scan(int pass)
vm_page_activate(m);
vm_page_unlock(m);
m->act_count += actcount + ACT_ADVANCE + 1;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
goto relock_queues;
}
if (m->hold_count != 0) {
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
/*
* Held pages are essentially stuck in the
@ -1122,7 +1123,7 @@ vm_pageout_scan(int pass)
if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
vm_pagequeue_lock(pq);
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
queues_locked = TRUE;
vm_page_requeue_locked(m);
goto relock_queues;
@ -1165,17 +1166,17 @@ vm_pageout_scan(int pass)
KASSERT(mp != NULL,
("vp %p with NULL v_mount", vp));
vm_object_reference_locked(object);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK,
curthread)) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
++pageout_lock_miss;
if (object->flags & OBJ_MIGHTBEDIRTY)
vnodes_skipped++;
vp = NULL;
goto unlock_and_continue;
}
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
vm_page_lock(m);
vm_pagequeue_lock(pq);
queues_locked = TRUE;
@ -1236,7 +1237,7 @@ vm_pageout_scan(int pass)
}
unlock_and_continue:
vm_page_lock_assert(m, MA_NOTOWNED);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (mp != NULL) {
if (queues_locked) {
vm_pagequeue_unlock(pq);
@ -1251,7 +1252,7 @@ vm_pageout_scan(int pass)
goto relock_queues;
}
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
relock_queues:
if (!queues_locked) {
vm_pagequeue_lock(pq);
@ -1299,9 +1300,9 @@ vm_pageout_scan(int pass)
continue;
}
object = m->object;
if (!VM_OBJECT_TRYLOCK(object) &&
if (!VM_OBJECT_TRYWLOCK(object) &&
!vm_pageout_fallback_object_lock(m, &next)) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vm_page_unlock(m);
m = next;
continue;
@ -1314,7 +1315,7 @@ vm_pageout_scan(int pass)
(m->oflags & VPO_BUSY) ||
(m->hold_count != 0)) {
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vm_page_requeue_locked(m);
m = next;
continue;
@ -1375,7 +1376,7 @@ vm_pageout_scan(int pass)
vm_page_requeue_locked(m);
}
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
m = next;
}
vm_pagequeue_unlock(pq);
@ -1571,9 +1572,9 @@ vm_pageout_page_stats(void)
continue;
}
object = m->object;
if (!VM_OBJECT_TRYLOCK(object) &&
if (!VM_OBJECT_TRYWLOCK(object) &&
!vm_pageout_fallback_object_lock(m, &next)) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vm_page_unlock(m);
m = next;
continue;
@ -1586,7 +1587,7 @@ vm_pageout_page_stats(void)
(m->oflags & VPO_BUSY) ||
(m->hold_count != 0)) {
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vm_page_requeue_locked(m);
m = next;
continue;
@ -1625,7 +1626,7 @@ vm_pageout_page_stats(void)
}
}
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
m = next;
}
vm_pagequeue_unlock(pq);


@ -74,6 +74,7 @@ __FBSDID("$FreeBSD$");
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
@ -248,7 +249,7 @@ vm_pager_deallocate(object)
vm_object_t object;
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
(*pagertab[object->type]->pgo_dealloc) (object);
}
@ -272,13 +273,13 @@ vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
TAILQ_FOREACH(object, pg_list, pager_object_list) {
if (object->handle == handle) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if ((object->flags & OBJ_DEAD) == 0) {
vm_object_reference_locked(object);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
break;
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
}
return (object);
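vm_pager_object_lookup still takes the lock in write mode even though the OBJ_DEAD test is a pure read; this is exactly the kind of site the read-mode operations introduced here are meant for. A speculative sketch of a read-side probe after such a conversion (not part of this commit):

    boolean_t dead;

    /* Speculative: pure inspection needs only the read lock. */
    VM_OBJECT_RLOCK(object);
    dead = (object->flags & OBJ_DEAD) != 0;
    VM_OBJECT_RUNLOCK(object);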


@ -124,7 +124,7 @@ vm_pager_get_pages(
) {
int r;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
r = (*pagertab[object->type]->pgo_getpages)(object, m, count, reqpage);
if (r == VM_PAGER_OK && m[reqpage]->valid != VM_PAGE_BITS_ALL) {
vm_page_zero_invalid(m[reqpage], TRUE);
@ -141,7 +141,7 @@ vm_pager_put_pages(
int *rtvals
) {
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
(*pagertab[object->type]->pgo_putpages)
(object, m, count, flags, rtvals);
}
@ -165,7 +165,7 @@ vm_pager_has_page(
) {
boolean_t ret;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
ret = (*pagertab[object->type]->pgo_haspage)
(object, offset, before, after);
return (ret);
@ -188,7 +188,7 @@ static __inline void
vm_pager_page_unswapped(vm_page_t m)
{
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(m->object);
if (pagertab[m->object->type]->pgo_pageunswapped)
(*pagertab[m->object->type]->pgo_pageunswapped)(m);
}
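These vm_pager.h inlines are why every consumer must now also pull in sys/rwlock.h: the assertion macros expand to rwlock(9) primitives inside the consumer's translation unit. A plausible expansion of the new macros, assuming the vm_object embeds a struct rwlock named lock (the field name is an assumption here):

    #include <sys/rwlock.h>

    #define VM_OBJECT_WLOCK(object)         rw_wlock(&(object)->lock)
    #define VM_OBJECT_TRYWLOCK(object)      rw_try_wlock(&(object)->lock)
    #define VM_OBJECT_WUNLOCK(object)       rw_wunlock(&(object)->lock)
    #define VM_OBJECT_RLOCK(object)         rw_rlock(&(object)->lock)
    #define VM_OBJECT_TRYRLOCK(object)      rw_try_rlock(&(object)->lock)
    #define VM_OBJECT_RUNLOCK(object)       rw_runlock(&(object)->lock)
    #define VM_OBJECT_ASSERT_WLOCKED(object)                        \
            rw_assert(&(object)->lock, RA_WLOCKED)
    #define VM_OBJECT_ASSERT_RLOCKED(object)                        \
            rw_assert(&(object)->lock, RA_RLOCKED)
    #define VM_OBJECT_ASSERT_LOCKED(object)                         \
            rw_assert(&(object)->lock, RA_LOCKED)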


@ -47,6 +47,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
@ -311,7 +312,7 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
int i, index, n;
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));
/*
@ -495,7 +496,7 @@ vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex)
vm_reserv_t rv;
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
/*
* Is a reservation fundamentally impossible?
@ -870,7 +871,7 @@ vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
{
vm_reserv_t rv;
VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(new_object);
rv = vm_reserv_from_page(m);
if (rv->object == old_object) {
mtx_lock(&vm_page_queue_free_mtx);


@ -63,6 +63,7 @@ __FBSDID("$FreeBSD$");
#include <sys/vmmeter.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <machine/atomic.h>
@ -109,9 +110,9 @@ vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
return (0);
while ((object = vp->v_object) != NULL) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (!(object->flags & OBJ_DEAD)) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (0);
}
VOP_UNLOCK(vp, 0);
@ -135,9 +136,9 @@ vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
* Dereference the reference we just created. This assumes
* that the object is associated with the vp.
*/
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
object->ref_count--;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
vrele(vp);
KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));
@ -154,7 +155,7 @@ vnode_destroy_vobject(struct vnode *vp)
if (obj == NULL)
return;
ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
if (obj->ref_count == 0) {
/*
* vclean() may be called twice. The first time
@ -167,13 +168,13 @@ vnode_destroy_vobject(struct vnode *vp)
if ((obj->flags & OBJ_DEAD) == 0)
vm_object_terminate(obj);
else
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
} else {
/*
* Woe to the process that tries to page now :-).
*/
vm_pager_deallocate(obj);
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
}
vp->v_object = NULL;
}
@ -206,7 +207,7 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
*/
retry:
while ((object = vp->v_object) != NULL) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if ((object->flags & OBJ_DEAD) == 0)
break;
vm_object_set_flag(object, OBJ_DISCONNECTWNT);
@ -239,7 +240,7 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
VI_UNLOCK(vp);
} else {
object->ref_count++;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
vref(vp);
return (object);
@ -259,7 +260,7 @@ vnode_pager_dealloc(object)
if (vp == NULL)
panic("vnode_pager_dealloc: pager already dealloced");
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
vm_object_pip_wait(object, "vnpdea");
refs = object->ref_count;
@ -278,10 +279,10 @@ vnode_pager_dealloc(object)
}
vp->v_object = NULL;
VOP_UNSET_TEXT(vp);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
while (refs-- > 0)
vunref(vp);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}
static boolean_t
@ -299,7 +300,7 @@ vnode_pager_haspage(object, pindex, before, after)
int bsize;
int pagesperblock, blocksperpage;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
/*
* If no vp or vp is doomed or marked transparent to VM, we do not
* have the page.
@ -322,9 +323,9 @@ vnode_pager_haspage(object, pindex, before, after)
blocksperpage = (PAGE_SIZE / bsize);
reqblock = pindex * blocksperpage;
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (err)
return TRUE;
if (bn == -1)
@ -379,12 +380,12 @@ vnode_pager_setsize(vp, nsize)
if ((object = vp->v_object) == NULL)
return;
/* ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (nsize == object->un_pager.vnp.vnp_size) {
/*
* Hasn't changed size
*/
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return;
}
nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
@ -445,7 +446,7 @@ vnode_pager_setsize(vp, nsize)
}
object->un_pager.vnp.vnp_size = nsize;
object->size = nobjsize;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
/*
@ -568,9 +569,9 @@ vnode_pager_input_smlfs(object, m)
bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
KASSERT((m->dirty & bits) == 0,
("vnode_pager_input_smlfs: page %p is dirty", m));
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
m->valid |= bits;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
sf_buf_free(sf);
if (error) {
@ -594,7 +595,7 @@ vnode_pager_input_old(object, m)
struct sf_buf *sf;
struct vnode *vp;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
VM_OBJECT_ASSERT_WLOCKED(object);
error = 0;
/*
@ -607,7 +608,7 @@ vnode_pager_input_old(object, m)
if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
vp = object->handle;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
/*
* Allocate a kernel virtual address and initialize so that
@ -637,7 +638,7 @@ vnode_pager_input_old(object, m)
}
sf_buf_free(sf);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}
KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
if (!error)
@ -669,11 +670,11 @@ vnode_pager_getpages(object, m, count, reqpage)
int bytes = count * PAGE_SIZE;
vp = object->handle;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
KASSERT(rtval != EOPNOTSUPP,
("vnode_pager: FS getpages not implemented\n"));
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
return rtval;
}
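vnode_pager_getpages makes the pager lock contract visible: the method is entered with the object write-locked, drops the lock for the duration of the I/O so the filesystem may sleep, and must return with the lock reacquired. A sketch of a pager method honoring that contract (mypager_io is a hypothetical stand-in for the real I/O path):

    static int
    mypager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
    {
            int error;

            VM_OBJECT_ASSERT_WLOCKED(object);       /* entered locked */
            VM_OBJECT_WUNLOCK(object);              /* I/O may sleep */
            error = mypager_io(object->handle, m, count, reqpage);
            VM_OBJECT_WLOCK(object);                /* exit locked */
            return (error == 0 ? VM_PAGER_OK : VM_PAGER_ERROR);
    }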
@ -723,7 +724,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
*/
error = VOP_BMAP(vp, foff / bsize, &bo, &reqblock, NULL, NULL);
if (error == EOPNOTSUPP) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (i = 0; i < count; i++)
if (i != reqpage) {
@ -734,17 +735,17 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
PCPU_INC(cnt.v_vnodein);
PCPU_INC(cnt.v_vnodepgsin);
error = vnode_pager_input_old(object, m[reqpage]);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (error);
} else if (error != 0) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (i = 0; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_ERROR);
/*
@ -754,14 +755,14 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
*/
} else if ((PAGE_SIZE / bsize) > 1 &&
(vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (i = 0; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
PCPU_INC(cnt.v_vnodein);
PCPU_INC(cnt.v_vnodepgsin);
return vnode_pager_input_smlfs(object, m[reqpage]);
@ -772,7 +773,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
* clean up and return. Otherwise we have to re-read the
* media.
*/
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
for (i = 0; i < count; i++)
if (i != reqpage) {
@ -780,7 +781,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return VM_PAGER_OK;
} else if (reqblock == -1) {
pmap_zero_page(m[reqpage]);
@ -793,11 +794,11 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_OK);
}
m[reqpage]->valid = 0;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
/*
* here on direct device I/O
@ -810,18 +811,18 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
for (first = 0, i = 0; i < count; i = runend) {
if (vnode_pager_addr(vp, IDX_TO_OFF(m[i]->pindex), &firstaddr,
&runpg) != 0) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return (VM_PAGER_ERROR);
}
if (firstaddr == -1) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
panic("vnode_pager_getpages: unexpected missing page: firstaddr: %jd, foff: 0x%jx%08jx, vnp_size: 0x%jx%08jx",
(intmax_t)firstaddr, (uintmax_t)(foff >> 32),
@ -833,29 +834,29 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
runend = i + 1;
first = runend;
continue;
}
runend = i + runpg;
if (runend <= reqpage) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (j = i; j < runend; j++) {
vm_page_lock(m[j]);
vm_page_free(m[j]);
vm_page_unlock(m[j]);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
} else {
if (runpg < (count - first)) {
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (i = first + runpg; i < count; i++) {
vm_page_lock(m[i]);
vm_page_free(m[i]);
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
count = first + runpg;
}
break;
@ -946,7 +947,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
pbrelbo(bp);
relpbuf(bp, &vnode_pbuf_freecnt);
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
vm_page_t mt;
@ -983,7 +984,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
if (i != reqpage)
vm_page_readahead_finish(mt);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
if (error) {
printf("vnode_pager_getpages: I/O read error\n");
}
@ -1029,11 +1030,11 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
* Call device-specific putpages function
*/
vp = object->handle;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
KASSERT(rtval != EOPNOTSUPP,
("vnode_pager: stale FS putpages\n"));
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
}
@ -1095,7 +1096,7 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
* We do not under any circumstances truncate the valid bits, as
* this will screw up bogus page replacement.
*/
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
if (object->un_pager.vnp.vnp_size > poffset) {
int pgoff;
@ -1127,7 +1128,7 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
}
}
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
/*
* pageouts are already clustered, use IO_ASYNC to force a bawrite()
@ -1181,7 +1182,7 @@ vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written)
if (written == 0)
return;
obj = ma[0]->object;
VM_OBJECT_LOCK(obj);
VM_OBJECT_WLOCK(obj);
for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) {
if (pos < trunc_page(written)) {
rtvals[i] = VM_PAGER_OK;
@ -1192,7 +1193,7 @@ vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written)
vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK);
}
}
VM_OBJECT_UNLOCK(obj);
VM_OBJECT_WUNLOCK(obj);
}
void
@ -1202,9 +1203,9 @@ vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
struct vnode *vp;
vm_ooffset_t old_wm;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
if (object->type != OBJT_VNODE) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return;
}
old_wm = object->un_pager.vnp.writemappings;
@ -1221,7 +1222,7 @@ vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
__func__, vp, vp->v_writecount);
}
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
}
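Both writecount helpers open with the same defensive step: object->type is re-tested under the freshly acquired write lock, because the vnode can be reclaimed, and the object retyped, in the window before the lock is taken. The idiom, reduced to its skeleton:

    /* Sketch: revalidate object identity under the lock. */
    VM_OBJECT_WLOCK(object);
    if (object->type != OBJT_VNODE) {
            /* Vnode was reclaimed while we were unlocked. */
            VM_OBJECT_WUNLOCK(object);
            return;
    }
    vp = object->handle;    /* safe: type checked under the lock */
    /* ... adjust un_pager.vnp.writemappings ... */
    VM_OBJECT_WUNLOCK(object);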
void
@ -1232,14 +1233,14 @@ vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
struct mount *mp;
vm_offset_t inc;
VM_OBJECT_LOCK(object);
VM_OBJECT_WLOCK(object);
/*
* First, recheck the object type to account for the race when
* the vnode is reclaimed.
*/
if (object->type != OBJT_VNODE) {
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return;
}
@ -1250,13 +1251,13 @@ vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
inc = end - start;
if (object->un_pager.vnp.writemappings != inc) {
object->un_pager.vnp.writemappings -= inc;
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
return;
}
vp = object->handle;
vhold(vp);
VM_OBJECT_UNLOCK(object);
VM_OBJECT_WUNLOCK(object);
mp = NULL;
vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);