Switch some "low-hanging fruit" to acquire read locks on vmobjects rather than write locks.

Sponsored by:	EMC / Isilon storage division
Reviewed by:	alc
Tested by:	pho
Attilio Rao 2013-04-08 19:58:32 +00:00
parent 5923c29332
commit bc403f030d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=249277
3 changed files with 19 additions and 19 deletions

@@ -1277,15 +1277,15 @@ each_writable_segment(td, func, closure)
 			continue;
 		/* Ignore memory-mapped devices and such things. */
-		VM_OBJECT_WLOCK(object);
+		VM_OBJECT_RLOCK(object);
 		while ((backing_object = object->backing_object) != NULL) {
-			VM_OBJECT_WLOCK(backing_object);
-			VM_OBJECT_WUNLOCK(object);
+			VM_OBJECT_RLOCK(backing_object);
+			VM_OBJECT_RUNLOCK(object);
 			object = backing_object;
 		}
 		ignore_entry = object->type != OBJT_DEFAULT &&
 		    object->type != OBJT_SWAP && object->type != OBJT_VNODE;
-		VM_OBJECT_WUNLOCK(object);
+		VM_OBJECT_RUNLOCK(object);
 		if (ignore_entry)
 			continue;
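
The hunk above only reads object state (the backing_object pointers and the terminal object's type), which is why the shared lock suffices. As a standalone illustration of the pattern after this change, here is a minimal sketch assuming kernel context; the helper name is invented and it is not part of the commit.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

/*
 * Illustrative sketch only (the function name is made up, not part of the
 * commit): walk to the bottom of a backing-object chain hand over hand
 * under shared locks, then classify the terminal object by type.  Nothing
 * here modifies the objects, so VM_OBJECT_RLOCK() is sufficient.
 */
static int
object_chain_is_core_dumpable(vm_object_t object)
{
	vm_object_t backing_object;
	int dumpable;

	VM_OBJECT_RLOCK(object);
	while ((backing_object = object->backing_object) != NULL) {
		/* Lock the next object before dropping the current one. */
		VM_OBJECT_RLOCK(backing_object);
		VM_OBJECT_RUNLOCK(object);
		object = backing_object;
	}
	dumpable = object->type == OBJT_DEFAULT ||
	    object->type == OBJT_SWAP || object->type == OBJT_VNODE;
	VM_OBJECT_RUNLOCK(object);
	return (dumpable);
}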

@@ -1995,7 +1995,7 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
 		kve->kve_private_resident = 0;
 		obj = entry->object.vm_object;
 		if (obj != NULL) {
-			VM_OBJECT_WLOCK(obj);
+			VM_OBJECT_RLOCK(obj);
 			if (obj->shadow_count == 1)
 				kve->kve_private_resident =
 				    obj->resident_page_count;
@@ -2010,9 +2010,9 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
 		for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
 			if (tobj != obj)
-				VM_OBJECT_WLOCK(tobj);
+				VM_OBJECT_RLOCK(tobj);
 			if (lobj != obj)
-				VM_OBJECT_WUNLOCK(lobj);
+				VM_OBJECT_RUNLOCK(lobj);
 			lobj = tobj;
 		}
@@ -2072,11 +2072,11 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
 				break;
 			}
 			if (lobj != obj)
-				VM_OBJECT_WUNLOCK(lobj);
+				VM_OBJECT_RUNLOCK(lobj);
 			kve->kve_ref_count = obj->ref_count;
 			kve->kve_shadow_count = obj->shadow_count;
-			VM_OBJECT_WUNLOCK(obj);
+			VM_OBJECT_RUNLOCK(obj);
 			if (vp != NULL) {
 				vn_fullpath(curthread, vp, &fullpath,
 				    &freepath);
@@ -2162,7 +2162,7 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
 		kve->kve_private_resident = 0;
 		obj = entry->object.vm_object;
 		if (obj != NULL) {
-			VM_OBJECT_WLOCK(obj);
+			VM_OBJECT_RLOCK(obj);
 			if (obj->shadow_count == 1)
 				kve->kve_private_resident =
 				    obj->resident_page_count;
@@ -2183,9 +2183,9 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
 		for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
 			if (tobj != obj)
-				VM_OBJECT_WLOCK(tobj);
+				VM_OBJECT_RLOCK(tobj);
 			if (lobj != obj)
-				VM_OBJECT_WUNLOCK(lobj);
+				VM_OBJECT_RUNLOCK(lobj);
 			lobj = tobj;
 		}
@@ -2247,11 +2247,11 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
 				break;
 			}
 			if (lobj != obj)
-				VM_OBJECT_WUNLOCK(lobj);
+				VM_OBJECT_RUNLOCK(lobj);
 			kve->kve_ref_count = obj->ref_count;
 			kve->kve_shadow_count = obj->shadow_count;
-			VM_OBJECT_WUNLOCK(obj);
+			VM_OBJECT_RUNLOCK(obj);
 			if (vp != NULL) {
 				vn_fullpath(curthread, vp, &fullpath,
 				    &freepath);
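
The sysctl handlers use a slightly different shape: a lock-coupled for loop over the backing-object chain that keeps the top object obj read-locked throughout (its ref_count and shadow_count are read afterwards) and finishes with the bottom object also read-locked. A hedged sketch of that traversal follows; the helper name is hypothetical and it assumes the caller already holds the read lock on obj.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

/*
 * Hypothetical helper, not part of the commit: descend the backing-object
 * chain hand over hand with read locks.  On entry "obj" must already be
 * read-locked by the caller; on return both "obj" and the bottom object
 * "*lobjp" are read-locked (they may be the same object).
 */
static void
object_chain_rlock_bottom(vm_object_t obj, vm_object_t *lobjp)
{
	vm_object_t lobj, tobj;

	for (lobj = tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
		if (tobj != obj)
			VM_OBJECT_RLOCK(tobj);
		if (lobj != obj)
			VM_OBJECT_RUNLOCK(lobj);
		lobj = tobj;
	}
	*lobjp = lobj;
}

After the bottom object has been examined, the caller drops that extra lock first, then reads obj->ref_count and obj->shadow_count before releasing obj, exactly as in the hunks above.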

@@ -382,7 +382,7 @@ ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
 		obj = entry->object.vm_object;
 		if (obj != NULL)
-			VM_OBJECT_WLOCK(obj);
+			VM_OBJECT_RLOCK(obj);
 	} while (0);
 	vm_map_unlock_read(map);
@@ -395,9 +395,9 @@ ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
 		lobj = obj;
 		for (tobj = obj; tobj != NULL; tobj = tobj->backing_object) {
 			if (tobj != obj)
-				VM_OBJECT_WLOCK(tobj);
+				VM_OBJECT_RLOCK(tobj);
 			if (lobj != obj)
-				VM_OBJECT_WUNLOCK(lobj);
+				VM_OBJECT_RUNLOCK(lobj);
 			lobj = tobj;
 			pve->pve_offset += tobj->backing_object_offset;
 		}
@@ -405,8 +405,8 @@ ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
 		if (vp != NULL)
 			vref(vp);
 		if (lobj != obj)
-			VM_OBJECT_WUNLOCK(lobj);
-		VM_OBJECT_WUNLOCK(obj);
+			VM_OBJECT_RUNLOCK(lobj);
+			VM_OBJECT_RUNLOCK(obj);
 		if (vp != NULL) {
 			freepath = NULL;
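
For completeness, a hedged sketch of how a caller of the hypothetical helper above might mirror the ptrace_vm_entry tail: the vnode is referenced while the bottom object is still read-locked, and only then are both shared locks released. The OBJT_VNODE/handle test is an assumption here, since the lines that set vp fall outside the hunk.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

/*
 * Illustrative caller (hypothetical, not part of the commit): find the
 * bottom object's vnode, take a reference on it while the object is
 * still read-locked, then drop both shared locks.
 */
static struct vnode *
object_chain_get_vnode(vm_object_t obj)
{
	vm_object_t lobj;
	struct vnode *vp;

	VM_OBJECT_RLOCK(obj);
	object_chain_rlock_bottom(obj, &lobj);
	vp = (lobj->type == OBJT_VNODE) ? lobj->handle : NULL;
	if (vp != NULL)
		vref(vp);	/* reference taken before the locks are dropped */
	if (lobj != obj)
		VM_OBJECT_RUNLOCK(lobj);
	VM_OBJECT_RUNLOCK(obj);
	return (vp);
}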