Define macro VM_MAP_ENTRY_FOREACH for enumerating the entries in a vm_map.

If the implementation ever changes from using a chain of next pointers,
the macro definition will have to change, but the files that iterate over
vm_map entries will not.
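
The macro added in the header hunk below is a thin wrapper around the
existing sentinel-headed list walk, so callers stop naming header/next
directly. The following is a minimal, self-contained userland sketch of the
pattern; the demo_entry/demo_map types and main() are stand-ins invented
here for illustration, and only the macro body matches the real one:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures, just to make the
 * pattern runnable outside the kernel; the sentinel "header" and the
 * "next" field mirror the layout the macro relies on. */
struct demo_entry {
	struct demo_entry *next;
	int id;
};

struct demo_map {
	struct demo_entry header;	/* sentinel: the list is circular */
};

/* Same body as the macro added in the header hunk below. */
#define VM_MAP_ENTRY_FOREACH(it, map) \
	for ((it) = (map)->header.next; \
	    (it) != &(map)->header; \
	    (it) = (it)->next)

int
main(void)
{
	struct demo_entry e1, e2, *it;
	struct demo_map map;

	/* header -> e1 -> e2 -> header */
	e1.id = 1;
	e2.id = 2;
	map.header.next = &e1;
	e1.next = &e2;
	e2.next = &map.header;

	/* Callers never touch header/next directly, so only the macro
	 * definition would change if the representation ever did. */
	VM_MAP_ENTRY_FOREACH(it, &map)
		printf("entry %d\n", it->id);
	return (0);
}

The output is just the two entry ids in order; the point is that the loop
body never sees the linked-list plumbing.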

Drop a counter in vm_object.c that would have an effect only if the
vm_map entry count (nentries) were wrong.
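
Condensed from the _vm_object_in_map() hunk below, to show why the counter
was dead weight: it could only terminate the walk early if map->nentries
disagreed with the entry chain itself.

/* Before: */
tmpe = map->header.next;
entcount = map->nentries;
while (entcount-- && (tmpe != &map->header)) {
	if (_vm_object_in_map(map, object, tmpe)) {
		return 1;
	}
	tmpe = tmpe->next;
}

/* After: trust the entry chain. */
VM_MAP_ENTRY_FOREACH(tmpe, map) {
	if (_vm_object_in_map(map, object, tmpe)) {
		return 1;
	}
}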

Discussed with: alc
Reviewed by: markj
Tested by: pho (earlier version)
Differential Revision:	https://reviews.freebsd.org/D21882
Doug Moore 2019-10-08 07:14:21 +00:00
parent dbef5f7155
commit 2288078c5e
13 changed files with 28 additions and 45 deletions


@@ -1174,8 +1174,7 @@ linprocfs_doprocmaps(PFS_FILL_ARGS)
 		l_map_str = l32_map_str;
 	map = &vm->vm_map;
 	vm_map_lock_read(map);
-	for (entry = map->header.next; entry != &map->header;
-	    entry = entry->next) {
+	VM_MAP_ENTRY_FOREACH(entry, map) {
 		name = "";
 		freename = NULL;
 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)


@@ -1884,7 +1884,7 @@ pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
 	map = &vm->vm_map;
 	vm_map_lock_read(map);
-	for (entry = map->header.next; entry != &map->header; entry = entry->next) {
+	VM_MAP_ENTRY_FOREACH(entry, map) {
 		if (entry == NULL) {
 			PMCDBG2(LOG,OPS,2, "hwpmc: vm_map entry unexpectedly "
@@ -1988,7 +1988,7 @@ pmc_log_process_mappings(struct pmc_owner *po, struct proc *p)
 		 * new lookup for this entry. If there is no entry
 		 * for this address range, vm_map_lookup_entry() will
 		 * return the previous one, so we always want to go to
-		 * entry->next on the next loop iteration.
+		 * the next entry on the next loop iteration.
 		 *
 		 * There is an edge condition here that can occur if
 		 * there is no entry at or before this address. In


@@ -118,8 +118,7 @@ procfs_doprocmap(PFS_FILL_ARGS)
 		return (ESRCH);
 	map = &vm->vm_map;
 	vm_map_lock_read(map);
-	for (entry = map->header.next; entry != &map->header;
-	    entry = entry->next) {
+	VM_MAP_ENTRY_FOREACH(entry, map) {
 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
 			continue;


@@ -262,8 +262,7 @@ tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t,
 		vm_map_lock(map);
 		if (map->busy)
 			vm_map_wait_busy(map);
-		for (entry = map->header.next; entry != &map->header;
-		    entry = entry->next) {
+		VM_MAP_ENTRY_FOREACH(entry, map) {
 			if ((entry->eflags & (MAP_ENTRY_GUARD |
 			    MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_COW)) != 0 ||
 			    (entry->max_protection & VM_PROT_WRITE) == 0)


@@ -1738,8 +1738,7 @@ each_dumpable_segment(struct thread *td, segment_callback func, void *closure)
 	boolean_t ignore_entry;
 	vm_map_lock_read(map);
-	for (entry = map->header.next; entry != &map->header;
-	    entry = entry->next) {
+	VM_MAP_ENTRY_FOREACH(entry, map) {
 		/*
 		 * Don't dump inaccessible mappings, deal with legacy
 		 * coredump mode.


@@ -2239,8 +2239,7 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
 	map = &vm->vm_map;
 	vm_map_lock_read(map);
-	for (entry = map->header.next; entry != &map->header;
-	    entry = entry->next) {
+	VM_MAP_ENTRY_FOREACH(entry, map) {
 		vm_object_t obj, tobj, lobj;
 		vm_offset_t addr;
@@ -2455,8 +2454,7 @@ kern_proc_vmmap_out(struct proc *p, struct sbuf *sb, ssize_t maxlen, int flags)
 	error = 0;
 	map = &vm->vm_map;
 	vm_map_lock_read(map);
-	for (entry = map->header.next; entry != &map->header;
-	    entry = entry->next) {
+	VM_MAP_ENTRY_FOREACH(entry, map) {
 		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
 			continue;


@@ -382,22 +382,19 @@ ptrace_vm_entry(struct thread *td, struct proc *p, struct ptrace_vm_entry *pve)
 	vm_map_lock_read(map);
 	do {
-		entry = map->header.next;
-		index = 0;
-		while (index < pve->pve_entry && entry != &map->header) {
-			entry = entry->next;
-			index++;
-		}
-		if (index != pve->pve_entry) {
-			error = EINVAL;
-			break;
-		}
 		KASSERT((map->header.eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
 		    ("Submap in map header"));
-		while ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
-			entry = entry->next;
+		index = 0;
+		VM_MAP_ENTRY_FOREACH(entry, map) {
+			if (index >= pve->pve_entry &&
+			    (entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
+				break;
 			index++;
 		}
+		if (index < pve->pve_entry) {
+			error = EINVAL;
+			break;
+		}
 		if (entry == &map->header) {
 			error = ENOENT;
 			break;


@@ -264,7 +264,7 @@ mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
 		return;
 	vm_map_lock(map);
-	for (vme = map->header.next; vme != &map->header; vme = vme->next) {
+	VM_MAP_ENTRY_FOREACH(vme, map) {
 		if (vme->eflags & MAP_ENTRY_IS_SUB_MAP) {
 			mac_proc_vm_revoke_recurse(td, cred,
 			    vme->object.sub_map);


@@ -2621,7 +2621,7 @@ vmspace_swap_count(struct vmspace *vmspace)
 	map = &vmspace->vm_map;
 	count = 0;
-	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
+	VM_MAP_ENTRY_FOREACH(cur, map) {
 		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
 			continue;
 		object = cur->object.vm_object;


@@ -416,6 +416,10 @@ int vm_map_lookup_locked(vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, v
     vm_pindex_t *, vm_prot_t *, boolean_t *);
 void vm_map_lookup_done (vm_map_t, vm_map_entry_t);
 boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
+#define VM_MAP_ENTRY_FOREACH(it, map) \
+	for ((it) = (map)->header.next; \
+	    (it) != &(map)->header; \
+	    (it) = (it)->next)
 int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
 int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
 void vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev,


@@ -2376,29 +2376,22 @@ _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
 	vm_map_t tmpm;
 	vm_map_entry_t tmpe;
 	vm_object_t obj;
-	int entcount;
 	if (map == 0)
 		return 0;
 	if (entry == 0) {
-		tmpe = map->header.next;
-		entcount = map->nentries;
-		while (entcount-- && (tmpe != &map->header)) {
+		VM_MAP_ENTRY_FOREACH(tmpe, map) {
 			if (_vm_object_in_map(map, object, tmpe)) {
 				return 1;
 			}
-			tmpe = tmpe->next;
 		}
 	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
 		tmpm = entry->object.sub_map;
-		tmpe = tmpm->header.next;
-		entcount = tmpm->nentries;
-		while (entcount-- && tmpe != &tmpm->header) {
+		VM_MAP_ENTRY_FOREACH(tmpe, tmpm) {
 			if (_vm_object_in_map(tmpm, object, tmpe)) {
 				return 1;
 			}
-			tmpe = tmpe->next;
 		}
 	} else if ((obj = entry->object.vm_object) != NULL) {
 		for (; obj; obj = obj->backing_object)


@@ -1783,8 +1783,7 @@ vm_pageout_oom_pagecount(struct vmspace *vmspace)
 	KASSERT(!map->system_map, ("system map"));
 	sx_assert(&map->lock, SA_LOCKED);
 	res = 0;
-	for (entry = map->header.next; entry != &map->header;
-	    entry = entry->next) {
+	VM_MAP_ENTRY_FOREACH(entry, map) {
 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
 			continue;
 		obj = entry->object.vm_object;


@@ -284,8 +284,7 @@ vm_swapout_map_deactivate_pages(vm_map_t map, long desired)
 	 * first, search out the biggest object, and try to free pages from
 	 * that.
 	 */
-	tmpe = map->header.next;
-	while (tmpe != &map->header) {
+	VM_MAP_ENTRY_FOREACH(tmpe, map) {
 		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
 			obj = tmpe->object.vm_object;
 			if (obj != NULL && VM_OBJECT_TRYRLOCK(obj)) {
@@ -302,7 +301,6 @@ vm_swapout_map_deactivate_pages(vm_map_t map, long desired)
 		}
 		if (tmpe->wired_count > 0)
 			nothingwired = FALSE;
-		tmpe = tmpe->next;
 	}
 	if (bigobj != NULL) {
@@ -313,8 +311,7 @@ vm_swapout_map_deactivate_pages(vm_map_t map, long desired)
 	 * Next, hunt around for other pages to deactivate. We actually
 	 * do this search sort of wrong -- .text first is not the best idea.
 	 */
-	tmpe = map->header.next;
-	while (tmpe != &map->header) {
+	VM_MAP_ENTRY_FOREACH(tmpe, map) {
 		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
 			break;
 		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
@@ -326,7 +323,6 @@ vm_swapout_map_deactivate_pages(vm_map_t map, long desired)
 				VM_OBJECT_RUNLOCK(obj);
 			}
 		}
-		tmpe = tmpe->next;
 	}
 	/*