Define wrapper functions vm_map_entry_{succ,pred} to act as wrappers
around entry->{next,prev} when those fields are used for ordered list
traversal, and use those wrappers everywhere. Where the next field is
used for maintaining a stack of deferred operations, #define defer_next
to make that different usage clearer, and then use the 'right' pointer
instead of 'next' for that purpose.

Approved by: markj
Tested by: pho (as part of a larger patch)
Differential Revision: https://reviews.freebsd.org/D22347
commit 7cdcf86360 (parent 2058e7dbde)
Author: Doug Moore
Date:   2019-11-13 15:56:07 +00:00
Notes:  svn2git (2020-12-20): svn path=/head/; revision=354684

4 changed files with 101 additions and 62 deletions
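The shape of the change, condensed into a minimal before/after sketch
(illustration only, not itself part of the commit):

/* Before: 'next' carries two unrelated meanings. */
entry = entry->next;                       /* ordered-list traversal */
entry->next = curthread->td_map_def_user;  /* deferred-free stack link */

/* After: each use is named for what it actually does. */
entry = vm_map_entry_succ(entry);
entry->defer_next = curthread->td_map_def_user;  /* defer_next == right */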

File: sys/security/mac/mac_process.c

@ -363,7 +363,8 @@ mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
}
pmap_protect(map->pmap, vme->start, vme->end,
vme->protection & ~revokeperms);
-vm_map_try_merge_entries(map, vme->prev, vme);
+vm_map_try_merge_entries(map, vm_map_entry_pred(vme),
+    vme);
}
}
vm_map_unlock(map);

File: sys/vm/vm_map.c

@ -563,6 +563,12 @@ vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add)
}
}
+
+/*
+ * Use a different name for this vm_map_entry field when its use
+ * is not consistent with its use as part of an ordered search tree.
+ */
+#define defer_next right
static void
vm_map_process_deferred(void)
{
@ -574,7 +580,7 @@ vm_map_process_deferred(void)
entry = td->td_map_def_user;
td->td_map_def_user = NULL;
while (entry != NULL) {
-next = entry->next;
+next = entry->defer_next;
MPASS((entry->eflags & (MAP_ENTRY_WRITECNT |
MAP_ENTRY_VN_EXEC)) != (MAP_ENTRY_WRITECNT |
MAP_ENTRY_VN_EXEC));
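The list that defer_next threads through is a per-thread LIFO: entries
are pushed in vm_map_entry_delete() and drained here once the map lock
is dropped. A user-space toy model of that pattern (hypothetical types,
not kernel code), which also shows why each link must be saved before
its entry is freed:

#include <stdio.h>

/* Toy map entry: 'right' doubles as the deferred-stack link. */
struct entry {
	struct entry *next;	/* ordered-list successor */
	struct entry *right;	/* tree child, reused while deferred */
};
#define defer_next right

static struct entry *deferred;	/* stands in for td->td_map_def_user */

static void
defer(struct entry *e)
{
	e->defer_next = deferred;	/* push */
	deferred = e;
}

static void
process_deferred(void)
{
	struct entry *e, *next;

	e = deferred;
	deferred = NULL;
	while (e != NULL) {
		next = e->defer_next;	/* save link before "freeing" e */
		printf("deallocate %p\n", (void *)e);
		e = next;
	}
}

int
main(void)
{
	struct entry a = { 0 }, b = { 0 };

	defer(&a);
	defer(&b);
	process_deferred();	/* LIFO: b is deallocated before a */
	return (0);
}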
@ -1436,7 +1442,7 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
/*
* Assert that the next entry doesn't overlap the end point.
*/
-if (prev_entry->next->start < end)
+if (vm_map_entry_succ(prev_entry)->start < end)
return (KERN_NO_SPACE);
if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
@ -1529,7 +1535,8 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
map->size += end - prev_entry->end;
vm_map_entry_resize(map, prev_entry,
end - prev_entry->end);
-vm_map_try_merge_entries(map, prev_entry, prev_entry->next);
+vm_map_try_merge_entries(map, prev_entry,
+    vm_map_entry_succ(prev_entry));
return (KERN_SUCCESS);
}
@ -1590,7 +1597,7 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
* other cases, which are less common.
*/
vm_map_try_merge_entries(map, prev_entry, new_entry);
-vm_map_try_merge_entries(map, new_entry, new_entry->next);
+vm_map_try_merge_entries(map, new_entry, vm_map_entry_succ(new_entry));
if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
@ -2288,7 +2295,7 @@ vm_map_submap(
if (vm_map_lookup_entry(map, start, &entry)) {
vm_map_clip_start(map, entry, start);
} else
-entry = entry->next;
+entry = vm_map_entry_succ(entry);
vm_map_clip_end(map, entry, end);
@ -2445,12 +2452,13 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
VM_MAP_RANGE_CHECK(map, start, end);
if (!vm_map_lookup_entry(map, start, &entry))
-entry = entry->next;
+entry = vm_map_entry_succ(entry);
/*
* Make a first pass to check for protection violations.
*/
-for (current = entry; current->start < end; current = current->next) {
+for (current = entry; current->start < end;
+    current = vm_map_entry_succ(current)) {
if ((current->eflags & MAP_ENTRY_GUARD) != 0)
continue;
if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
@ -2488,7 +2496,8 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
*/
rv = KERN_SUCCESS;
vm_map_clip_start(map, entry, start);
-for (current = entry; current->start < end; current = current->next) {
+for (current = entry; current->start < end;
+    current = vm_map_entry_succ(current)) {
vm_map_clip_end(map, current, end);
@ -2546,8 +2555,8 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
* [Note that clipping is not necessary the second time.]
*/
for (current = entry; current->start < end;
-vm_map_try_merge_entries(map, current->prev, current),
-current = current->next) {
+vm_map_try_merge_entries(map, vm_map_entry_pred(current), current),
+current = vm_map_entry_succ(current)) {
if (rv != KERN_SUCCESS ||
(current->eflags & MAP_ENTRY_GUARD) != 0)
continue;
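(The merge call above sits in the for-loop's update clause, joined to
the advance by a comma, so it still runs on iterations where the body
hits 'continue'. A runnable toy demonstrating that property of the
idiom, unrelated to the kernel types:)

#include <stdio.h>

int
main(void)
{
	int i, merges = 0;

	for (i = 0; i < 5; merges++, i++) {
		if (i % 2 == 0)
			continue;	/* merges++ still executes */
		printf("body ran for %d\n", i);
	}
	printf("merges = %d\n", merges);	/* 5, not 2 */
	return (0);
}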
@ -2585,7 +2594,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
#undef MASK
}
}
-vm_map_try_merge_entries(map, current->prev, current);
+vm_map_try_merge_entries(map, vm_map_entry_pred(current), current);
vm_map_unlock(map);
return (rv);
}
@ -2648,7 +2657,7 @@ vm_map_madvise(
if (modify_map)
vm_map_clip_start(map, entry, start);
} else {
-entry = entry->next;
+entry = vm_map_entry_succ(entry);
}
if (modify_map) {
@ -2659,7 +2668,7 @@ vm_map_madvise(
* limited to the specified address range.
*/
for (current = entry; current->start < end;
-current = current->next) {
+current = vm_map_entry_succ(current)) {
if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
continue;
@ -2690,9 +2699,11 @@ vm_map_madvise(
default:
break;
}
-vm_map_try_merge_entries(map, current->prev, current);
+vm_map_try_merge_entries(map,
+    vm_map_entry_pred(current), current);
}
-vm_map_try_merge_entries(map, current->prev, current);
+vm_map_try_merge_entries(map, vm_map_entry_pred(current),
+    current);
vm_map_unlock(map);
} else {
vm_pindex_t pstart, pend;
@ -2705,7 +2716,7 @@ vm_map_madvise(
* the vm_object pindex and count.
*/
for (current = entry; current->start < end;
-current = current->next) {
+current = vm_map_entry_succ(current)) {
vm_offset_t useEnd, useStart;
if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
@ -2812,16 +2823,16 @@ vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
entry = temp_entry;
vm_map_clip_start(map, entry, start);
} else
-entry = temp_entry->next;
+entry = vm_map_entry_succ(temp_entry);
while (entry->start < end) {
vm_map_clip_end(map, entry, end);
if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
new_inheritance != VM_INHERIT_ZERO)
entry->inheritance = new_inheritance;
-vm_map_try_merge_entries(map, entry->prev, entry);
-entry = entry->next;
+vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+entry = vm_map_entry_succ(entry);
}
-vm_map_try_merge_entries(map, entry->prev, entry);
+vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
vm_map_unlock(map);
return (KERN_SUCCESS);
}
@ -2870,7 +2881,7 @@ vm_map_entry_in_transition(vm_map_t map, vm_offset_t in_start,
*io_end = start;
return (NULL);
}
-entry = entry->next;
+entry = vm_map_entry_succ(entry);
}
return (entry);
}
@ -2896,7 +2907,7 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
VM_MAP_RANGE_CHECK(map, start, end);
if (!vm_map_lookup_entry(map, start, &first_entry)) {
if (holes_ok)
-first_entry = first_entry->next;
+first_entry = vm_map_entry_succ(first_entry);
else {
vm_map_unlock(map);
return (KERN_INVALID_ADDRESS);
@ -2940,7 +2951,8 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
* If holes_ok, skip this check.
*/
if (!holes_ok &&
-(entry->end < end && entry->next->start > entry->end)) {
+(entry->end < end &&
+    vm_map_entry_succ(entry)->start > entry->end)) {
end = entry->end;
rv = KERN_INVALID_ADDRESS;
break;
@ -2954,15 +2966,16 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
rv = KERN_INVALID_ARGUMENT;
break;
}
-entry = entry->next;
+entry = vm_map_entry_succ(entry);
}
need_wakeup = false;
if (first_entry == NULL &&
!vm_map_lookup_entry(map, start, &first_entry)) {
KASSERT(holes_ok, ("vm_map_unwire: lookup failed"));
-first_entry = first_entry->next;
+first_entry = vm_map_entry_succ(first_entry);
}
-for (entry = first_entry; entry->start < end; entry = entry->next) {
+for (entry = first_entry; entry->start < end;
+    entry = vm_map_entry_succ(entry)) {
/*
* If holes_ok was specified, an empty
* space in the unwired region could have been mapped
@ -2998,9 +3011,9 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
need_wakeup = true;
}
-vm_map_try_merge_entries(map, entry->prev, entry);
+vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
}
-vm_map_try_merge_entries(map, entry->prev, entry);
+vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
vm_map_unlock(map);
if (need_wakeup)
vm_map_wakeup(map);
@ -3106,7 +3119,7 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
VM_MAP_RANGE_CHECK(map, start, end);
if (!vm_map_lookup_entry(map, start, &first_entry)) {
if (holes_ok)
-first_entry = first_entry->next;
+first_entry = vm_map_entry_succ(first_entry);
else
return (KERN_INVALID_ADDRESS);
}
@ -3210,7 +3223,7 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
faddr < entry->end)
vm_map_wire_entry_failure(map,
entry, faddr);
-entry = entry->next;
+entry = vm_map_entry_succ(entry);
}
}
if (rv != KERN_SUCCESS) {
@ -3229,12 +3242,13 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
* If holes_ok was specified, skip this check.
*/
if (!holes_ok &&
-entry->end < end && entry->next->start > entry->end) {
+entry->end < end &&
+    vm_map_entry_succ(entry)->start > entry->end) {
end = entry->end;
rv = KERN_INVALID_ADDRESS;
goto done;
}
-entry = entry->next;
+entry = vm_map_entry_succ(entry);
}
rv = KERN_SUCCESS;
done:
@ -3242,9 +3256,10 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
if (first_entry == NULL &&
!vm_map_lookup_entry(map, start, &first_entry)) {
KASSERT(holes_ok, ("vm_map_wire: lookup failed"));
-first_entry = first_entry->next;
+first_entry = vm_map_entry_succ(first_entry);
}
-for (entry = first_entry; entry->start < end; entry = entry->next) {
+for (entry = first_entry; entry->start < end;
+    entry = vm_map_entry_succ(entry)) {
/*
* If holes_ok was specified, an empty
* space in the unwired region could have been mapped
@ -3297,9 +3312,9 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
need_wakeup = true;
}
-vm_map_try_merge_entries(map, entry->prev, entry);
+vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
}
-vm_map_try_merge_entries(map, entry->prev, entry);
+vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
if (need_wakeup)
vm_map_wakeup(map);
return (rv);
@ -3349,13 +3364,14 @@ vm_map_sync(
/*
* Make a first pass to check for user-wired memory and holes.
*/
-for (current = entry; current->start < end; current = current->next) {
+for (current = entry; current->start < end;
+    current = vm_map_entry_succ(current)) {
if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
vm_map_unlock_read(map);
return (KERN_INVALID_ARGUMENT);
}
if (end > current->end &&
-current->end != current->next->start) {
+current->end != vm_map_entry_succ(current)->start) {
vm_map_unlock_read(map);
return (KERN_INVALID_ADDRESS);
}
@ -3399,7 +3415,7 @@ vm_map_sync(
vm_map_lock_read(map);
if (last_timestamp == map->timestamp ||
!vm_map_lookup_entry(map, start, &current))
-current = current->next;
+current = vm_map_entry_succ(current);
}
vm_map_unlock_read(map);
@ -3517,7 +3533,7 @@ vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
if (map->system_map)
vm_map_entry_deallocate(entry, TRUE);
else {
-entry->next = curthread->td_map_def_user;
+entry->defer_next = curthread->td_map_def_user;
curthread->td_map_def_user = entry;
}
}
@ -3542,7 +3558,7 @@ vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
* Find the start of the region, and clip it
*/
if (!vm_map_lookup_entry(map, start, &first_entry))
-entry = first_entry->next;
+entry = vm_map_entry_succ(first_entry);
else {
entry = first_entry;
vm_map_clip_start(map, entry, start);
@ -3580,7 +3596,7 @@ vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
*/
if (!vm_map_lookup_entry(map, saved_start,
&tmp_entry))
-entry = tmp_entry->next;
+entry = vm_map_entry_succ(tmp_entry);
else {
entry = tmp_entry;
vm_map_clip_start(map, entry,
@ -3591,7 +3607,7 @@ vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
}
vm_map_clip_end(map, entry, end);
-next = entry->next;
+next = vm_map_entry_succ(entry);
/*
* Unwire before removing addresses from the pmap; otherwise,
@ -3680,7 +3696,7 @@ vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
return (FALSE);
/* go to next entry */
start = entry->end;
-entry = entry->next;
+entry = vm_map_entry_succ(entry);
}
return (TRUE);
}
@ -3789,7 +3805,8 @@ vm_map_copy_entry(
fake_entry->object.vm_object = src_object;
fake_entry->start = src_entry->start;
fake_entry->end = src_entry->end;
-fake_entry->next = curthread->td_map_def_user;
+fake_entry->defer_next =
+    curthread->td_map_def_user;
curthread->td_map_def_user = fake_entry;
}
@ -4049,7 +4066,7 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
break;
}
-old_entry = old_entry->next;
+old_entry = vm_map_entry_succ(old_entry);
}
/*
* Use inlined vm_map_unlock() to postpone handling the deferred
@ -4136,7 +4153,7 @@ vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
/*
* If we can't accommodate max_ssize in the current mapping, no go.
*/
-if (prev_entry->next->start < addrbos + max_ssize)
+if (vm_map_entry_succ(prev_entry)->start < addrbos + max_ssize)
return (KERN_NO_SPACE);
/*
@ -4163,7 +4180,7 @@ vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
if (rv != KERN_SUCCESS)
return (rv);
-new_entry = prev_entry->next;
+new_entry = vm_map_entry_succ(prev_entry);
KASSERT(new_entry->end == top || new_entry->start == bot,
("Bad entry start/end for new stack entry"));
KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
@ -4185,9 +4202,9 @@ vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
* stack_guard_page for vm_map_growstack().
*/
if (orient == MAP_STACK_GROWS_DOWN)
-new_entry->prev->next_read = sgp;
+vm_map_entry_pred(new_entry)->next_read = sgp;
else
-new_entry->next->next_read = sgp;
+vm_map_entry_succ(new_entry)->next_read = sgp;
} else {
(void)vm_map_delete(map, bot, top);
}
@ -4241,14 +4258,14 @@ vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
return (KERN_SUCCESS);
if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
-stack_entry = gap_entry->next;
+stack_entry = vm_map_entry_succ(gap_entry);
if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
stack_entry->start != gap_entry->end)
return (KERN_FAILURE);
grow_amount = round_page(stack_entry->start - addr);
grow_down = true;
} else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) {
-stack_entry = gap_entry->prev;
+stack_entry = vm_map_entry_pred(gap_entry);
if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 ||
stack_entry->end != gap_entry->start)
return (KERN_FAILURE);
@ -4826,9 +4843,10 @@ _vm_map_assert_consistent(vm_map_t map, int check)
KASSERT(entry->start < entry->end,
("map %p start = %jx, end = %jx", map,
(uintmax_t)entry->start, (uintmax_t)entry->end));
-KASSERT(entry->end <= entry->next->start,
+KASSERT(entry->end <= vm_map_entry_succ(entry)->start,
("map %p end = %jx, next->start = %jx", map,
-(uintmax_t)entry->end, (uintmax_t)entry->next->start));
+(uintmax_t)entry->end,
+    (uintmax_t)vm_map_entry_succ(entry)->start));
KASSERT(entry->left == NULL ||
entry->left->start < entry->start,
("map %p left->start = %jx, start = %jx", map,
@ -4837,14 +4855,16 @@ _vm_map_assert_consistent(vm_map_t map, int check)
entry->start < entry->right->start,
("map %p start = %jx, right->start = %jx", map,
(uintmax_t)entry->start, (uintmax_t)entry->right->start));
-max_left = vm_map_entry_max_free_left(entry, entry->prev);
-max_right = vm_map_entry_max_free_right(entry, entry->next);
+max_left = vm_map_entry_max_free_left(entry,
+    vm_map_entry_pred(entry));
+max_right = vm_map_entry_max_free_right(entry,
+    vm_map_entry_succ(entry));
KASSERT(entry->max_free == MAX(max_left, max_right),
("map %p max = %jx, max_left = %jx, max_right = %jx", map,
(uintmax_t)entry->max_free,
(uintmax_t)max_left, (uintmax_t)max_right));
prev = entry;
}
}
KASSERT(prev->end <= entry->start,
("map %p prev->end = %jx, start = %jx", map,
(uintmax_t)prev->end, (uintmax_t)entry->start));

File: sys/vm/vm_map.h

@ -419,10 +419,25 @@ int vm_map_lookup_locked(vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, v
vm_pindex_t *, vm_prot_t *, boolean_t *);
void vm_map_lookup_done (vm_map_t, vm_map_entry_t);
boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
+
+static inline vm_map_entry_t
+vm_map_entry_succ(vm_map_entry_t entry)
+{
+	return (entry->next);
+}
+
+static inline vm_map_entry_t
+vm_map_entry_pred(vm_map_entry_t entry)
+{
+	return (entry->prev);
+}
+
#define VM_MAP_ENTRY_FOREACH(it, map) \
for ((it) = (map)->header.next; \
(it) != &(map)->header; \
-(it) = (it)->next)
+(it) = vm_map_entry_succ(it))
int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
void vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev,
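A usage sketch of the new accessors and the macro (count_entries() is a
hypothetical caller, not part of this patch):

/* Count a map's entries; the macro starts at the header's successor
 * and stops when traversal wraps back around to the header. */
static int
count_entries(vm_map_t map)
{
	vm_map_entry_t it;
	int n;

	n = 0;
	VM_MAP_ENTRY_FOREACH(it, map)
		n++;
	return (n);
}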

File: sys/vm/vm_mmap.c

@ -581,7 +581,7 @@ kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
pkm.pm_address = (uintptr_t) NULL;
if (vm_map_lookup_entry(map, addr, &entry)) {
for (; entry->start < addr + size;
-entry = entry->next) {
+entry = vm_map_entry_succ(entry)) {
if (vm_map_check_protection(map, entry->start,
entry->end, VM_PROT_EXECUTE) == TRUE) {
pkm.pm_address = (uintptr_t) addr;
@ -817,12 +817,15 @@ kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
* up the pages elsewhere.
*/
lastvecindex = -1;
-for (current = entry; current->start < end; current = current->next) {
+while (entry->start < end) {
/*
* check for contiguity
*/
-if (current->end < end && current->next->start > current->end) {
+current = entry;
+entry = vm_map_entry_succ(current);
+if (current->end < end &&
+    entry->start > current->end) {
vm_map_unlock_read(map);
return (ENOMEM);
}
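(The rewritten kern_mincore() loop fetches each entry's successor once
at the top of the pass, so the hole check can reuse it rather than
chasing the next pointer a second time. A sketch of the resulting
shape, not the full function:)

while (entry->start < end) {
	current = entry;
	entry = vm_map_entry_succ(current);
	/* 'entry' now names current's successor; a gap before 'end'
	 * means the range is not fully mapped. */
	if (current->end < end && entry->start > current->end) {
		vm_map_unlock_read(map);
		return (ENOMEM);
	}
	/* ... inspect the pages backing [current->start, current->end) ... */
}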