Instead of looking up a predecessor or successor of the current map entry when that entry has already been seen, keep the already-looked-up value in a variable and use that instead of looking it up again.

Approved by: alc, markj (earlier version), kib (earlier version)
Differential Revision: https://reviews.freebsd.org/D22348
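
A minimal sketch of the pattern, for illustration only: struct entry, the header sentinel, and try_merge() below are hypothetical stand-ins for vm_map_entry, map->header, and vm_map_try_merge_entries(), not the kernel's actual types.

    /* Walk a circular doubly linked list with a header sentinel. */
    struct entry {
            struct entry *prev, *next;
    };

    static void
    try_merge(struct entry *prev, struct entry *cur)
    {
            /* Placeholder for vm_map_try_merge_entries()-style work. */
            (void)prev;
            (void)cur;
    }

    /* Before: the predecessor is looked up again on every iteration. */
    static void
    walk_with_lookup(struct entry *header)
    {
            struct entry *e;

            for (e = header->next; e != header; e = e->next)
                    try_merge(e->prev, e);  /* repeated pred lookup */
    }

    /* After: the previous iteration already visited the predecessor,
     * so carry it in a variable and reuse it. */
    static void
    walk_with_cached_prev(struct entry *header)
    {
            struct entry *prev, *e;

            for (prev = header, e = header->next; e != header;
                prev = e, e = e->next)
                    try_merge(prev, e);     /* reuse the known value */
    }

The loops changed below make the same trade: the predecessor is an entry the loop has already visited, so keeping it in prev_entry (or next_entry for the successor case) avoids calling vm_map_entry_pred() or vm_map_entry_succ() a second time.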
Doug Moore 2019-11-20 16:06:48 +00:00
parent 0cb5357037
commit 83704cc236
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=354895
3 changed files with 109 additions and 93 deletions

sys/security/mac/mac_process.c

@@ -252,7 +252,7 @@ static void
 mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
     struct vm_map *map)
 {
-    vm_map_entry_t vme;
+    vm_map_entry_t prev, vme;
     int result;
     vm_prot_t revokeperms;
     vm_object_t backing_object, object;
@@ -263,8 +263,10 @@ mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
     if (!mac_mmap_revocation)
         return;
 
+    prev = &map->header;
     vm_map_lock(map);
-    VM_MAP_ENTRY_FOREACH(vme, map) {
+    for (vme = vm_map_entry_first(map); vme != &map->header;
+        prev = vme, vme = vm_map_entry_succ(prev)) {
         if (vme->eflags & MAP_ENTRY_IS_SUB_MAP) {
             mac_proc_vm_revoke_recurse(td, cred,
                 vme->object.sub_map);
@@ -363,8 +365,7 @@ mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
             }
             pmap_protect(map->pmap, vme->start, vme->end,
                 vme->protection & ~revokeperms);
-            vm_map_try_merge_entries(map, vm_map_entry_pred(vme),
-                vme);
+            vm_map_try_merge_entries(map, prev, vme);
         }
     }
     vm_map_unlock(map);

sys/vm/vm_map.c

@@ -978,6 +978,21 @@ vm_map_entry_max_free_right(vm_map_entry_t root, vm_map_entry_t right_ancestor)
         root->right->max_free : right_ancestor->start - root->end);
 }
 
+/*
+ * vm_map_entry_{pred,succ}:
+ *
+ *	Find the {predecessor, successor} of the entry by taking one step
+ *	in the appropriate direction and backtracking as much as necessary.
+ */
+static inline vm_map_entry_t
+vm_map_entry_pred(vm_map_entry_t entry)
+{
+
+    return (entry->prev);
+}
+
+/* vm_map_entry_succ is defined in vm_map.h. */
+
 #define SPLAY_LEFT_STEP(root, y, rlist, test) do {		\
     vm_size_t max_free;					\
 							\
@@ -1412,7 +1427,7 @@ int
 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
     vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
 {
-    vm_map_entry_t new_entry, prev_entry;
+    vm_map_entry_t new_entry, next_entry, prev_entry;
     struct ucred *cred;
     vm_eflags_t protoeflags;
     vm_inherit_t inheritance;
@@ -1443,7 +1458,8 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
     /*
      * Assert that the next entry doesn't overlap the end point.
      */
-    if (vm_map_entry_succ(prev_entry)->start < end)
+    next_entry = vm_map_entry_succ(prev_entry);
+    if (next_entry->start < end)
         return (KERN_NO_SPACE);
 
     if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
@@ -1538,8 +1554,7 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
             map->size += end - prev_entry->end;
             vm_map_entry_resize(map, prev_entry,
                 end - prev_entry->end);
-            vm_map_try_merge_entries(map, prev_entry,
-                vm_map_entry_succ(prev_entry));
+            vm_map_try_merge_entries(map, prev_entry, next_entry);
             return (KERN_SUCCESS);
         }
 
@@ -1600,7 +1615,7 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
      * other cases, which are less common.
      */
     vm_map_try_merge_entries(map, prev_entry, new_entry);
-    vm_map_try_merge_entries(map, new_entry, vm_map_entry_succ(new_entry));
+    vm_map_try_merge_entries(map, new_entry, next_entry);
 
     if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
         vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
@@ -2430,7 +2445,7 @@ int
 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
     vm_prot_t new_prot, boolean_t set_max)
 {
-    vm_map_entry_t current, entry, in_tran;
+    vm_map_entry_t current, entry, in_tran, prev_entry;
     vm_object_t obj;
     struct ucred *cred;
     vm_prot_t old_prot;
@@ -2555,9 +2570,10 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
      * Otherwise, just simplify entries, since some may have been modified.
      * [Note that clipping is not necessary the second time.]
      */
-    for (current = entry; current->start < end;
-        vm_map_try_merge_entries(map, vm_map_entry_pred(current), current),
-        current = vm_map_entry_succ(current)) {
+    for (prev_entry = vm_map_entry_pred(entry), current = entry;
+        current->start < end;
+        vm_map_try_merge_entries(map, prev_entry, current),
+        prev_entry = current, current = vm_map_entry_succ(current)) {
         if (rv != KERN_SUCCESS ||
             (current->eflags & MAP_ENTRY_GUARD) != 0)
             continue;
@@ -2595,7 +2611,7 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
 #undef MASK
         }
     }
-    vm_map_try_merge_entries(map, vm_map_entry_pred(current), current);
+    vm_map_try_merge_entries(map, prev_entry, current);
     vm_map_unlock(map);
     return (rv);
 }
@@ -2615,7 +2631,7 @@ vm_map_madvise(
     vm_offset_t end,
     int behav)
 {
-    vm_map_entry_t current, entry;
+    vm_map_entry_t current, prev_entry;
     bool modify_map;
 
     /*
@@ -2654,11 +2670,13 @@ vm_map_madvise(
      */
     VM_MAP_RANGE_CHECK(map, start, end);
 
-    if (vm_map_lookup_entry(map, start, &entry)) {
+    if (vm_map_lookup_entry(map, start, &current)) {
         if (modify_map)
-            vm_map_clip_start(map, entry, start);
+            vm_map_clip_start(map, current, start);
+        prev_entry = vm_map_entry_pred(current);
     } else {
-        entry = vm_map_entry_succ(entry);
+        prev_entry = current;
+        current = vm_map_entry_succ(current);
     }
 
     if (modify_map) {
@@ -2668,7 +2686,7 @@ vm_map_madvise(
          * We clip the vm_map_entry so that behavioral changes are
          * limited to the specified address range.
          */
-        for (current = entry; current->start < end;
+        for (; current->start < end; prev_entry = current,
             current = vm_map_entry_succ(current)) {
             if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
                 continue;
@@ -2700,11 +2718,9 @@ vm_map_madvise(
             default:
                 break;
             }
-            vm_map_try_merge_entries(map,
-                vm_map_entry_pred(current), current);
+            vm_map_try_merge_entries(map, prev_entry, current);
         }
-        vm_map_try_merge_entries(map, vm_map_entry_pred(current),
-            current);
+        vm_map_try_merge_entries(map, prev_entry, current);
         vm_map_unlock(map);
     } else {
         vm_pindex_t pstart, pend;
@@ -2716,7 +2732,7 @@ vm_map_madvise(
          * Since we don't clip the vm_map_entry, we have to clip
          * the vm_object pindex and count.
          */
-        for (current = entry; current->start < end;
+        for (; current->start < end;
             current = vm_map_entry_succ(current)) {
             vm_offset_t useEnd, useStart;
 
@@ -2804,8 +2820,7 @@ int
 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
     vm_inherit_t new_inheritance)
 {
-    vm_map_entry_t entry;
-    vm_map_entry_t temp_entry;
+    vm_map_entry_t entry, prev_entry;
 
     switch (new_inheritance) {
     case VM_INHERIT_NONE:
@@ -2820,20 +2835,21 @@ vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
         return (KERN_SUCCESS);
     vm_map_lock(map);
     VM_MAP_RANGE_CHECK(map, start, end);
-    if (vm_map_lookup_entry(map, start, &temp_entry)) {
-        entry = temp_entry;
+    if (vm_map_lookup_entry(map, start, &prev_entry)) {
+        entry = prev_entry;
         vm_map_clip_start(map, entry, start);
+        prev_entry = vm_map_entry_pred(entry);
     } else
-        entry = vm_map_entry_succ(temp_entry);
-    while (entry->start < end) {
+        entry = vm_map_entry_succ(prev_entry);
+    for (; entry->start < end;
+        prev_entry = entry, entry = vm_map_entry_succ(entry)) {
         vm_map_clip_end(map, entry, end);
         if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
             new_inheritance != VM_INHERIT_ZERO)
             entry->inheritance = new_inheritance;
-        vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
-        entry = vm_map_entry_succ(entry);
+        vm_map_try_merge_entries(map, prev_entry, entry);
     }
-    vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+    vm_map_try_merge_entries(map, prev_entry, entry);
     vm_map_unlock(map);
     return (KERN_SUCCESS);
 }
@@ -2896,9 +2912,9 @@ int
 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
     int flags)
 {
-    vm_map_entry_t entry, first_entry;
+    vm_map_entry_t entry, first_entry, next_entry, prev_entry;
     int rv;
-    bool first_iteration, holes_ok, need_wakeup, user_unwire;
+    bool holes_ok, need_wakeup, user_unwire;
 
     if (start == end)
         return (KERN_SUCCESS);
@@ -2914,28 +2930,26 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
             return (KERN_INVALID_ADDRESS);
         }
     }
-    first_iteration = true;
-    entry = first_entry;
     rv = KERN_SUCCESS;
-    while (entry->start < end) {
+    for (entry = first_entry; entry->start < end; entry = next_entry) {
         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
             /*
              * We have not yet clipped the entry.
              */
-            entry = vm_map_entry_in_transition(map, start, &end,
-                holes_ok, entry);
-            if (entry == NULL) {
-                if (first_iteration) {
+            next_entry = vm_map_entry_in_transition(map, start,
+                &end, holes_ok, entry);
+            if (next_entry == NULL) {
+                if (entry == first_entry) {
                     vm_map_unlock(map);
                     return (KERN_INVALID_ADDRESS);
                 }
                 rv = KERN_INVALID_ADDRESS;
                 break;
             }
-            first_entry = first_iteration ? entry : NULL;
+            first_entry = (entry == first_entry) ?
+                next_entry : NULL;
             continue;
         }
-        first_iteration = false;
         vm_map_clip_start(map, entry, start);
         vm_map_clip_end(map, entry, end);
         /*
@@ -2947,13 +2961,13 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
             ("owned map entry %p", entry));
         entry->eflags |= MAP_ENTRY_IN_TRANSITION;
         entry->wiring_thread = curthread;
+        next_entry = vm_map_entry_succ(entry);
         /*
          * Check the map for holes in the specified region.
          * If holes_ok, skip this check.
          */
         if (!holes_ok &&
-            (entry->end < end &&
-            vm_map_entry_succ(entry)->start > entry->end)) {
+            entry->end < end && next_entry->start > entry->end) {
             end = entry->end;
             rv = KERN_INVALID_ADDRESS;
             break;
@@ -2967,16 +2981,19 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
             rv = KERN_INVALID_ARGUMENT;
             break;
         }
-        entry = vm_map_entry_succ(entry);
     }
     need_wakeup = false;
     if (first_entry == NULL &&
         !vm_map_lookup_entry(map, start, &first_entry)) {
         KASSERT(holes_ok, ("vm_map_unwire: lookup failed"));
-        first_entry = vm_map_entry_succ(first_entry);
+        prev_entry = first_entry;
+        entry = vm_map_entry_succ(first_entry);
+    } else {
+        prev_entry = vm_map_entry_pred(first_entry);
+        entry = first_entry;
     }
-    for (entry = first_entry; entry->start < end;
-        entry = vm_map_entry_succ(entry)) {
+    for (; entry->start < end;
+        prev_entry = entry, entry = vm_map_entry_succ(entry)) {
         /*
          * If holes_ok was specified, an empty
          * space in the unwired region could have been mapped
@@ -3012,9 +3029,9 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
             entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
             need_wakeup = true;
         }
-        vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+        vm_map_try_merge_entries(map, prev_entry, entry);
     }
-    vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+    vm_map_try_merge_entries(map, prev_entry, entry);
     vm_map_unlock(map);
     if (need_wakeup)
         vm_map_wakeup(map);
@@ -3100,12 +3117,12 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
 int
 vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
 {
-    vm_map_entry_t entry, first_entry, tmp_entry;
+    vm_map_entry_t entry, first_entry, next_entry, prev_entry;
     vm_offset_t faddr, saved_end, saved_start;
     u_long npages;
     u_int last_timestamp;
     int rv;
-    bool first_iteration, holes_ok, need_wakeup, user_wire;
+    bool holes_ok, need_wakeup, user_wire;
     vm_prot_t prot;
 
     VM_MAP_ASSERT_LOCKED(map);
@@ -3124,25 +3141,23 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
         else
             return (KERN_INVALID_ADDRESS);
     }
-    first_iteration = true;
-    entry = first_entry;
-    while (entry->start < end) {
+    for (entry = first_entry; entry->start < end; entry = next_entry) {
         if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
             /*
              * We have not yet clipped the entry.
             */
-            entry = vm_map_entry_in_transition(map, start, &end,
-                holes_ok, entry);
-            if (entry == NULL) {
-                if (first_iteration)
+            next_entry = vm_map_entry_in_transition(map, start,
+                &end, holes_ok, entry);
+            if (next_entry == NULL) {
+                if (entry == first_entry)
                     return (KERN_INVALID_ADDRESS);
                 rv = KERN_INVALID_ADDRESS;
                 goto done;
             }
-            first_entry = first_iteration ? entry : NULL;
+            first_entry = (entry == first_entry) ?
+                next_entry : NULL;
             continue;
         }
-        first_iteration = false;
         vm_map_clip_start(map, entry, start);
         vm_map_clip_end(map, entry, end);
         /*
@@ -3205,15 +3220,13 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
              * deleted.
              */
            if (!vm_map_lookup_entry(map, saved_start,
-                &tmp_entry))
+                &next_entry))
                 KASSERT(false,
                     ("vm_map_wire: lookup failed"));
-            if (entry == first_entry)
-                first_entry = tmp_entry;
-            else
-                first_entry = NULL;
-            entry = tmp_entry;
-            while (entry->end < saved_end) {
+            first_entry = (entry == first_entry) ?
+                next_entry : NULL;
+            for (entry = next_entry; entry->end < saved_end;
+                entry = vm_map_entry_succ(entry)) {
                 /*
                  * In case of failure, handle entries
                  * that were not fully wired here;
@@ -3224,7 +3237,6 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
                     faddr < entry->end)
                     vm_map_wire_entry_failure(map,
                         entry, faddr);
-                entry = vm_map_entry_succ(entry);
             }
         }
         if (rv != KERN_SUCCESS) {
@@ -3242,14 +3254,13 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
          * Check the map for holes in the specified region.
          * If holes_ok was specified, skip this check.
          */
+        next_entry = vm_map_entry_succ(entry);
         if (!holes_ok &&
-            entry->end < end &&
-            vm_map_entry_succ(entry)->start > entry->end) {
+            entry->end < end && next_entry->start > entry->end) {
             end = entry->end;
             rv = KERN_INVALID_ADDRESS;
             goto done;
         }
-        entry = vm_map_entry_succ(entry);
     }
     rv = KERN_SUCCESS;
 done:
@@ -3257,10 +3268,14 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
     if (first_entry == NULL &&
         !vm_map_lookup_entry(map, start, &first_entry)) {
         KASSERT(holes_ok, ("vm_map_wire: lookup failed"));
-        first_entry = vm_map_entry_succ(first_entry);
+        prev_entry = first_entry;
+        entry = vm_map_entry_succ(first_entry);
+    } else {
+        prev_entry = vm_map_entry_pred(first_entry);
+        entry = first_entry;
     }
-    for (entry = first_entry; entry->start < end;
-        entry = vm_map_entry_succ(entry)) {
+    for (; entry->start < end;
+        prev_entry = entry, entry = vm_map_entry_succ(entry)) {
         /*
          * If holes_ok was specified, an empty
          * space in the unwired region could have been mapped
@@ -3313,9 +3328,9 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
             entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
             need_wakeup = true;
         }
-        vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+        vm_map_try_merge_entries(map, prev_entry, entry);
     }
-    vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+    vm_map_try_merge_entries(map, prev_entry, entry);
     if (need_wakeup)
         vm_map_wakeup(map);
     return (rv);
@@ -3345,8 +3360,7 @@ vm_map_sync(
     boolean_t syncio,
     boolean_t invalidate)
 {
-    vm_map_entry_t current;
-    vm_map_entry_t entry;
+    vm_map_entry_t current, entry, next_entry;
     vm_size_t size;
     vm_object_t object;
     vm_ooffset_t offset;
@@ -3366,13 +3380,14 @@ vm_map_sync(
      * Make a first pass to check for user-wired memory and holes.
      */
     for (current = entry; current->start < end;
-        current = vm_map_entry_succ(current)) {
+        current = next_entry) {
         if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
             vm_map_unlock_read(map);
             return (KERN_INVALID_ARGUMENT);
         }
+        next_entry = vm_map_entry_succ(current);
         if (end > current->end &&
-            current->end != vm_map_entry_succ(current)->start) {
+            current->end != next_entry->start) {
             vm_map_unlock_read(map);
             return (KERN_INVALID_ADDRESS);
         }
@@ -3914,7 +3929,7 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
     new_map->anon_loc = old_map->anon_loc;
 
-    old_entry = old_map->header.next;
+    old_entry = vm_map_entry_first(old_map);
     while (old_entry != &old_map->header) {
         if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)

sys/vm/vm_map.h

@@ -420,6 +420,13 @@ int vm_map_lookup_locked(vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, v
 void vm_map_lookup_done (vm_map_t, vm_map_entry_t);
 boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
 
+static inline vm_map_entry_t
+vm_map_entry_first(vm_map_t map)
+{
+
+    return (map->header.next);
+}
+
 static inline vm_map_entry_t
 vm_map_entry_succ(vm_map_entry_t entry)
 {
@@ -427,15 +434,8 @@ vm_map_entry_succ(vm_map_entry_t entry)
     return (entry->next);
 }
 
-static inline vm_map_entry_t
-vm_map_entry_pred(vm_map_entry_t entry)
-{
-
-    return (entry->prev);
-}
-
-#define VM_MAP_ENTRY_FOREACH(it, map)		\
-    for ((it) = (map)->header.next;		\
+#define VM_MAP_ENTRY_FOREACH(it, map)		\
+    for ((it) = vm_map_entry_first(map);	\
         (it) != &(map)->header;		\
         (it) = vm_map_entry_succ(it))
 int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);