Cleanups made necessary by r348115, or reactions to it:

1. Change size_t to vm_size_t in some places.
2. Rename vm_map_entry_resize_free to drop the _free part.
3. Fix whitespace errors.
4. Fix screwups in patch-conflict-management that left out important
changes related to growing and shrinking objects.

Reviewed by: alc
Approved by: kib (mentor)
This commit is contained in:
dougm 2019-05-22 23:11:16 +00:00
parent 4af3cc46e2
commit 1dd70b3646

View File

@ -1241,17 +1241,15 @@ vm_map_entry_unlink(vm_map_t map,
}
/*
* vm_map_entry_resize_free:
* vm_map_entry_resize:
*
* Recompute the amount of free space following a modified vm_map_entry
* and propagate those values up the tree. Call this function after
* resizing a map entry in-place by changing the end value, without a
* call to vm_map_entry_link() or _unlink().
* Resize a vm_map_entry, recompute the amount of free space that
* follows it and propagate that value up the tree.
*
* The map must be locked, and leaves it so.
*/
static void
vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry, size_t grow_amount)
vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount)
{
vm_map_entry_t llist, rlist, root;
@ -1259,15 +1257,15 @@ vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry, size_t grow_amount)
root = map->root;
root = vm_map_splay_split(entry->start, 0, root, &llist, &rlist);
KASSERT(root != NULL,
("vm_map_entry_resize_free: resize_free object not mapped"));
("%s: resize object not mapped", __func__));
vm_map_splay_findnext(root, &rlist);
root->right = NULL;
entry->end += grow_amount;
map->root = vm_map_splay_merge(root, llist, rlist,
root->left, root->right);
VM_MAP_ASSERT_CONSISTENT(map);
CTR3(KTR_VM, "vm_map_entry_resize_free: map %p, nentries %d, entry %p",
map, map->nentries, entry);
CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p",
__func__, map, map->nentries, entry);
}
/*
@ -1487,7 +1485,7 @@ charged:
prev_entry));
if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
map->size += end - prev_entry->end;
vm_map_entry_resize_free(map, prev_entry,
vm_map_entry_resize(map, prev_entry,
end - prev_entry->end);
vm_map_simplify_entry(map, prev_entry);
return (KERN_SUCCESS);
@ -4168,7 +4166,7 @@ vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
struct vmspace *vm;
struct ucred *cred;
vm_offset_t gap_end, gap_start, grow_start;
size_t grow_amount, guard, max_grow;
vm_size_t grow_amount, guard, max_grow;
rlim_t lmemlim, stacklim, vmemlim;
int rv, rv1;
bool gap_deleted, grow_down, is_procstack;
@ -4314,8 +4312,7 @@ retry:
gap_deleted = true;
} else {
MPASS(gap_entry->start < gap_entry->end - grow_amount);
gap_entry->end -= grow_amount;
vm_map_entry_resize_free(map, gap_entry, -grow_amount);
vm_map_entry_resize(map, gap_entry, -grow_amount);
gap_deleted = false;
}
rv = vm_map_insert(map, NULL, 0, grow_start,
@ -4329,7 +4326,7 @@ retry:
MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN);
MPASS(rv1 == KERN_SUCCESS);
} else
vm_map_entry_resize_free(map, gap_entry,
vm_map_entry_resize(map, gap_entry,
grow_amount);
}
} else {
@ -4344,13 +4341,16 @@ retry:
vm_object_coalesce(stack_entry->object.vm_object,
stack_entry->offset,
(vm_size_t)(stack_entry->end - stack_entry->start),
(vm_size_t)grow_amount, cred != NULL)) {
if (gap_entry->start + grow_amount == gap_entry->end)
grow_amount, cred != NULL)) {
if (gap_entry->start + grow_amount == gap_entry->end) {
vm_map_entry_delete(map, gap_entry);
else
vm_map_entry_resize(map, stack_entry,
grow_amount);
} else {
gap_entry->start += grow_amount;
stack_entry->end += grow_amount;
}
map->size += grow_amount;
vm_map_entry_resize_free(map, stack_entry, grow_amount);
rv = KERN_SUCCESS;
} else
rv = KERN_FAILURE;