From fec296887fd52f2265052ccd263b242ecb2f6a56 Mon Sep 17 00:00:00 2001
From: Alan Cox
Date: Tue, 26 Dec 2017 17:59:37 +0000
Subject: [PATCH] Refactor vm_map_find(), creating a separate function,
 vm_map_alignspace(), for finding aligned free space in the given map.

With this change, we always return KERN_NO_SPACE when we fail to find free
space.  Whereas, previously, we might return KERN_INVALID_ADDRESS.

Also, with this change, we explicitly check for address wrap, rather than
relying upon the map's min and max addresses to establish sentinel-like
regions.

This refactoring was inspired by the problem that we addressed in r326098.

Reviewed by:	kib
Tested by:	pho
Discussed with:	markj
MFC after:	3 weeks
Differential Revision:	https://reviews.freebsd.org/D13346
---
 sys/vm/vm_map.c | 138 ++++++++++++++++++++++++++++++++++--------------
 1 file changed, 97 insertions(+), 41 deletions(-)

diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 221559f2d421..90ca0aeddab3 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -132,6 +132,9 @@ static int vmspace_zinit(void *mem, int size, int flags);
 static int vm_map_zinit(void *mem, int ize, int flags);
 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
     vm_offset_t max);
+static int vm_map_alignspace(vm_map_t map, vm_object_t object,
+    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t length,
+    vm_offset_t max_addr, vm_offset_t alignment);
 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
@@ -1483,6 +1486,70 @@ vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 	return (result);
 }
 
+/*
+ * Searches for the specified amount of free space in the given map with the
+ * specified alignment.  Performs an address-ordered, first-fit search from
+ * the given address "*addr", with an optional upper bound "max_addr".  If the
+ * parameter "alignment" is zero, then the alignment is computed from the
+ * given (object, offset) pair so as to enable the greatest possible use of
+ * superpage mappings.  Returns KERN_SUCCESS and the address of the free space
+ * in "*addr" if successful.  Otherwise, returns KERN_NO_SPACE.
+ *
+ * The map must be locked.  Initially, there must be at least "length" bytes
+ * of free space at the given address.
+ */
+static int
+vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
+    vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr,
+    vm_offset_t alignment)
+{
+	vm_offset_t aligned_addr, free_addr;
+
+	VM_MAP_ASSERT_LOCKED(map);
+	free_addr = *addr;
+	KASSERT(!vm_map_findspace(map, free_addr, length, addr) &&
+	    free_addr == *addr, ("caller provided insufficient free space"));
+	for (;;) {
+		/*
+		 * At the start of every iteration, the free space at address
+		 * "*addr" is at least "length" bytes.
+		 */
+		if (alignment == 0)
+			pmap_align_superpage(object, offset, addr, length);
+		else if ((*addr & (alignment - 1)) != 0) {
+			*addr &= ~(alignment - 1);
+			*addr += alignment;
+		}
+		aligned_addr = *addr;
+		if (aligned_addr == free_addr) {
+			/*
+			 * Alignment did not change "*addr", so "*addr" must
+			 * still provide sufficient free space.
+			 */
+			return (KERN_SUCCESS);
+		}
+
+		/*
+		 * Test for address wrap on "*addr".  A wrapped "*addr" could
+		 * be a valid address, in which case vm_map_findspace() cannot
+		 * be relied upon to fail.
+		 */
+		if (aligned_addr < free_addr ||
+		    vm_map_findspace(map, aligned_addr, length, addr) ||
+		    (max_addr != 0 && *addr + length > max_addr))
+			return (KERN_NO_SPACE);
+		free_addr = *addr;
+		if (free_addr == aligned_addr) {
+			/*
+			 * If a successful call to vm_map_findspace() did not
+			 * change "*addr", then "*addr" must still be aligned
+			 * and provide sufficient free space.
+			 */
+			return (KERN_SUCCESS);
+		}
+	}
+}
+
 /*
  *	vm_map_find finds an unallocated region in the target address
  *	map with the given length.  The search is defined to be
@@ -1498,8 +1565,8 @@ vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 	    vm_size_t length, vm_offset_t max_addr, int find_space,
 	    vm_prot_t prot, vm_prot_t max, int cow)
 {
-	vm_offset_t alignment, initial_addr, start;
-	int result;
+	vm_offset_t alignment, min_addr;
+	int rv;
 
 	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
 	    object == NULL,
@@ -1512,50 +1579,39 @@ vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 		alignment = (vm_offset_t)1 << (find_space >> 8);
 	} else
 		alignment = 0;
-	initial_addr = *addr;
 	vm_map_lock(map);
+	if (find_space != VMFS_NO_SPACE) {
+		KASSERT(find_space == VMFS_ANY_SPACE ||
+		    find_space == VMFS_OPTIMAL_SPACE ||
+		    find_space == VMFS_SUPER_SPACE ||
+		    alignment != 0, ("unexpected VMFS flag"));
+		min_addr = *addr;
 again:
-	start = initial_addr;
-	do {
-		if (find_space != VMFS_NO_SPACE) {
-			if (vm_map_findspace(map, start, length, addr) ||
-			    (max_addr != 0 && *addr + length > max_addr)) {
-				if (find_space == VMFS_OPTIMAL_SPACE) {
-					find_space = VMFS_ANY_SPACE;
-					goto again;
-				}
-				vm_map_unlock(map);
-				return (KERN_NO_SPACE);
-			}
-			switch (find_space) {
-			case VMFS_SUPER_SPACE:
-			case VMFS_OPTIMAL_SPACE:
-				pmap_align_superpage(object, offset, addr,
-				    length);
-				break;
-			case VMFS_ANY_SPACE:
-				break;
-			default:
-				if ((*addr & (alignment - 1)) != 0) {
-					*addr &= ~(alignment - 1);
-					*addr += alignment;
-				}
-				break;
-			}
-
-			start = *addr;
+		if (vm_map_findspace(map, min_addr, length, addr) ||
+		    (max_addr != 0 && *addr + length > max_addr)) {
+			rv = KERN_NO_SPACE;
+			goto done;
 		}
-		if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
-			result = vm_map_stack_locked(map, start, length,
-			    sgrowsiz, prot, max, cow);
-		} else {
-			result = vm_map_insert(map, object, offset, start,
-			    start + length, prot, max, cow);
+		if (find_space != VMFS_ANY_SPACE &&
+		    (rv = vm_map_alignspace(map, object, offset, addr, length,
+		    max_addr, alignment)) != KERN_SUCCESS) {
+			if (find_space == VMFS_OPTIMAL_SPACE) {
+				find_space = VMFS_ANY_SPACE;
+				goto again;
+			}
+			goto done;
 		}
-	} while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE &&
-	    find_space != VMFS_ANY_SPACE);
+	}
+	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
+		rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot,
+		    max, cow);
+	} else {
+		rv = vm_map_insert(map, object, offset, *addr, *addr + length,
+		    prot, max, cow);
+	}
+done:
 	vm_map_unlock(map);
-	return (result);
+	return (rv);
 }
 
 /*
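
Not part of the patch itself: below is a minimal userland sketch of the
round-up-and-wrap check that vm_map_alignspace() performs when an explicit
alignment is requested.  The helper name align_up() and the sample addresses
are invented for illustration; the sketch only mirrors the patch's
"*addr &= ~(alignment - 1); *addr += alignment;" step and its
"aligned_addr < free_addr" wrap test, outside the kernel.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Round "addr" up to the next multiple of "alignment" (a power of two).
 * Returns 0 and stores the result in "*out" on success; returns -1 if the
 * rounded address wraps past the end of the address space, the condition
 * that vm_map_alignspace() turns into KERN_NO_SPACE.
 */
static int
align_up(uintptr_t addr, uintptr_t alignment, uintptr_t *out)
{
	uintptr_t aligned;

	if ((addr & (alignment - 1)) == 0) {
		/* Already aligned; nothing to do. */
		*out = addr;
		return (0);
	}
	aligned = (addr & ~(alignment - 1)) + alignment;
	if (aligned < addr) {
		/* Address wrap: no aligned address exists above "addr". */
		return (-1);
	}
	*out = aligned;
	return (0);
}

int
main(void)
{
	uintptr_t aligned;

	/*
	 * Round an arbitrary address up to a 2 MB boundary, the superpage
	 * size on amd64.
	 */
	if (align_up(0x12345678, (uintptr_t)1 << 21, &aligned) == 0)
		printf("aligned: %#" PRIxPTR "\n", aligned);

	/*
	 * Near the top of the address space the round-up wraps, which is
	 * exactly the case the patch now detects explicitly instead of
	 * relying on sentinel regions at the map's min and max addresses.
	 */
	if (align_up(UINTPTR_MAX - 100, (uintptr_t)1 << 21, &aligned) != 0)
		printf("address wrap detected\n");
	return (0);
}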