diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index f0fb75b52d6e..9b63f474ab2d 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -423,6 +423,29 @@ _vm_map_clear_recursive(vm_map_t map, const char *file, int line)
 {
 }
 
+/*
+ * vm_map_unlock_and_wait:
+ */
+static __inline int
+vm_map_unlock_and_wait(vm_map_t map, boolean_t user_wait)
+{
+
+	GIANT_REQUIRED;
+	vm_map_unlock(map);
+
+	return (tsleep(&map->root, PVM, "vmmapw", 0));
+}
+
+/*
+ * vm_map_wakeup:
+ */
+static __inline void
+vm_map_wakeup(vm_map_t map)
+{
+
+	wakeup(&map->root);
+}
+
 long
 vmspace_resident_count(struct vmspace *vmspace)
 {
@@ -958,7 +981,7 @@ vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
 	vm_map_entry_t next, prev;
 	vm_size_t prevsize, esize;
 
-	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
+	if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
 		return;
 
 	prev = entry->prev;
@@ -1460,6 +1483,145 @@ vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
 	return (KERN_SUCCESS);
 }
 
+/*
+ * vm_map_unwire:
+ *
+ * Implements both kernel and user unwire.
+ */
+int
+vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
+    boolean_t user_unwire)
+{
+	vm_map_entry_t entry, first_entry, tmp_entry;
+	vm_offset_t saved_start;
+	unsigned int last_timestamp;
+	int rv;
+	boolean_t need_wakeup, result;
+
+	vm_map_lock(map);
+	VM_MAP_RANGE_CHECK(map, start, end);
+	if (!vm_map_lookup_entry(map, start, &first_entry)) {
+		vm_map_unlock(map);
+		return (KERN_INVALID_ADDRESS);
+	}
+	last_timestamp = map->timestamp;
+	need_wakeup = FALSE;
+	entry = first_entry;
+	while (entry != &map->header && entry->start < end) {
+		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
+			/*
+			 * We have not yet clipped the entry.
+			 */
+			saved_start = (start >= entry->start) ? start :
+			    entry->start;
+			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
+			if (need_wakeup) {
+				vm_map_wakeup(map);
+				need_wakeup = FALSE;
+			}
+			if (vm_map_unlock_and_wait(map, user_unwire)) {
+				/*
+				 * Allow interruption of user unwiring?
+				 */
+			}
+			vm_map_lock(map);
+			if (last_timestamp+1 != map->timestamp) {
+				/*
+				 * Look again for the entry because the map was
+				 * modified while it was unlocked.
+				 * Specifically, the entry may have been
+				 * clipped, merged, or deleted.
+				 */
+				if (!vm_map_lookup_entry(map, saved_start,
+				    &tmp_entry)) {
+					if (saved_start == start) {
+						/*
+						 * First_entry has been deleted.
+						 */
+						vm_map_unlock(map);
+						return (KERN_INVALID_ADDRESS);
+					}
+					end = saved_start;
+					rv = KERN_INVALID_ADDRESS;
+					goto done;
+				}
+				if (entry == first_entry)
+					first_entry = tmp_entry;
+				else
+					first_entry = NULL;
+				entry = tmp_entry;
+			}
+			last_timestamp = map->timestamp;
+			continue;
+		}
+		vm_map_clip_start(map, entry, start);
+		vm_map_clip_end(map, entry, end);
+		/*
+		 * Mark the entry in case the map lock is released.  (See
+		 * above.)
+		 */
+		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
+		/*
+		 * Check the map for holes in the specified region.
+		 */
+		if (entry->end < end && (entry->next == &map->header ||
+		    entry->next->start > entry->end)) {
+			end = entry->end;
+			rv = KERN_INVALID_ADDRESS;
+			goto done;
+		}
+		/*
+		 * Require that the entry is wired.
+		 */
+		if (entry->wired_count == 0 || (user_unwire &&
+		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)) {
+			end = entry->end;
+			rv = KERN_INVALID_ARGUMENT;
+			goto done;
+		}
+		entry = entry->next;
+	}
+	if (first_entry == NULL) {
+		result = vm_map_lookup_entry(map, start, &first_entry);
+		KASSERT(result, ("vm_map_unwire: lookup failed"));
+	}
+	entry = first_entry;
+	while (entry != &map->header && entry->start < end) {
+		if (user_unwire)
+			entry->eflags &= ~MAP_ENTRY_USER_WIRED;
+		entry->wired_count--;
+		if (entry->wired_count == 0) {
+			/*
+			 * Retain the map lock.
+			 */
+			vm_fault_unwire(map, entry->start, entry->end);
+		}
+		entry = entry->next;
+	}
+	rv = KERN_SUCCESS;
+done:
+	if (first_entry == NULL) {
+		result = vm_map_lookup_entry(map, start, &first_entry);
+		KASSERT(result, ("vm_map_unwire: lookup failed"));
+	}
+	entry = first_entry;
+	while (entry != &map->header && entry->start < end) {
+		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
+		    ("vm_map_unwire: in-transition flag missing"));
+		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
+		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
+			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
+			need_wakeup = TRUE;
+		}
+		vm_map_simplify_entry(map, entry);
+		entry = entry->next;
+	}
+	vm_map_unlock(map);
+	if (need_wakeup)
+		vm_map_wakeup(map);
+	return (rv);
+}
+
 /*
  * Implement the semantics of mlock
  */
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index b4c75e62381d..66e54fe8c876 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -131,6 +131,8 @@ struct vm_map_entry {
 
 #define MAP_ENTRY_BEHAV_MASK		0x00C0
 
+#define MAP_ENTRY_IN_TRANSITION	0x0100	/* entry being changed */
+#define MAP_ENTRY_NEEDS_WAKEUP	0x0200	/* waiters in transition */
 #define MAP_ENTRY_NOCOREDUMP	0x0400	/* don't include in a core */
 
 #ifdef	_KERNEL
@@ -316,6 +318,8 @@ void vm_map_simplify_entry (vm_map_t, vm_map_entry_t);
 void vm_init2 (void);
 int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int);
 int vm_map_growstack (struct proc *p, vm_offset_t addr);
+int vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
+    boolean_t user_unwire);
 int vmspace_swap_count (struct vmspace *vmspace);
 int vm_uiomove(vm_map_t, vm_offset_t, off_t, int, vm_offset_t, int *);
 #endif	/* _KERNEL */