mlockall(), and vm_map_wire() with VM_MAP_WIRE_HOLESOK in general, does not
interact properly with parallel creation of the map entries, e.g. by mmap()
or stack growing.  It also breaks when another entry is wired in parallel.

vm_map_wire() iterates over the map entries in the region and assumes
that the map entries it finds were marked as in transition earlier, and
that any entry marked as in transition was marked by the current
invocation of vm_map_wire().  This is not true for new entries created in
the holes.

Add the owning thread of the MAP_ENTRY_IN_TRANSITION flag to struct
vm_map_entry.  In vm_map_wire() and vm_map_unwire(), only process the
entries whose transition owner is the current thread.

Reported and tested by:	pho
Reviewed by:	alc
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
This commit is contained in:
Konstantin Belousov 2013-07-11 05:55:08 +00:00
parent ebf5d94e82
commit 0acea7dfde
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=253190
2 changed files with 52 additions and 11 deletions

View File

@ -2281,6 +2281,7 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
* above.)
*/
entry->eflags |= MAP_ENTRY_IN_TRANSITION;
entry->wiring_thread = curthread;
/*
* Check the map for holes in the specified region.
* If VM_MAP_WIRE_HOLESOK was specified, skip this check.
@ -2313,8 +2314,24 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
else
KASSERT(result, ("vm_map_unwire: lookup failed"));
}
entry = first_entry;
while (entry != &map->header && entry->start < end) {
for (entry = first_entry; entry != &map->header && entry->start < end;
entry = entry->next) {
/*
* If VM_MAP_WIRE_HOLESOK was specified, an empty
* space in the unwired region could have been mapped
* while the map lock was dropped for draining
* MAP_ENTRY_IN_TRANSITION. Moreover, another thread
* could be simultaneously wiring this new mapping
* entry. Detect these cases and skip any entries
* marked as in transition by us.
*/
if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
entry->wiring_thread != curthread) {
KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
("vm_map_unwire: !HOLESOK and new/changed entry"));
continue;
}
if (rv == KERN_SUCCESS && (!user_unwire ||
(entry->eflags & MAP_ENTRY_USER_WIRED))) {
if (user_unwire)
@ -2330,15 +2347,15 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
OBJ_FICTITIOUS) != 0);
}
}
KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
("vm_map_unwire: in-transition flag missing"));
KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
("vm_map_unwire: in-transition flag missing"));
entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
entry->wiring_thread = NULL;
if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
need_wakeup = TRUE;
}
vm_map_simplify_entry(map, entry);
entry = entry->next;
}
vm_map_unlock(map);
if (need_wakeup)
@ -2432,6 +2449,7 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
* above.)
*/
entry->eflags |= MAP_ENTRY_IN_TRANSITION;
entry->wiring_thread = curthread;
if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
|| (entry->protection & prot) != prot) {
entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
@ -2523,10 +2541,27 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
else
KASSERT(result, ("vm_map_wire: lookup failed"));
}
entry = first_entry;
while (entry != &map->header && entry->start < end) {
for (entry = first_entry; entry != &map->header && entry->start < end;
entry = entry->next) {
if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
goto next_entry_done;
/*
* If VM_MAP_WIRE_HOLESOK was specified, an empty
* space in the unwired region could have been mapped
* while the map lock was dropped for faulting in the
* pages or draining MAP_ENTRY_IN_TRANSITION.
* Moreover, another thread could be simultaneously
* wiring this new mapping entry. Detect these cases
* and skip any entries marked as in transition by us.
*/
if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
entry->wiring_thread != curthread) {
KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
("vm_map_wire: !HOLESOK and new/changed entry"));
continue;
}
if (rv == KERN_SUCCESS) {
if (user_wire)
entry->eflags |= MAP_ENTRY_USER_WIRED;
@ -2551,15 +2586,18 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
}
}
next_entry_done:
KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
("vm_map_wire: in-transition flag missing"));
entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION|MAP_ENTRY_WIRE_SKIPPED);
KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
("vm_map_wire: in-transition flag missing %p", entry));
KASSERT(entry->wiring_thread == curthread,
("vm_map_wire: alien wire %p", entry));
entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
MAP_ENTRY_WIRE_SKIPPED);
entry->wiring_thread = NULL;
if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
need_wakeup = TRUE;
}
vm_map_simplify_entry(map, entry);
entry = entry->next;
}
vm_map_unlock(map);
if (need_wakeup)
@ -3193,6 +3231,7 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
*new_entry = *old_entry;
new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
MAP_ENTRY_IN_TRANSITION);
new_entry->wiring_thread = NULL;
new_entry->wired_count = 0;
if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
vnode_pager_update_writecount(object,
@ -3227,6 +3266,7 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
*/
new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
new_entry->wiring_thread = NULL;
new_entry->wired_count = 0;
new_entry->object.vm_object = NULL;
new_entry->cred = NULL;

View File

@ -116,6 +116,7 @@ struct vm_map_entry {
int wired_count; /* can be paged if = 0 */
vm_pindex_t next_read; /* index of the next sequential read */
struct ucred *cred; /* tmp storage for creator ref */
struct thread *wiring_thread;
};
#define MAP_ENTRY_NOSYNC 0x0001