Fix a long-standing race (present since the original 4.4BSD-Lite sources) between vmspace_fork() and vm_map_wire() that could lead to "vm_fault_copy_wired: page missing" panics. While faulting in pages for a map entry that is being wired down, mark the containing map as busy. In vmspace_fork(), wait until the map is unbusy before trying to copy the entries.

Reviewed by:	kib
MFC after:	5 days
Sponsored by:	Isilon Systems, Inc.
This commit is contained in:
parent
c3ffedd66b
commit
da2dde653e
@ -671,6 +671,41 @@ vm_map_wakeup(vm_map_t map)
|
||||
wakeup(&map->root);
|
||||
}
|
||||
|
||||
void
|
||||
vm_map_busy(vm_map_t map)
|
||||
{
|
||||
|
||||
VM_MAP_ASSERT_LOCKED(map);
|
||||
map->busy++;
|
||||
}
|
||||
|
||||
void
|
||||
vm_map_unbusy(vm_map_t map)
|
||||
{
|
||||
|
||||
VM_MAP_ASSERT_LOCKED(map);
|
||||
KASSERT(map->busy, ("vm_map_unbusy: not busy"));
|
||||
if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
|
||||
vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
|
||||
wakeup(&map->busy);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
vm_map_wait_busy(vm_map_t map)
|
||||
{
|
||||
|
||||
VM_MAP_ASSERT_LOCKED(map);
|
||||
while (map->busy) {
|
||||
vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
|
||||
if (map->system_map)
|
||||
msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
|
||||
else
|
||||
sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
|
||||
}
|
||||
map->timestamp++;
|
||||
}
|
||||
|
||||
long
|
||||
vmspace_resident_count(struct vmspace *vmspace)
|
||||
{
|
||||
@ -718,6 +753,7 @@ _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
|
||||
map->flags = 0;
|
||||
map->root = NULL;
|
||||
map->timestamp = 0;
|
||||
map->busy = 0;
|
||||
}
|
||||
|
||||
void
|
||||
@ -2382,12 +2418,14 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
|
||||
entry->object.vm_object->type == OBJT_SG);
|
||||
/*
|
||||
* Release the map lock, relying on the in-transition
|
||||
* mark.
|
||||
* mark. Mark the map busy for fork.
|
||||
*/
|
||||
vm_map_busy(map);
|
||||
vm_map_unlock(map);
|
||||
rv = vm_fault_wire(map, saved_start, saved_end,
|
||||
fictitious);
|
||||
vm_map_lock(map);
|
||||
vm_map_unbusy(map);
|
||||
if (last_timestamp + 1 != map->timestamp) {
|
||||
/*
|
||||
* Look again for the entry because the map was
|
||||
@ -2995,6 +3033,8 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
|
||||
int locked;
|
||||
|
||||
vm_map_lock(old_map);
|
||||
if (old_map->busy)
|
||||
vm_map_wait_busy(old_map);
|
||||
vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
|
||||
if (vm2 == NULL)
|
||||
goto unlock_and_return;
|
||||
|
@ -187,12 +187,14 @@ struct vm_map {
|
||||
pmap_t pmap; /* (c) Physical map */
|
||||
#define min_offset header.start /* (c) */
|
||||
#define max_offset header.end /* (c) */
|
||||
int busy;
|
||||
};
|
||||
|
||||
/*
|
||||
* vm_flags_t values
|
||||
*/
|
||||
#define MAP_WIREFUTURE 0x01 /* wire all future pages */
|
||||
#define MAP_BUSY_WAKEUP 0x02
|
||||
|
||||
#ifdef _KERNEL
|
||||
static __inline vm_offset_t
|
||||
@ -275,6 +277,9 @@ int _vm_map_lock_upgrade(vm_map_t map, const char *file, int line);
|
||||
void _vm_map_lock_downgrade(vm_map_t map, const char *file, int line);
|
||||
int vm_map_locked(vm_map_t map);
|
||||
void vm_map_wakeup(vm_map_t map);
|
||||
void vm_map_busy(vm_map_t map);
|
||||
void vm_map_unbusy(vm_map_t map);
|
||||
void vm_map_wait_busy(vm_map_t map);
|
||||
|
||||
#define vm_map_lock(map) _vm_map_lock(map, LOCK_FILE, LOCK_LINE)
|
||||
#define vm_map_unlock(map) _vm_map_unlock(map, LOCK_FILE, LOCK_LINE)
|
||||
|
Loading…
x
Reference in New Issue
Block a user