Fix a race with freeing vmspaces at process exit when vmspaces are shared.

Also introduce vm_endcopy instead of using pointer tricks when
initializing new vmspaces (the two copy idioms are sketched after the
vm_map.h hunk at the end of this page).

The race occurred because of how the reference count was used:
  test vmspace reference count,
  possibly block,
  decrement reference count

When a vmspace was shared between multiple processes, two processes
exiting at the same time could both test the reference count and then
block; neither would see the other's decrement, so neither would free
the vmspace (the pattern is sketched below).

Submitted by: green
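To make the failure concrete, here is a minimal compile-checkable sketch
of the old and new patterns. All names in it (struct vmspace_like,
struct proc_like, teardown()) are hypothetical stand-ins; only the
reference-count handling mirrors the diff below.

struct proc_like { int p_pid; };
struct vmspace_like {
        int vm_refcnt;
        struct proc_like *vm_freer;
};

void teardown(struct vmspace_like *vm) { (void)vm; }

/*
 * Old pattern: test, possibly block, decrement.  With a shared vmspace,
 * two exiting processes can both read vm_refcnt == 2, both skip the
 * teardown, and both decrement later: neither frees the vmspace.
 */
void
exit_old(struct vmspace_like *vm)
{
        if (vm->vm_refcnt == 1)
                teardown(vm);           /* never reached by either racer */
        /* ...may sleep here, letting the other process run... */
        vm->vm_refcnt--;                /* goes 2 -> 1 -> 0, but too late */
}

/*
 * New pattern: decrement first, so exactly one process observes the
 * transition to zero and takes responsibility for the teardown.
 */
void
exit_new(struct vmspace_like *vm, struct proc_like *p)
{
        if (--vm->vm_refcnt == 0) {
                teardown(vm);
                vm->vm_freer = p;       /* final free done in vm_waitproc() */
        }
}

With the decrement performed before the test and no chance to sleep in
between, exactly one exiting process observes the count reach zero and
becomes responsible for the teardown.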
Alfred Perlstein 2002-02-05 21:23:05 +00:00
parent 4974a170b0
commit 582ec34cd8
5 changed files with 37 additions and 20 deletions

sys/kern/kern_exit.c

@@ -217,13 +217,14 @@ exit1(td, rv)
          * Can't free the entire vmspace as the kernel stack
          * may be mapped within that space also.
          */
-        if (vm->vm_refcnt == 1) {
+        if (--vm->vm_refcnt == 0) {
                 if (vm->vm_shm)
                         shmexit(p);
                 pmap_remove_pages(vmspace_pmap(vm), VM_MIN_ADDRESS,
                     VM_MAXUSER_ADDRESS);
                 (void) vm_map_remove(&vm->vm_map, VM_MIN_ADDRESS,
                     VM_MAXUSER_ADDRESS);
+                vm->vm_freer = p;
         }
         PROC_LOCK(p);
@@ -400,8 +401,8 @@ exit1(td, rv)
         /*
          * Finally, call machine-dependent code to release the remaining
          * resources including address space, the kernel stack and pcb.
-         * The address space is released by "vmspace_free(p->p_vmspace)"
-         * in vm_waitproc();
+         * The address space is released by "vmspace_exitfree(p)" in
+         * vm_waitproc().
          */
         cpu_exit(td);

sys/vm/vm_extern.h

@@ -90,6 +90,7 @@ struct vmspace *vmspace_fork __P((struct vmspace *));
 void vmspace_exec __P((struct proc *));
 void vmspace_unshare __P((struct proc *));
 void vmspace_free __P((struct vmspace *));
+void vmspace_exitfree __P((struct proc *));
 void vnode_pager_setsize __P((struct vnode *, vm_ooffset_t));
 void vslock __P((caddr_t, u_int));
 void vsunlock __P((caddr_t, u_int));

sys/vm/vm_glue.c

@@ -305,7 +305,7 @@ vm_waitproc(p)
         pmap_dispose_proc(p);           /* drop per-process resources */
         FOREACH_THREAD_IN_PROC(p, td)
                 pmap_dispose_thread(td);
-        vmspace_free(p->p_vmspace);     /* and clean-out the vmspace */
+        vmspace_exitfree(p);            /* and clean-out the vmspace */
 }
 
 /*

sys/vm/vm_map.c

@@ -172,6 +172,7 @@ vmspace_alloc(min, max)
         vm->vm_map.pmap = vmspace_pmap(vm);             /* XXX */
         vm->vm_refcnt = 1;
         vm->vm_shm = NULL;
+        vm->vm_freer = NULL;
         return (vm);
 }
@@ -189,6 +190,24 @@ vm_init2(void)
         vm_object_init2();
 }
 
+static __inline void
+vmspace_dofree(struct vmspace *vm)
+{
+        CTR1(KTR_VM, "vmspace_free: %p", vm);
+        /*
+         * Lock the map, to wait out all other references to it.
+         * Delete all of the mappings and pages they hold, then call
+         * the pmap module to reclaim anything left.
+         */
+        vm_map_lock(&vm->vm_map);
+        (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
+            vm->vm_map.max_offset);
+        vm_map_unlock(&vm->vm_map);
+        pmap_release(vmspace_pmap(vm));
+        vm_map_destroy(&vm->vm_map);
+        zfree(vmspace_zone, vm);
+}
+
 void
 vmspace_free(struct vmspace *vm)
 {
@@ -197,23 +216,17 @@ vmspace_free(struct vmspace *vm)
         if (vm->vm_refcnt == 0)
                 panic("vmspace_free: attempt to free already freed vmspace");
 
-        if (--vm->vm_refcnt == 0) {
-
-                CTR1(KTR_VM, "vmspace_free: %p", vm);
-                /*
-                 * Lock the map, to wait out all other references to it.
-                 * Delete all of the mappings and pages they hold, then call
-                 * the pmap module to reclaim anything left.
-                 */
-                vm_map_lock(&vm->vm_map);
-                (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
-                    vm->vm_map.max_offset);
-                vm_map_unlock(&vm->vm_map);
-
-                pmap_release(vmspace_pmap(vm));
-                vm_map_destroy(&vm->vm_map);
-                zfree(vmspace_zone, vm);
-        }
+        if (--vm->vm_refcnt == 0)
+                vmspace_dofree(vm);
+}
+
+void
+vmspace_exitfree(struct proc *p)
+{
+        GIANT_REQUIRED;
+
+        if (p == p->p_vmspace->vm_freer)
+                vmspace_dofree(p->p_vmspace);
 }
 
 /*
@@ -2317,7 +2330,7 @@ vmspace_fork(struct vmspace *vm1)
         vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
         bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
-            (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
+            (caddr_t) &vm1->vm_endcopy - (caddr_t) &vm1->vm_startcopy);
         new_map = &vm2->vm_map; /* XXX */
         new_map->timestamp = 1;

sys/vm/vm_map.h

@@ -188,6 +188,8 @@ struct vmspace {
         caddr_t vm_daddr;       /* user virtual address of data XXX */
         caddr_t vm_maxsaddr;    /* user VA at max stack growth */
         caddr_t vm_minsaddr;    /* user VA at max stack growth */
+#define vm_endcopy vm_freer
+        struct proc *vm_freer;  /* vm freed on whose behalf */
 };
 #ifdef _KERNEL
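
As a footnote to the vm_endcopy change above (vmspace_fork() in vm_map.c
and the #define in vm_map.h), here is a standalone sketch contrasting
the two copy idioms. Everything in it except the vm_startcopy/vm_endcopy
markers is a hypothetical stand-in, and it uses char * casts where the
kernel uses caddr_t.

#include <strings.h>

/* Hypothetical stand-in for struct vmspace. */
struct vmspace_like {
        int     vm_refcnt;              /* not copied on fork */
#define vm_startcopy vm_copied_a
        long    vm_copied_a;            /* copying starts here */
        long    vm_copied_b;
#define vm_endcopy vm_not_copied
        void    *vm_not_copied;         /* copying stops before here */
};

void
fork_copy(struct vmspace_like *vm1, struct vmspace_like *vm2)
{
        /*
         * Old idiom ("pointer trick"): copy from vm_startcopy through
         * the end of the structure, relying on every field from
         * vm_startcopy onward being safe to duplicate.
         */
        bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
            (char *)(vm1 + 1) - (char *)&vm1->vm_startcopy);

        /*
         * New idiom: the explicit vm_endcopy marker bounds the copy, so
         * a field placed after it can be left out of the duplication.
         */
        bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
            (char *)&vm1->vm_endcopy - (char *)&vm1->vm_startcopy);
}

The point of the marker is that a field placed after vm_endcopy, such as
vm_freer, is not blindly copied into the child on fork, whereas the old
(vm1 + 1) arithmetic always copied through the end of the structure.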