Push Giant deep into vm_forkproc(), acquiring it only if the process has
mapped System V shared memory segments (see shmfork_myhook()) or requires the allocation of an ldt (see vm_fault_wire()).
parent 2e3e26a50c
commit 94ddc7076d
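The shape of the change is the classic Giant pushdown: fork1() and the other callers stop wrapping vm_forkproc() in Giant, and the lock is instead taken inside the subsystems that still need it, only on the paths that need it. A minimal sketch of that pattern, assuming the 5.x mutex KPI; the helper names here are illustrative, not the commit's code:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static int  needs_giant;		/* e.g. SysV shm mapped, ldt to wire */
static void copy_lockfree_state(void);
static void touch_giant_state(void);

/*
 * After the pushdown the callee acquires Giant itself, and only
 * around the state that is still Giant-protected; callers may now
 * enter Giant-free.
 */
static void
fork_work(void)
{
	copy_lockfree_state();		/* runs without Giant */
	if (needs_giant) {
		mtx_lock(&Giant);
		touch_giant_state();
		mtx_unlock(&Giant);
	}
}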
sys/kern/kern_fork.c
@@ -219,9 +219,7 @@ fork1(td, flags, pages, procp)
 	 * certain parts of a process from itself.
 	 */
 	if ((flags & RFPROC) == 0) {
-		mtx_lock(&Giant);
 		vm_forkproc(td, NULL, NULL, flags);
-		mtx_unlock(&Giant);
 
 		/*
 		 * Close all file descriptors.
@@ -668,27 +666,25 @@ fork1(td, flags, pages, procp)
 	 * Finish creating the child process. It will return via a different
 	 * execution path later. (ie: directly into user mode)
 	 */
-	mtx_lock(&Giant);
 	vm_forkproc(td, p2, td2, flags);
 
 	if (flags == (RFFDG | RFPROC)) {
-		cnt.v_forks++;
-		cnt.v_forkpages += p2->p_vmspace->vm_dsize +
-		    p2->p_vmspace->vm_ssize;
+		atomic_add_int(&cnt.v_forks, 1);
+		atomic_add_int(&cnt.v_forkpages, p2->p_vmspace->vm_dsize +
+		    p2->p_vmspace->vm_ssize);
 	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
-		cnt.v_vforks++;
-		cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
-		    p2->p_vmspace->vm_ssize;
+		atomic_add_int(&cnt.v_vforks, 1);
+		atomic_add_int(&cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
+		    p2->p_vmspace->vm_ssize);
 	} else if (p1 == &proc0) {
-		cnt.v_kthreads++;
-		cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
-		    p2->p_vmspace->vm_ssize;
+		atomic_add_int(&cnt.v_kthreads, 1);
+		atomic_add_int(&cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
+		    p2->p_vmspace->vm_ssize);
 	} else {
-		cnt.v_rforks++;
-		cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
-		    p2->p_vmspace->vm_ssize;
+		atomic_add_int(&cnt.v_rforks, 1);
+		atomic_add_int(&cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
+		    p2->p_vmspace->vm_ssize);
 	}
-	mtx_unlock(&Giant);
 
 	/*
 	 * Both processes are set up, now check if any loadable modules want
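With Giant no longer held across this block, the plain cnt.v_*++ updates would be lost-update races between concurrent forks, so the diff converts them to atomic_add_int(). A hedged sketch of the same idiom, assuming the machine/atomic.h KPI; the counters below are stand-ins for the real cnt fields:

#include <sys/types.h>
#include <machine/atomic.h>

static volatile u_int forks;		/* stand-in for cnt.v_forks */
static volatile u_int forkpages;	/* stand-in for cnt.v_forkpages */

static void
count_fork(u_int dsize, u_int ssize)
{
	/* forks++ could lose increments without a lock; the
	 * atomic read-modify-write cannot. */
	atomic_add_int(&forks, 1);
	atomic_add_int(&forkpages, dsize + ssize);
}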
sys/kern/sysv_shm.c
@@ -806,6 +806,7 @@ shmfork_myhook(p1, p2)
 	size_t size;
 	int i;
 
+	mtx_lock(&Giant);
 	size = shminfo.shmseg * sizeof(struct shmmap_state);
 	shmmap_s = malloc(size, M_SHM, M_WAITOK);
 	bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
@@ -813,6 +814,7 @@ shmfork_myhook(p1, p2)
 	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
 		if (shmmap_s->shmid != -1)
 			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
+	mtx_unlock(&Giant);
 }
 
 static void
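These two hunks make shmfork_myhook() self-locking: the Giant acquisition that fork1() used to provide now brackets only the SysV shm bookkeeping, so a process with no shm segments never touches the lock during fork. A condensed sketch of the resulting hook shape; this is not the commit's code:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

static void
shm_fork_hook(struct proc *p1, struct proc *p2)
{
	mtx_lock(&Giant);
	/* copy p1's shm attach map into p2 and bump shm_nattch
	 * on each attached segment, as the diff does */
	mtx_unlock(&Giant);
}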
sys/vm/vm_fault.c
@@ -1026,9 +1026,13 @@ vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 	 * read-only sections.
 	 */
 	for (va = start; va < end; va += PAGE_SIZE) {
+		if (map->system_map)
+			mtx_lock(&Giant);
 		rv = vm_fault(map, va,
 		    user_wire ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
 		    user_wire ? VM_FAULT_USER_WIRE : VM_FAULT_CHANGE_WIRING);
+		if (map->system_map)
+			mtx_unlock(&Giant);
 		if (rv) {
 			if (va != start)
 				vm_fault_unwire(map, start, va, fictitious);
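In vm_fault_wire() Giant is needed only for system maps (the ldt wiring case the commit message names), and it is taken per page rather than once around the whole loop. That bounds each Giant hold to a single fault, at the cost of one lock/unlock pair per page. A sketch of the shape, with fault_one_page() as a hypothetical helper:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static int fault_one_page(vm_offset_t va);

static int
wire_range(vm_offset_t start, vm_offset_t end, int system_map)
{
	vm_offset_t va;
	int rv;

	for (va = start; va < end; va += PAGE_SIZE) {
		/* Short, per-page holds let other Giant users
		 * interleave during a long wiring. */
		if (system_map)
			mtx_lock(&Giant);
		rv = fault_one_page(va);
		if (system_map)
			mtx_unlock(&Giant);
		if (rv)
			return (rv);
	}
	return (0);
}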
sys/vm/vm_glue.c
@@ -619,8 +619,6 @@ vm_forkproc(td, p2, td2, flags)
 {
 	struct proc *p1 = td->td_proc;
 
-	GIANT_REQUIRED;
-
 	if ((flags & RFPROC) == 0) {
 		/*
 		 * Divorce the memory, if it is shared, essentially
sys/vm/vm_map.c
@@ -2482,8 +2482,6 @@ vmspace_fork(struct vmspace *vm1)
 	vm_map_entry_t new_entry;
 	vm_object_t object;
 
-	GIANT_REQUIRED;
-
 	vm_map_lock(old_map);
 
 	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
@@ -2942,7 +2940,6 @@ vmspace_unshare(struct proc *p)
 	struct vmspace *oldvmspace = p->p_vmspace;
 	struct vmspace *newvmspace;
 
-	GIANT_REQUIRED;
 	if (oldvmspace->vm_refcnt == 1)
 		return;
 	newvmspace = vmspace_fork(oldvmspace);
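The GIANT_REQUIRED lines dropped from vm_forkproc(), vmspace_fork(), and vmspace_unshare() are assertions, not lock operations: in trees of this era the macro expands to roughly the following (from sys/sys/mutex.h), panicking under INVARIANTS if the caller does not hold Giant. Deleting the assertions is what formally permits Giant-free callers:

#define	GIANT_REQUIRED	mtx_assert(&Giant, MA_OWNED)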