Wrap the ia64-specific freebsd32_mmap_partial() hack in Giant for now,
since it calls into VFS and VM. This makes the freebsd32_mmap() routine
MP-safe; the extra Giant acquisitions here can be revisited later.

Glanced at by: marcel
MFC after: 3 days
This commit is contained in:
parent
e6e34c5689
commit
fa34d9b7a5
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=147964
@ -397,6 +397,7 @@ freebsd32_mmap(struct thread *td, struct freebsd32_mmap_args *uap)
|
||||
start = addr;
|
||||
end = addr + len;
|
||||
|
||||
mtx_lock(&Giant);
|
||||
if (start != trunc_page(start)) {
|
||||
error = freebsd32_mmap_partial(td, start,
|
||||
round_page(start), prot,
|
||||
@ -427,11 +428,14 @@ freebsd32_mmap(struct thread *td, struct freebsd32_mmap_args *uap)
|
||||
prot |= VM_PROT_WRITE;
|
||||
map = &td->td_proc->p_vmspace->vm_map;
|
||||
rv = vm_map_remove(map, start, end);
|
||||
if (rv != KERN_SUCCESS)
|
||||
if (rv != KERN_SUCCESS) {
|
||||
mtx_unlock(&Giant);
|
||||
return (EINVAL);
|
||||
}
|
||||
rv = vm_map_find(map, 0, 0,
|
||||
&start, end - start, FALSE,
|
||||
prot, VM_PROT_ALL, 0);
|
||||
mtx_unlock(&Giant);
|
||||
if (rv != KERN_SUCCESS)
|
||||
return (EINVAL);
|
||||
r.fd = fd;
|
||||
@ -445,6 +449,7 @@ freebsd32_mmap(struct thread *td, struct freebsd32_mmap_args *uap)
|
||||
td->td_retval[0] = addr;
|
||||
return (0);
|
||||
}
|
||||
mtx_unlock(&Giant);
|
||||
if (end == start) {
|
||||
/*
|
||||
* After dealing with the ragged ends, there
|
||||
|
Loading…
Reference in New Issue
Block a user