Undo the merger of mlock()/vslock() and munlock()/vsunlock() and the
introduction of kern_mlock() and kern_munlock() in
        src/sys/kern/kern_sysctl.c      1.150
        src/sys/vm/vm_extern.h          1.69
        src/sys/vm/vm_glue.c            1.190
        src/sys/vm/vm_mmap.c            1.179
because different resource limits are appropriate for transient and
"permanent" page wiring requests.

Retain the kern_mlock() and kern_munlock() API in the revived
vslock() and vsunlock() functions.

Combine the best parts of each of the original sets of implementations
with further code cleanup.  Make the mlock() and vslock()
implementations as similar as possible.

Retain the RLIMIT_MEMLOCK check in mlock().  Move the most stringent
test, which can return EAGAIN, last so that requests that have no
hope of ever being satisfied will not be retried unnecessarily.
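
In outline, the reworked mlock() orders its failure checks from the ones
that can never succeed to the one that might succeed later; a condensed
sketch of that ordering (the full function is in the vm_mmap.c diff
below):

	/* Hopeless cases first, the retryable case last. */
	if (end <= start)
		return (EINVAL);	/* wrapped or empty range: never satisfiable */
	if (npages > vm_page_max_wired)
		return (ENOMEM);	/* single request exceeds the system-wide cap */
	if (npages + pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map)) >
	    atop(lim_cur(proc, RLIMIT_MEMLOCK)))
		return (ENOMEM);	/* over the per-process RLIMIT_MEMLOCK */
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);	/* transient: other wired pages may be released */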

Disable the test that can return EAGAIN in the vslock() implementation
because it will cause the sysctl code to wedge.
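
The wedge referred to here: the sysctl code, the only present user of
vslock(), does a hard loop on EAGAIN, so a wiring step that itself
returned EAGAIN under sustained memory pressure would never let the
request finish.  A minimal, hypothetical illustration of that retry
pattern (the helper name is invented for the example; it is not the
actual kern_sysctl.c code):

	for (;;) {
		error = wire_buffer_and_run_handler(req);	/* hypothetical helper */
		if (error != EAGAIN)
			break;
		/* if EAGAIN can never clear, this loop never exits */
	}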

Tested by:	Cy Schubert <Cy.Schubert AT komquats.com>
Author: Don Lewis
Date:   2004-03-05 22:03:11 +00:00
Commit: 169299398a
Parent: 6a3d33ac5e
Notes (svn2git, 2020-12-20 02:59:44 +00:00):
        svn path=/head/; revision=126668
4 changed files with 116 additions and 53 deletions

src/sys/kern/kern_sysctl.c

@@ -1000,7 +1000,7 @@ kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old,
 	error = sysctl_root(0, name, namelen, &req);
 
 	if (req.lock == REQ_WIRED)
-		kern_munlock(req.td, (vm_offset_t)req.oldptr,
+		vsunlock(req.td, (vm_offset_t)req.oldptr,
 		    (vm_size_t)req.wiredlen);
 
 	SYSCTL_UNLOCK();
@@ -1103,7 +1103,7 @@ sysctl_wire_old_buffer(struct sysctl_req *req, size_t len)
 	ret = 0;
 	if (req->lock == REQ_LOCKED && req->oldptr &&
 	    req->oldfunc == sysctl_old_user) {
-		ret = kern_mlock(req->td, (vm_offset_t)req->oldptr,
+		ret = vslock(req->td, (vm_offset_t)req->oldptr,
 		    (vm_size_t)wiredlen);
 		if (ret == 0) {
 			req->lock = REQ_WIRED;
@@ -1320,7 +1320,7 @@ userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
 	req = req2;
 
 	if (req.lock == REQ_WIRED)
-		kern_munlock(req.td, (vm_offset_t)req.oldptr,
+		vsunlock(req.td, (vm_offset_t)req.oldptr,
 		    (vm_size_t)req.wiredlen);
 
 	SYSCTL_UNLOCK();
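
For reference, the pattern the sysctl changes above rely on is wire,
copy out, unwire.  A condensed, hypothetical illustration of that use of
the revived API (the wrapper function is invented for the example; it is
not the literal kern_sysctl.c code):

static int
copyout_wired(struct thread *td, void *kbuf, void *ubuf, size_t len)
{
	int error;

	/* Wire the user pages for the duration of the copy. */
	error = vslock(td, (vm_offset_t)ubuf, (vm_size_t)len);
	if (error != 0)
		return (error);
	error = copyout(kbuf, ubuf, len);
	(void)vsunlock(td, (vm_offset_t)ubuf, (vm_size_t)len);
	return (error);
}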

src/sys/vm/vm_extern.h

@@ -59,8 +59,6 @@ int sstk(struct thread *, void *, int *);
 int swapon(struct thread *, void *, int *);
 #endif /* TYPEDEF_FOR_UAP */
 
-int kern_mlock(struct thread *, vm_offset_t, vm_size_t);
-int kern_munlock(struct thread *, vm_offset_t, vm_size_t);
 int kernacc(void *, int, int);
 vm_offset_t kmem_alloc(vm_map_t, vm_size_t);
 vm_offset_t kmem_alloc_nofault(vm_map_t, vm_size_t);
@@ -88,6 +86,8 @@ void vmspace_unshare(struct proc *);
 void vmspace_free(struct vmspace *);
 void vmspace_exitfree(struct proc *);
 void vnode_pager_setsize(struct vnode *, vm_ooffset_t);
+int vslock(struct thread *, vm_offset_t, vm_size_t);
+int vsunlock(struct thread *, vm_offset_t, vm_size_t);
 void vm_object_print(/* db_expr_t */ long, boolean_t, /* db_expr_t */ long,
     char *);
 int vm_fault_quick(caddr_t v, int prot);

src/sys/vm/vm_glue.c

@@ -183,6 +183,84 @@ useracc(addr, len, rw)
 	return (rv == TRUE);
 }
 
+/*
+ * MPSAFE
+ */
+int
+vslock(td, addr, size)
+	struct thread *td;
+	vm_offset_t addr;
+	vm_size_t size;
+{
+	vm_offset_t start, end;
+	struct proc *proc = td->td_proc;
+	int error, npages;
+
+	start = trunc_page(addr);
+	end = round_page(addr + size);
+
+	/* disable wrap around */
+	if (end <= start)
+		return (EINVAL);
+
+	npages = atop(end - start);
+
+	if (npages > vm_page_max_wired)
+		return (ENOMEM);
+
+	PROC_LOCK(proc);
+	if (npages + pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map)) >
+	    atop(lim_cur(proc, RLIMIT_MEMLOCK))) {
+		PROC_UNLOCK(proc);
+		return (ENOMEM);
+	}
+	PROC_UNLOCK(proc);
+
+#if 0
+	/*
+	 * XXX - not yet
+	 *
+	 * The limit for transient usage of wired pages should be
+	 * larger than for "permanent" wired pages (mlock()).
+	 *
+	 * Also, the sysctl code, which is the only present user
+	 * of vslock(), does a hard loop on EAGAIN.
+	 */
+	if (npages + cnt.v_wire_count > vm_page_max_wired)
+		return (EAGAIN);
+#endif
+
+	error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
+	    VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
+
+	/* EINVAL is probably a better error to return than ENOMEM */
+	return (error == KERN_SUCCESS ? 0 : EINVAL);
+}
+
+/*
+ * MPSAFE
+ */
+int
+vsunlock(td, addr, size)
+	struct thread *td;
+	vm_offset_t addr;
+	vm_size_t size;
+{
+	vm_offset_t start, end;
+	int error;
+
+	start = trunc_page(addr);
+	end = round_page(addr + size);
+
+	/* disable wrap around */
+	if (end <= start)
+		return (EINVAL);
+
+	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
+	    VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
+	return (error == KERN_SUCCESS ? 0 : EINVAL);
+}
+
 /*
  * Create the U area for a new process.
  * This routine directly affects the fork perf for a process.

src/sys/vm/vm_mmap.c

@@ -893,49 +893,42 @@ mlock(td, uap)
 	struct thread *td;
 	struct mlock_args *uap;
 {
-	int error;
+	struct proc *proc = td->td_proc;
+	vm_offset_t addr, start, end;
+	vm_size_t size;
+	int error, npages;
 
 	error = suser(td);
 	if (error)
 		return (error);
 
-	return (kern_mlock(td, (vm_offset_t)uap->addr, (vm_size_t)uap->len));
-}
-
-/*
- * MPSAFE
- */
-int
-kern_mlock(td, addr, size)
-	struct thread *td;
-	vm_offset_t addr;
-	vm_size_t size;
-{
-	vm_size_t pageoff;
-	struct proc *proc = td->td_proc;
-	int error;
-
-	pageoff = (addr & PAGE_MASK);
-	addr -= pageoff;
-	size += pageoff;
-	size = (vm_size_t) round_page(size);
+	addr = (vm_offset_t)uap->addr;
+	size = uap->len;
+
+	start = trunc_page(addr);
+	end = round_page(addr + size);
 
 	/* disable wrap around */
-	if (addr + size < addr)
+	if (end <= start)
 		return (EINVAL);
 
-	if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
-		return (EAGAIN);
+	npages = atop(end - start);
+
+	if (npages > vm_page_max_wired)
+		return (ENOMEM);
 
 	PROC_LOCK(proc);
-	if (size + ptoa(pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map))) >
-	    lim_cur(proc, RLIMIT_MEMLOCK)) {
+	if (npages + pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map)) >
+	    atop(lim_cur(proc, RLIMIT_MEMLOCK))) {
 		PROC_UNLOCK(proc);
 		return (ENOMEM);
 	}
 	PROC_UNLOCK(proc);
 
-	error = vm_map_wire(&proc->p_vmspace->vm_map, addr,
-	    addr + size, VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
+	if (npages + cnt.v_wire_count > vm_page_max_wired)
+		return (EAGAIN);
+
+	error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
+	    VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
 	return (error == KERN_SUCCESS ? 0 : ENOMEM);
 }
@@ -1050,37 +1043,29 @@ munlock(td, uap)
 	struct thread *td;
 	struct munlock_args *uap;
 {
+	vm_offset_t addr, start, end;
+	vm_size_t size;
 	int error;
 
-	error = suser(td);
-	if (error)
-		return (error);
-
-	return (kern_munlock(td, (vm_offset_t)uap->addr, (vm_size_t)uap->len));
-}
-
-/*
- * MPSAFE
- */
-int
-kern_munlock(td, addr, size)
-	struct thread *td;
-	vm_offset_t addr;
-	vm_size_t size;
-{
-	vm_size_t pageoff;
-	int error;
-
-	pageoff = (addr & PAGE_MASK);
-	addr -= pageoff;
-	size += pageoff;
-	size = (vm_size_t) round_page(size);
+	addr = (vm_offset_t)uap->addr;
+	size = uap->len;
+
+	start = trunc_page(addr);
+	end = round_page(addr + size);
 
 	/* disable wrap around */
-	if (addr + size < addr)
+	if (end <= start)
 		return (EINVAL);
 
-	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, addr,
-	    addr + size, VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
+	error = suser(td);
+	if (error)
+		return (error);
+
+	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
+	    VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
 	return (error == KERN_SUCCESS ? 0 : ENOMEM);
 }