Add kern_vm_mmap(), kern_vm_mprotect(), kern_vm_msync(), kern_vm_munlock(),
kern_vm_munmap(), and kern_vm_madvise(), and use them in various compats
instead of their sys_*() counterparts.

Reviewed by:	ed, dchagin, kib
MFC after:	2 weeks
Sponsored by:	DARPA, AFRL
Differential Revision:	https://reviews.freebsd.org/D9378
Edward Tomasz Napierala 2017-02-06 20:57:12 +00:00
parent 1110d0029a
commit 69cdfcef2e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=313352
6 changed files with 166 additions and 216 deletions
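
The change is mechanical: where a compat handler previously had to fake up the
native syscall's argument struct and bounce through the sys_*() wrapper, it now
passes its already-translated values straight to the new kern_vm_*() entry
points. A minimal before/after sketch of the pattern (the variable names are
placeholders for a compat layer's translated arguments, not code from this
commit):

	/* Before: build a userland-style uap and go through the syscall layer. */
	struct mmap_args args = {
		.addr = uaddr,
		.len = len,
		.prot = prot,
		.flags = flags,
		.fd = fd,
		.pos = off
	};
	error = sys_mmap(td, &args);

	/* After: hand the translated values directly to the VM layer. */
	error = kern_vm_mmap(td, (vm_offset_t)uaddr, len, prot, flags, fd, off);

Besides removing boilerplate, this means emulation code no longer constructs
arguments whose only purpose is to be unpacked again by the syscall handler.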

sys/compat/cloudabi/cloudabi_mem.c

@@ -28,7 +28,9 @@ __FBSDID("$FreeBSD$");
 #include <sys/param.h>
 #include <sys/mman.h>
 #include <sys/sysproto.h>
+#include <sys/proc.h>
+#include <vm/vm_extern.h>

 #include <contrib/cloudabi/cloudabi_types_common.h>
@@ -62,137 +64,115 @@ int
 cloudabi_sys_mem_advise(struct thread *td,
     struct cloudabi_sys_mem_advise_args *uap)
 {
-	struct madvise_args madvise_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len
-	};
+	int behav;

 	switch (uap->advice) {
 	case CLOUDABI_ADVICE_DONTNEED:
-		madvise_args.behav = MADV_DONTNEED;
+		behav = MADV_DONTNEED;
 		break;
 	case CLOUDABI_ADVICE_NORMAL:
-		madvise_args.behav = MADV_NORMAL;
+		behav = MADV_NORMAL;
 		break;
 	case CLOUDABI_ADVICE_RANDOM:
-		madvise_args.behav = MADV_RANDOM;
+		behav = MADV_RANDOM;
 		break;
 	case CLOUDABI_ADVICE_SEQUENTIAL:
-		madvise_args.behav = MADV_SEQUENTIAL;
+		behav = MADV_SEQUENTIAL;
 		break;
 	case CLOUDABI_ADVICE_WILLNEED:
-		madvise_args.behav = MADV_WILLNEED;
+		behav = MADV_WILLNEED;
 		break;
 	default:
 		return (EINVAL);
 	}

-	return (sys_madvise(td, &madvise_args));
+	return (kern_vm_madvise(td, (vm_offset_t)uap->mapping,
+	    uap->mapping_len, behav));
 }

 int
 cloudabi_sys_mem_lock(struct thread *td, struct cloudabi_sys_mem_lock_args *uap)
 {
-	struct mlock_args mlock_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len
-	};

-	return (sys_mlock(td, &mlock_args));
+	return (vm_mlock(td->td_proc, td->td_ucred, uap->mapping,
+	    uap->mapping_len));
 }

 int
 cloudabi_sys_mem_map(struct thread *td, struct cloudabi_sys_mem_map_args *uap)
 {
-	struct mmap_args mmap_args = {
-		.addr = uap->addr,
-		.len = uap->len,
-		.fd = uap->fd,
-		.pos = uap->off
-	};
-	int error;
+	int error, flags, prot;

 	/* Translate flags. */
+	flags = 0;
 	if (uap->flags & CLOUDABI_MAP_ANON)
-		mmap_args.flags |= MAP_ANON;
+		flags |= MAP_ANON;
 	if (uap->flags & CLOUDABI_MAP_FIXED)
-		mmap_args.flags |= MAP_FIXED;
+		flags |= MAP_FIXED;
 	if (uap->flags & CLOUDABI_MAP_PRIVATE)
-		mmap_args.flags |= MAP_PRIVATE;
+		flags |= MAP_PRIVATE;
 	if (uap->flags & CLOUDABI_MAP_SHARED)
-		mmap_args.flags |= MAP_SHARED;
+		flags |= MAP_SHARED;

 	/* Translate protection. */
-	error = convert_mprot(uap->prot, &mmap_args.prot);
+	error = convert_mprot(uap->prot, &prot);
 	if (error != 0)
 		return (error);

-	return (sys_mmap(td, &mmap_args));
+	return (kern_vm_mmap(td, (vm_offset_t)uap->addr, uap->len, prot,
+	    flags, uap->fd, uap->off));
 }

 int
 cloudabi_sys_mem_protect(struct thread *td,
     struct cloudabi_sys_mem_protect_args *uap)
 {
-	struct mprotect_args mprotect_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len,
-	};
-	int error;
+	int error, prot;

 	/* Translate protection. */
-	error = convert_mprot(uap->prot, &mprotect_args.prot);
+	error = convert_mprot(uap->prot, &prot);
 	if (error != 0)
 		return (error);

-	return (sys_mprotect(td, &mprotect_args));
+	return (kern_vm_mprotect(td, (vm_offset_t)uap->mapping,
+	    uap->mapping_len, prot));
 }

 int
 cloudabi_sys_mem_sync(struct thread *td, struct cloudabi_sys_mem_sync_args *uap)
 {
-	struct msync_args msync_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len,
-	};
+	int flags;

 	/* Convert flags. */
 	switch (uap->flags & (CLOUDABI_MS_ASYNC | CLOUDABI_MS_SYNC)) {
 	case CLOUDABI_MS_ASYNC:
-		msync_args.flags |= MS_ASYNC;
+		flags = MS_ASYNC;
 		break;
 	case CLOUDABI_MS_SYNC:
-		msync_args.flags |= MS_SYNC;
+		flags = MS_SYNC;
 		break;
 	default:
 		return (EINVAL);
 	}
 	if ((uap->flags & CLOUDABI_MS_INVALIDATE) != 0)
-		msync_args.flags |= MS_INVALIDATE;
+		flags |= MS_INVALIDATE;

-	return (sys_msync(td, &msync_args));
+	return (kern_vm_msync(td, (vm_offset_t)uap->mapping,
+	    uap->mapping_len, flags));
 }

 int
 cloudabi_sys_mem_unlock(struct thread *td,
     struct cloudabi_sys_mem_unlock_args *uap)
 {
-	struct munlock_args munlock_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len
-	};

-	return (sys_munlock(td, &munlock_args));
+	return (kern_vm_munlock(td, (vm_offset_t)uap->mapping, uap->mapping_len));
 }

 int
 cloudabi_sys_mem_unmap(struct thread *td,
     struct cloudabi_sys_mem_unmap_args *uap)
 {
-	struct munmap_args munmap_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len
-	};

-	return (sys_munmap(td, &munmap_args));
+	return (kern_vm_munmap(td, (vm_offset_t)uap->mapping, uap->mapping_len));
 }

sys/compat/freebsd32/freebsd32_misc.c

@@ -449,42 +449,30 @@ freebsd32_fexecve(struct thread *td, struct freebsd32_fexecve_args *uap)
 int
 freebsd32_mprotect(struct thread *td, struct freebsd32_mprotect_args *uap)
 {
-	struct mprotect_args ap;
+	int prot;

-	ap.addr = PTRIN(uap->addr);
-	ap.len = uap->len;
-	ap.prot = uap->prot;
+	prot = uap->prot;
 #if defined(__amd64__)
-	if (i386_read_exec && (ap.prot & PROT_READ) != 0)
-		ap.prot |= PROT_EXEC;
+	if (i386_read_exec && (prot & PROT_READ) != 0)
+		prot |= PROT_EXEC;
 #endif
-	return (sys_mprotect(td, &ap));
+
+	return (kern_vm_mprotect(td, (vm_offset_t)PTRIN(uap->addr),
+	    uap->len, prot));
 }

 int
 freebsd32_mmap(struct thread *td, struct freebsd32_mmap_args *uap)
 {
-	struct mmap_args ap;
-	vm_offset_t addr = (vm_offset_t) uap->addr;
-	vm_size_t len = uap->len;
-	int prot = uap->prot;
-	int flags = uap->flags;
-	int fd = uap->fd;
-	off_t pos = PAIR32TO64(off_t,uap->pos);
+	int prot;

+	prot = uap->prot;
 #if defined(__amd64__)
 	if (i386_read_exec && (prot & PROT_READ))
 		prot |= PROT_EXEC;
 #endif

-	ap.addr = (void *) addr;
-	ap.len = len;
-	ap.prot = prot;
-	ap.flags = flags;
-	ap.fd = fd;
-	ap.pos = pos;
-
-	return (sys_mmap(td, &ap));
+	return (kern_vm_mmap(td, (vm_offset_t)uap->addr, uap->len, prot,
+	    uap->flags, uap->fd, PAIR32TO64(off_t, uap->pos)));
 }

 #ifdef COMPAT_FREEBSD6

sys/compat/linux/linux_misc.c

@@ -585,10 +585,8 @@ linux_select(struct thread *td, struct linux_select_args *args)
 int
 linux_mremap(struct thread *td, struct linux_mremap_args *args)
 {
-	struct munmap_args /* {
-		void *addr;
-		size_t len;
-	} */ bsd_args;
+	uintptr_t addr;
+	size_t len;
 	int error = 0;

 #ifdef DEBUG
@@ -623,10 +621,9 @@ linux_mremap(struct thread *td, struct linux_mremap_args *args)
 	}

 	if (args->new_len < args->old_len) {
-		bsd_args.addr =
-		    (caddr_t)((uintptr_t)args->addr + args->new_len);
-		bsd_args.len = args->old_len - args->new_len;
-		error = sys_munmap(td, &bsd_args);
+		addr = args->addr + args->new_len;
+		len = args->old_len - args->new_len;
+		error = kern_vm_munmap(td, addr, len);
 	}

 	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
@@ -640,13 +637,9 @@ linux_mremap(struct thread *td, struct linux_mremap_args *args)
 int
 linux_msync(struct thread *td, struct linux_msync_args *args)
 {
-	struct msync_args bsd_args;
-
-	bsd_args.addr = (caddr_t)(uintptr_t)args->addr;
-	bsd_args.len = (uintptr_t)args->len;
-	bsd_args.flags = args->fl & ~LINUX_MS_SYNC;

-	return (sys_msync(td, &bsd_args));
+	return (kern_vm_msync(td, args->addr, args->len,
+	    args->fl & ~LINUX_MS_SYNC));
 }

 int

sys/compat/linux/linux_mmap.c

@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/sysproto.h>

 #include <vm/pmap.h>
+#include <vm/vm_extern.h>
 #include <vm/vm_map.h>

 #include <compat/linux/linux_emul.h>
@@ -67,15 +68,7 @@ linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
 {
 	struct proc *p = td->td_proc;
 	struct vmspace *vms = td->td_proc->p_vmspace;
-	struct mmap_args /* {
-		caddr_t addr;
-		size_t len;
-		int prot;
-		int flags;
-		int fd;
-		off_t pos;
-	} */ bsd_args;
-	int error;
+	int bsd_flags, error;
 	struct file *fp;
 	cap_rights_t rights;

@@ -83,7 +76,7 @@ linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
 	    addr, len, prot, flags, fd, pos);

 	error = 0;
-	bsd_args.flags = 0;
+	bsd_flags = 0;
 	fp = NULL;

 	/*
@@ -94,21 +87,21 @@ linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
 		return (EINVAL);

 	if (flags & LINUX_MAP_SHARED)
-		bsd_args.flags |= MAP_SHARED;
+		bsd_flags |= MAP_SHARED;
 	if (flags & LINUX_MAP_PRIVATE)
-		bsd_args.flags |= MAP_PRIVATE;
+		bsd_flags |= MAP_PRIVATE;
 	if (flags & LINUX_MAP_FIXED)
-		bsd_args.flags |= MAP_FIXED;
+		bsd_flags |= MAP_FIXED;
 	if (flags & LINUX_MAP_ANON) {
 		/* Enforce pos to be on page boundary, then ignore. */
 		if ((pos & PAGE_MASK) != 0)
 			return (EINVAL);
 		pos = 0;
-		bsd_args.flags |= MAP_ANON;
+		bsd_flags |= MAP_ANON;
 	} else
-		bsd_args.flags |= MAP_NOSYNC;
+		bsd_flags |= MAP_NOSYNC;
 	if (flags & LINUX_MAP_GROWSDOWN)
-		bsd_args.flags |= MAP_STACK;
+		bsd_flags |= MAP_STACK;

 	/*
 	 * PROT_READ, PROT_WRITE, or PROT_EXEC implies PROT_READ and PROT_EXEC
@@ -118,14 +111,13 @@ linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
 	 *
 	 * XXX. Linux checks that the file system is not mounted with noexec.
 	 */
-	bsd_args.prot = prot;
 #if defined(__amd64__)
-	linux_fixup_prot(td, &bsd_args.prot);
+	linux_fixup_prot(td, &prot);
 #endif

 	/* Linux does not check file descriptor when MAP_ANONYMOUS is set. */
-	bsd_args.fd = (bsd_args.flags & MAP_ANON) ? -1 : fd;
-	if (bsd_args.fd != -1) {
+	fd = (bsd_flags & MAP_ANON) ? -1 : fd;
+	if (fd != -1) {
 		/*
 		 * Linux follows Solaris mmap(2) description:
 		 * The file descriptor fildes is opened with
@@ -133,8 +125,7 @@ linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
 		 * protection options specified.
 		 */

-		error = fget(td, bsd_args.fd,
-		    cap_rights_init(&rights, CAP_MMAP), &fp);
+		error = fget(td, fd, cap_rights_init(&rights, CAP_MMAP), &fp);
 		if (error != 0)
 			return (error);
 		if (fp->f_type != DTYPE_VNODE) {
@@ -205,21 +196,13 @@ linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
 		 * we map the full stack, since we don't have a way
 		 * to autogrow it.
 		 */
-		if (len > STACK_SIZE - GUARD_SIZE) {
-			bsd_args.addr = (caddr_t)addr;
-			bsd_args.len = len;
-		} else {
-			bsd_args.addr = (caddr_t)addr -
-			    (STACK_SIZE - GUARD_SIZE - len);
-			bsd_args.len = STACK_SIZE - GUARD_SIZE;
+		if (len <= STACK_SIZE - GUARD_SIZE) {
+			addr = addr - (STACK_SIZE - GUARD_SIZE - len);
+			len = STACK_SIZE - GUARD_SIZE;
 		}
-	} else {
-		bsd_args.addr = (caddr_t)addr;
-		bsd_args.len = len;
 	}
-	bsd_args.pos = pos;

-	error = sys_mmap(td, &bsd_args);
+	error = kern_vm_mmap(td, addr, len, prot, bsd_flags, fd, pos);

 	LINUX_CTR2(mmap2, "return: %d (%p)", error, td->td_retval[0]);
@@ -229,16 +212,11 @@ linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
 int
 linux_mprotect_common(struct thread *td, uintptr_t addr, size_t len, int prot)
 {
-	struct mprotect_args bsd_args;
-
-	bsd_args.addr = (void *)addr;
-	bsd_args.len = len;
-	bsd_args.prot = prot;

 #if defined(__amd64__)
-	linux_fixup_prot(td, &bsd_args.prot);
+	linux_fixup_prot(td, &prot);
 #endif
-	return (sys_mprotect(td, &bsd_args));
+
+	return (kern_vm_mprotect(td, addr, len, prot));
 }

 #if defined(__amd64__)

sys/vm/vm_extern.h

@@ -71,6 +71,16 @@ void kmem_init(vm_offset_t, vm_offset_t);
 void kmem_init_zero_region(void);
 void kmeminit(void);
+int kern_vm_mmap(struct thread *td, vm_offset_t addr, vm_size_t size,
+    vm_prot_t prot, int flags, int fd, off_t pos);
+int kern_vm_mprotect(struct thread *td, vm_offset_t addr, vm_size_t size,
+    vm_prot_t prot);
+int kern_vm_msync(struct thread *td, vm_offset_t addr, vm_size_t size,
+    int flags);
+int kern_vm_munlock(struct thread *td, vm_offset_t addr, vm_size_t size);
+int kern_vm_munmap(struct thread *td, vm_offset_t addr, vm_size_t size);
+int kern_vm_madvise(struct thread *td, vm_offset_t addr, vm_size_t len,
+    int behav);
 void swapout_procs(int);
 int kernacc(void *, int, int);
 int useracc(void *, int, int);
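
The declarations above are the entire new in-kernel interface: any kernel code
holding the calling struct thread can now perform these VM operations without
constructing a userland-style argument struct. A hypothetical consumer, purely
for illustration (emulator_release_region() is invented here and is not part of
this commit; the includes mirror the cloudabi hunk above):

	#include <sys/param.h>
	#include <sys/proc.h>
	#include <vm/vm_extern.h>

	/*
	 * Hypothetical helper: drop a scratch mapping set up for an emulated
	 * process.  kern_vm_munmap() itself rounds addr/size to page
	 * boundaries and returns EINVAL for a zero length, as the vm_mmap.c
	 * hunks below show, so no extra validation is needed here.
	 */
	static int
	emulator_release_region(struct thread *td, vm_offset_t addr, vm_size_t len)
	{

		return (kern_vm_munmap(td, addr, len));
	}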

sys/vm/vm_mmap.c

@@ -187,27 +187,26 @@ struct mmap_args {
  * MPSAFE
  */
 int
-sys_mmap(td, uap)
-	struct thread *td;
-	struct mmap_args *uap;
+sys_mmap(struct thread *td, struct mmap_args *uap)
+{
+
+	return (kern_vm_mmap(td, (vm_offset_t)uap->addr, uap->len,
+	    uap->prot, uap->flags, uap->fd, uap->pos));
+}
+
+int
+kern_vm_mmap(struct thread *td, vm_offset_t addr, vm_size_t size,
+    vm_prot_t prot, int flags, int fd, off_t pos)
 {
 	struct file *fp;
-	vm_offset_t addr;
-	vm_size_t size, pageoff;
+	vm_size_t pageoff;
 	vm_prot_t cap_maxprot;
-	int align, error, flags, prot;
-	off_t pos;
+	int align, error;
 	struct vmspace *vms = td->td_proc->p_vmspace;
 	cap_rights_t rights;

-	addr = (vm_offset_t) uap->addr;
-	size = uap->len;
-	prot = uap->prot;
-	flags = uap->flags;
-	pos = uap->pos;
 	fp = NULL;

-	AUDIT_ARG_FD(uap->fd);
+	AUDIT_ARG_FD(fd);

 	/*
 	 * Ignore old flags that used to be defined but did not do anything.
@@ -224,8 +223,8 @@ sys_mmap(td, uap)
 	 * pos.
 	 */
 	if (!SV_CURPROC_FLAG(SV_AOUT)) {
-		if ((uap->len == 0 && curproc->p_osrel >= P_OSREL_MAP_ANON) ||
-		    ((flags & MAP_ANON) != 0 && (uap->fd != -1 || pos != 0)))
+		if ((size == 0 && curproc->p_osrel >= P_OSREL_MAP_ANON) ||
+		    ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0)))
 			return (EINVAL);
 	} else {
 		if ((flags & MAP_ANON) != 0)
@@ -233,7 +232,7 @@ sys_mmap(td, uap)
 	}

 	if (flags & MAP_STACK) {
-		if ((uap->fd != -1) ||
+		if ((fd != -1) ||
 		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
 			return (EINVAL);
 		flags |= MAP_ANON;
@@ -353,7 +352,7 @@ sys_mmap(td, uap)
 	}
 	if (prot & PROT_EXEC)
 		cap_rights_set(&rights, CAP_MMAP_X);
-	error = fget_mmap(td, uap->fd, &rights, &cap_maxprot, &fp);
+	error = fget_mmap(td, fd, &rights, &cap_maxprot, &fp);
 	if (error != 0)
 		goto done;
 	if ((flags & (MAP_SHARED | MAP_PRIVATE)) == 0 &&
@@ -380,15 +379,9 @@ sys_mmap(td, uap)
 int
 freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
 {
-	struct mmap_args oargs;

-	oargs.addr = uap->addr;
-	oargs.len = uap->len;
-	oargs.prot = uap->prot;
-	oargs.flags = uap->flags;
-	oargs.fd = uap->fd;
-	oargs.pos = uap->pos;
-	return (sys_mmap(td, &oargs));
+	return (kern_vm_mmap(td, (vm_offset_t)uap->addr, uap->len,
+	    uap->prot, uap->flags, uap->fd, uap->pos));
 }
 #endif
@@ -404,11 +397,8 @@ struct ommap_args {
 };
 #endif
 int
-ommap(td, uap)
-	struct thread *td;
-	struct ommap_args *uap;
+ommap(struct thread *td, struct ommap_args *uap)
 {
-	struct mmap_args nargs;
 	static const char cvtbsdprot[8] = {
 		0,
 		PROT_EXEC,
@@ -419,36 +409,34 @@ ommap(td, uap)
 		PROT_WRITE | PROT_READ,
 		PROT_EXEC | PROT_WRITE | PROT_READ,
 	};
+	int flags, prot;

 #define	OMAP_ANON	0x0002
 #define	OMAP_COPY	0x0020
 #define	OMAP_SHARED	0x0010
 #define	OMAP_FIXED	0x0100

-	nargs.addr = uap->addr;
-	nargs.len = uap->len;
-	nargs.prot = cvtbsdprot[uap->prot & 0x7];
+	prot = cvtbsdprot[uap->prot & 0x7];
 #ifdef COMPAT_FREEBSD32
 #if defined(__amd64__)
 	if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
-	    nargs.prot != 0)
-		nargs.prot |= PROT_EXEC;
+	    prot != 0)
+		prot |= PROT_EXEC;
 #endif
 #endif
-	nargs.flags = 0;
+	flags = 0;
 	if (uap->flags & OMAP_ANON)
-		nargs.flags |= MAP_ANON;
+		flags |= MAP_ANON;
 	if (uap->flags & OMAP_COPY)
-		nargs.flags |= MAP_COPY;
+		flags |= MAP_COPY;
 	if (uap->flags & OMAP_SHARED)
-		nargs.flags |= MAP_SHARED;
+		flags |= MAP_SHARED;
 	else
-		nargs.flags |= MAP_PRIVATE;
+		flags |= MAP_PRIVATE;
 	if (uap->flags & OMAP_FIXED)
-		nargs.flags |= MAP_FIXED;
-	nargs.fd = uap->fd;
-	nargs.pos = uap->pos;
-	return (sys_mmap(td, &nargs));
+		flags |= MAP_FIXED;
+
+	return (kern_vm_mmap(td, (vm_offset_t)uap->addr, uap->len,
+	    prot, flags, uap->fd, uap->pos));
 }
 #endif				/* COMPAT_43 */
@@ -464,20 +452,20 @@ struct msync_args {
  * MPSAFE
  */
 int
-sys_msync(td, uap)
-	struct thread *td;
-	struct msync_args *uap;
+sys_msync(struct thread *td, struct msync_args *uap)
 {
-	vm_offset_t addr;
-	vm_size_t size, pageoff;
-	int flags;
+
+	return (kern_vm_msync(td, (vm_offset_t)uap->addr, uap->len,
+	    uap->flags));
+}
+
+int
+kern_vm_msync(struct thread *td, vm_offset_t addr, vm_size_t size, int flags)
+{
+	vm_size_t pageoff;
 	vm_map_t map;
 	int rv;

-	addr = (vm_offset_t) uap->addr;
-	size = uap->len;
-	flags = uap->flags;
-
 	pageoff = (addr & PAGE_MASK);
 	addr -= pageoff;
 	size += pageoff;
@@ -519,21 +507,23 @@ struct munmap_args {
  * MPSAFE
  */
 int
-sys_munmap(td, uap)
-	struct thread *td;
-	struct munmap_args *uap;
+sys_munmap(struct thread *td, struct munmap_args *uap)
+{
+
+	return (kern_vm_munmap(td, (vm_offset_t)uap->addr, uap->len));
+}
+
+int
+kern_vm_munmap(struct thread *td, vm_offset_t addr, vm_size_t size)
 {
 #ifdef HWPMC_HOOKS
 	struct pmckern_map_out pkm;
 	vm_map_entry_t entry;
 	bool pmc_handled;
 #endif
-	vm_offset_t addr;
-	vm_size_t size, pageoff;
+	vm_size_t pageoff;
 	vm_map_t map;

-	addr = (vm_offset_t) uap->addr;
-	size = uap->len;
 	if (size == 0)
 		return (EINVAL);
@@ -602,18 +592,20 @@ struct mprotect_args {
  * MPSAFE
  */
 int
-sys_mprotect(td, uap)
-	struct thread *td;
-	struct mprotect_args *uap;
+sys_mprotect(struct thread *td, struct mprotect_args *uap)
 {
-	vm_offset_t addr;
-	vm_size_t size, pageoff;
-	vm_prot_t prot;

-	addr = (vm_offset_t) uap->addr;
-	size = uap->len;
-	prot = uap->prot & VM_PROT_ALL;
+	return (kern_vm_mprotect(td, (vm_offset_t)uap->addr, uap->len,
+	    uap->prot));
+}
+
+int
+kern_vm_mprotect(struct thread *td, vm_offset_t addr, vm_size_t size,
+    vm_prot_t prot)
+{
+	vm_size_t pageoff;

+	prot = (prot & VM_PROT_ALL);
 	pageoff = (addr & PAGE_MASK);
 	addr -= pageoff;
 	size += pageoff;
@@ -688,6 +680,14 @@ int
 sys_madvise(td, uap)
 	struct thread *td;
 	struct madvise_args *uap;
+{
+
+	return (kern_vm_madvise(td, (vm_offset_t)uap->addr, uap->len,
+	    uap->behav));
+}
+
+int
+kern_vm_madvise(struct thread *td, vm_offset_t addr, vm_size_t len, int behav)
 {
 	vm_offset_t start, end;
 	vm_map_t map;
@@ -697,7 +697,7 @@ sys_madvise(td, uap)
 	 * Check for our special case, advising the swap pager we are
 	 * "immortal."
 	 */
-	if (uap->behav == MADV_PROTECT) {
+	if (behav == MADV_PROTECT) {
 		flags = PPROT_SET;
 		return (kern_procctl(td, P_PID, td->td_proc->p_pid,
 		    PROC_SPROTECT, &flags));
@@ -706,27 +706,26 @@ sys_madvise(td, uap)
 	/*
 	 * Check for illegal behavior
 	 */
-	if (uap->behav < 0 || uap->behav > MADV_CORE)
+	if (behav < 0 || behav > MADV_CORE)
 		return (EINVAL);
 	/*
 	 * Check for illegal addresses.  Watch out for address wrap... Note
 	 * that VM_*_ADDRESS are not constants due to casts (argh).
 	 */
 	map = &td->td_proc->p_vmspace->vm_map;
-	if ((vm_offset_t)uap->addr < vm_map_min(map) ||
-	    (vm_offset_t)uap->addr + uap->len > vm_map_max(map))
+	if (addr < vm_map_min(map) || addr + len > vm_map_max(map))
 		return (EINVAL);
-	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
+	if ((addr + len) < addr)
 		return (EINVAL);

 	/*
 	 * Since this routine is only advisory, we default to conservative
 	 * behavior.
 	 */
-	start = trunc_page((vm_offset_t) uap->addr);
-	end = round_page((vm_offset_t) uap->addr + uap->len);
+	start = trunc_page(addr);
+	end = round_page(addr + len);

-	if (vm_map_madvise(map, start, end, uap->behav))
+	if (vm_map_madvise(map, start, end, behav))
 		return (EINVAL);
 	return (0);
 }
@@ -1189,12 +1188,16 @@ struct munlock_args {
  * MPSAFE
  */
 int
-sys_munlock(td, uap)
-	struct thread *td;
-	struct munlock_args *uap;
+sys_munlock(struct thread *td, struct munlock_args *uap)
 {
-	vm_offset_t addr, end, last, start;
-	vm_size_t size;
+
+	return (kern_vm_munlock(td, (vm_offset_t)uap->addr, uap->len));
+}
+
+int
+kern_vm_munlock(struct thread *td, vm_offset_t addr, vm_size_t size)
+{
+	vm_offset_t end, last, start;
 #ifdef RACCT
 	vm_map_t map;
 #endif
@@ -1203,8 +1206,6 @@ sys_munlock(td, uap)
 	error = priv_check(td, PRIV_VM_MUNLOCK);
 	if (error)
 		return (error);
-	addr = (vm_offset_t)uap->addr;
-	size = uap->len;
 	last = addr + size;
 	start = trunc_page(addr);
 	end = round_page(last);