Add kern_vm_mmap(), kern_vm_mprotect(), kern_vm_msync(), kern_vm_munlock(),
kern_vm_munmap(), and kern_vm_madvise(), and use them in various compats
instead of their sys_*() counterparts.

Reviewed by:	ed, dchagin, kib
MFC after:	2 weeks
Sponsored by:	DARPA, AFRL
Differential Revision:	https://reviews.freebsd.org/D9378
commit 322d98e692
parent 54b033bd1b
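For orientation only (this sketch is not part of the commit, it is condensed from the hunks in the diff below): each sys_*() syscall handler becomes a thin wrapper that decodes its userland argument structure and forwards plain kernel types to a new kern_vm_*() function, which compat layers can then call directly instead of faking up a sys_*() argument struct.

/* --- illustrative sketch, condensed from the munmap hunks below --- */
int
sys_munmap(struct thread *td, struct munmap_args *uap)
{

	return (kern_vm_munmap(td, (vm_offset_t)uap->addr, uap->len));
}

/* A compat handler now calls the kernel-internal entry point directly. */
int
cloudabi_sys_mem_unmap(struct thread *td,
    struct cloudabi_sys_mem_unmap_args *uap)
{

	return (kern_vm_munmap(td, (vm_offset_t)uap->mapping, uap->mapping_len));
}
/* --- end of sketch --- */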
@@ -28,7 +28,9 @@ __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/mman.h>
-#include <sys/sysproto.h>
+#include <sys/proc.h>
 
+#include <vm/vm_extern.h>
+
 #include <contrib/cloudabi/cloudabi_types_common.h>
 
@@ -62,137 +64,115 @@ int
 cloudabi_sys_mem_advise(struct thread *td,
     struct cloudabi_sys_mem_advise_args *uap)
 {
-	struct madvise_args madvise_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len
-	};
+	int behav;
 
 	switch (uap->advice) {
 	case CLOUDABI_ADVICE_DONTNEED:
-		madvise_args.behav = MADV_DONTNEED;
+		behav = MADV_DONTNEED;
 		break;
 	case CLOUDABI_ADVICE_NORMAL:
-		madvise_args.behav = MADV_NORMAL;
+		behav = MADV_NORMAL;
 		break;
 	case CLOUDABI_ADVICE_RANDOM:
-		madvise_args.behav = MADV_RANDOM;
+		behav = MADV_RANDOM;
 		break;
 	case CLOUDABI_ADVICE_SEQUENTIAL:
-		madvise_args.behav = MADV_SEQUENTIAL;
+		behav = MADV_SEQUENTIAL;
 		break;
 	case CLOUDABI_ADVICE_WILLNEED:
-		madvise_args.behav = MADV_WILLNEED;
+		behav = MADV_WILLNEED;
 		break;
 	default:
 		return (EINVAL);
 	}
 
-	return (sys_madvise(td, &madvise_args));
+	return (kern_vm_madvise(td, (vm_offset_t)uap->mapping,
+	    uap->mapping_len, behav));
 }
 
 int
 cloudabi_sys_mem_lock(struct thread *td, struct cloudabi_sys_mem_lock_args *uap)
 {
-	struct mlock_args mlock_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len
-	};
 
-	return (sys_mlock(td, &mlock_args));
+	return (vm_mlock(td->td_proc, td->td_ucred, uap->mapping,
+	    uap->mapping_len));
 }
 
 int
 cloudabi_sys_mem_map(struct thread *td, struct cloudabi_sys_mem_map_args *uap)
 {
-	struct mmap_args mmap_args = {
-		.addr = uap->addr,
-		.len = uap->len,
-		.fd = uap->fd,
-		.pos = uap->off
-	};
-	int error;
+	int error, flags, prot;
 
 	/* Translate flags. */
+	flags = 0;
 	if (uap->flags & CLOUDABI_MAP_ANON)
-		mmap_args.flags |= MAP_ANON;
+		flags |= MAP_ANON;
 	if (uap->flags & CLOUDABI_MAP_FIXED)
-		mmap_args.flags |= MAP_FIXED;
+		flags |= MAP_FIXED;
 	if (uap->flags & CLOUDABI_MAP_PRIVATE)
-		mmap_args.flags |= MAP_PRIVATE;
+		flags |= MAP_PRIVATE;
 	if (uap->flags & CLOUDABI_MAP_SHARED)
-		mmap_args.flags |= MAP_SHARED;
+		flags |= MAP_SHARED;
 
 	/* Translate protection. */
-	error = convert_mprot(uap->prot, &mmap_args.prot);
+	error = convert_mprot(uap->prot, &prot);
 	if (error != 0)
 		return (error);
 
-	return (sys_mmap(td, &mmap_args));
+	return (kern_vm_mmap(td, (vm_offset_t)uap->addr, uap->len, prot,
+	    flags, uap->fd, uap->off));
 }
 
 int
 cloudabi_sys_mem_protect(struct thread *td,
     struct cloudabi_sys_mem_protect_args *uap)
 {
-	struct mprotect_args mprotect_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len,
-	};
-	int error;
+	int error, prot;
 
 	/* Translate protection. */
-	error = convert_mprot(uap->prot, &mprotect_args.prot);
+	error = convert_mprot(uap->prot, &prot);
 	if (error != 0)
 		return (error);
 
-	return (sys_mprotect(td, &mprotect_args));
+	return (kern_vm_mprotect(td, (vm_offset_t)uap->mapping,
+	    uap->mapping_len, prot));
 }
 
 int
 cloudabi_sys_mem_sync(struct thread *td, struct cloudabi_sys_mem_sync_args *uap)
 {
-	struct msync_args msync_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len,
-	};
+	int flags;
 
 	/* Convert flags. */
 	switch (uap->flags & (CLOUDABI_MS_ASYNC | CLOUDABI_MS_SYNC)) {
 	case CLOUDABI_MS_ASYNC:
-		msync_args.flags |= MS_ASYNC;
+		flags = MS_ASYNC;
 		break;
 	case CLOUDABI_MS_SYNC:
-		msync_args.flags |= MS_SYNC;
+		flags = MS_SYNC;
 		break;
 	default:
 		return (EINVAL);
 	}
 	if ((uap->flags & CLOUDABI_MS_INVALIDATE) != 0)
-		msync_args.flags |= MS_INVALIDATE;
+		flags |= MS_INVALIDATE;
 
-	return (sys_msync(td, &msync_args));
+	return (kern_vm_msync(td, (vm_offset_t)uap->mapping,
+	    uap->mapping_len, flags));
 }
 
 int
 cloudabi_sys_mem_unlock(struct thread *td,
     struct cloudabi_sys_mem_unlock_args *uap)
 {
-	struct munlock_args munlock_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len
-	};
 
-	return (sys_munlock(td, &munlock_args));
+	return (kern_vm_munlock(td, (vm_offset_t)uap->mapping, uap->mapping_len));
 }
 
 int
 cloudabi_sys_mem_unmap(struct thread *td,
     struct cloudabi_sys_mem_unmap_args *uap)
 {
-	struct munmap_args munmap_args = {
-		.addr = uap->mapping,
-		.len = uap->mapping_len
-	};
 
-	return (sys_munmap(td, &munmap_args));
+	return (kern_vm_munmap(td, (vm_offset_t)uap->mapping, uap->mapping_len));
 }
@@ -449,42 +449,30 @@ freebsd32_fexecve(struct thread *td, struct freebsd32_fexecve_args *uap)
 int
 freebsd32_mprotect(struct thread *td, struct freebsd32_mprotect_args *uap)
 {
-	struct mprotect_args ap;
+	int prot;
 
-	ap.addr = PTRIN(uap->addr);
-	ap.len = uap->len;
-	ap.prot = uap->prot;
+	prot = uap->prot;
 #if defined(__amd64__)
-	if (i386_read_exec && (ap.prot & PROT_READ) != 0)
-		ap.prot |= PROT_EXEC;
+	if (i386_read_exec && (prot & PROT_READ) != 0)
+		prot |= PROT_EXEC;
 #endif
-	return (sys_mprotect(td, &ap));
+	return (kern_vm_mprotect(td, (vm_offset_t)PTRIN(uap->addr),
+	    uap->len, prot));
 }
 
 int
 freebsd32_mmap(struct thread *td, struct freebsd32_mmap_args *uap)
 {
-	struct mmap_args ap;
-	vm_offset_t addr = (vm_offset_t) uap->addr;
-	vm_size_t len = uap->len;
-	int prot = uap->prot;
-	int flags = uap->flags;
-	int fd = uap->fd;
-	off_t pos = PAIR32TO64(off_t,uap->pos);
+	int prot;
 
+	prot = uap->prot;
 #if defined(__amd64__)
 	if (i386_read_exec && (prot & PROT_READ))
 		prot |= PROT_EXEC;
 #endif
 
-	ap.addr = (void *) addr;
-	ap.len = len;
-	ap.prot = prot;
-	ap.flags = flags;
-	ap.fd = fd;
-	ap.pos = pos;
-
-	return (sys_mmap(td, &ap));
+	return (kern_vm_mmap(td, (vm_offset_t)uap->addr, uap->len, prot,
+	    uap->flags, uap->fd, PAIR32TO64(off_t, uap->pos)));
 }
 
 #ifdef COMPAT_FREEBSD6
@@ -585,10 +585,8 @@ select_out:
 int
 linux_mremap(struct thread *td, struct linux_mremap_args *args)
 {
-	struct munmap_args /* {
-		void *addr;
-		size_t len;
-	} */ bsd_args;
+	uintptr_t addr;
+	size_t len;
 	int error = 0;
 
 #ifdef DEBUG
@@ -623,10 +621,9 @@ linux_mremap(struct thread *td, struct linux_mremap_args *args)
 	}
 
 	if (args->new_len < args->old_len) {
-		bsd_args.addr =
-		    (caddr_t)((uintptr_t)args->addr + args->new_len);
-		bsd_args.len = args->old_len - args->new_len;
-		error = sys_munmap(td, &bsd_args);
+		addr = args->addr + args->new_len;
+		len = args->old_len - args->new_len;
+		error = kern_vm_munmap(td, addr, len);
 	}
 
 	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
@@ -640,13 +637,9 @@ linux_mremap(struct thread *td, struct linux_mremap_args *args)
 int
 linux_msync(struct thread *td, struct linux_msync_args *args)
 {
-	struct msync_args bsd_args;
 
-	bsd_args.addr = (caddr_t)(uintptr_t)args->addr;
-	bsd_args.len = (uintptr_t)args->len;
-	bsd_args.flags = args->fl & ~LINUX_MS_SYNC;
-
-	return (sys_msync(td, &bsd_args));
+	return (kern_vm_msync(td, args->addr, args->len,
+	    args->fl & ~LINUX_MS_SYNC));
 }
 
 int
@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/sysproto.h>
 
 #include <vm/pmap.h>
+#include <vm/vm_extern.h>
 #include <vm/vm_map.h>
 
 #include <compat/linux/linux_emul.h>
@@ -67,15 +68,7 @@ linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
 {
 	struct proc *p = td->td_proc;
 	struct vmspace *vms = td->td_proc->p_vmspace;
-	struct mmap_args /* {
-		caddr_t addr;
-		size_t len;
-		int prot;
-		int flags;
-		int fd;
-		off_t pos;
-	} */ bsd_args;
-	int error;
+	int bsd_flags, error;
 	struct file *fp;
 
 	cap_rights_t rights;
@@ -83,7 +76,7 @@ linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
 	    addr, len, prot, flags, fd, pos);
 
 	error = 0;
-	bsd_args.flags = 0;
+	bsd_flags = 0;
 	fp = NULL;
 
 	/*
@@ -94,21 +87,21 @@ linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
 		return (EINVAL);
 
 	if (flags & LINUX_MAP_SHARED)
-		bsd_args.flags |= MAP_SHARED;
+		bsd_flags |= MAP_SHARED;
 	if (flags & LINUX_MAP_PRIVATE)
-		bsd_args.flags |= MAP_PRIVATE;
+		bsd_flags |= MAP_PRIVATE;
 	if (flags & LINUX_MAP_FIXED)
-		bsd_args.flags |= MAP_FIXED;
+		bsd_flags |= MAP_FIXED;
 	if (flags & LINUX_MAP_ANON) {
 		/* Enforce pos to be on page boundary, then ignore. */
 		if ((pos & PAGE_MASK) != 0)
 			return (EINVAL);
 		pos = 0;
-		bsd_args.flags |= MAP_ANON;
+		bsd_flags |= MAP_ANON;
 	} else
-		bsd_args.flags |= MAP_NOSYNC;
+		bsd_flags |= MAP_NOSYNC;
 	if (flags & LINUX_MAP_GROWSDOWN)
-		bsd_args.flags |= MAP_STACK;
+		bsd_flags |= MAP_STACK;
 
 	/*
 	 * PROT_READ, PROT_WRITE, or PROT_EXEC implies PROT_READ and PROT_EXEC
@@ -118,14 +111,13 @@ linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
 	 *
 	 * XXX. Linux checks that the file system is not mounted with noexec.
 	 */
-	bsd_args.prot = prot;
 #if defined(__amd64__)
-	linux_fixup_prot(td, &bsd_args.prot);
+	linux_fixup_prot(td, &prot);
 #endif
 
 	/* Linux does not check file descriptor when MAP_ANONYMOUS is set. */
-	bsd_args.fd = (bsd_args.flags & MAP_ANON) ? -1 : fd;
-	if (bsd_args.fd != -1) {
+	fd = (bsd_flags & MAP_ANON) ? -1 : fd;
+	if (fd != -1) {
 		/*
 		 * Linux follows Solaris mmap(2) description:
 		 * The file descriptor fildes is opened with
@@ -133,8 +125,7 @@ linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
 		 * protection options specified.
 		 */
 
-		error = fget(td, bsd_args.fd,
-		    cap_rights_init(&rights, CAP_MMAP), &fp);
+		error = fget(td, fd, cap_rights_init(&rights, CAP_MMAP), &fp);
 		if (error != 0)
 			return (error);
 		if (fp->f_type != DTYPE_VNODE) {
@@ -205,21 +196,13 @@ linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
 		 * we map the full stack, since we don't have a way
 		 * to autogrow it.
 		 */
-		if (len > STACK_SIZE - GUARD_SIZE) {
-			bsd_args.addr = (caddr_t)addr;
-			bsd_args.len = len;
-		} else {
-			bsd_args.addr = (caddr_t)addr -
-			    (STACK_SIZE - GUARD_SIZE - len);
-			bsd_args.len = STACK_SIZE - GUARD_SIZE;
+		if (len <= STACK_SIZE - GUARD_SIZE) {
+			addr = addr - (STACK_SIZE - GUARD_SIZE - len);
+			len = STACK_SIZE - GUARD_SIZE;
 		}
-	} else {
-		bsd_args.addr = (caddr_t)addr;
-		bsd_args.len = len;
 	}
-	bsd_args.pos = pos;
 
-	error = sys_mmap(td, &bsd_args);
+	error = kern_vm_mmap(td, addr, len, prot, bsd_flags, fd, pos);
 
 	LINUX_CTR2(mmap2, "return: %d (%p)", error, td->td_retval[0]);
 
@@ -229,16 +212,11 @@ linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
 int
 linux_mprotect_common(struct thread *td, uintptr_t addr, size_t len, int prot)
 {
-	struct mprotect_args bsd_args;
-
-	bsd_args.addr = (void *)addr;
-	bsd_args.len = len;
-	bsd_args.prot = prot;
 
 #if defined(__amd64__)
-	linux_fixup_prot(td, &bsd_args.prot);
+	linux_fixup_prot(td, &prot);
 #endif
-	return (sys_mprotect(td, &bsd_args));
+	return (kern_vm_mprotect(td, addr, len, prot));
 }
 
 #if defined(__amd64__)
@@ -71,6 +71,16 @@ void kmem_init(vm_offset_t, vm_offset_t);
 void kmem_init_zero_region(void);
 void kmeminit(void);
 
+int kern_vm_mmap(struct thread *td, vm_offset_t addr, vm_size_t size,
+    vm_prot_t prot, int flags, int fd, off_t pos);
+int kern_vm_mprotect(struct thread *td, vm_offset_t addr, vm_size_t size,
+    vm_prot_t prot);
+int kern_vm_msync(struct thread *td, vm_offset_t addr, vm_size_t size,
+    int flags);
+int kern_vm_munlock(struct thread *td, vm_offset_t addr, vm_size_t size);
+int kern_vm_munmap(struct thread *td, vm_offset_t addr, vm_size_t size);
+int kern_vm_madvise(struct thread *td, vm_offset_t addr, vm_size_t len,
+    int behav);
 void swapout_procs(int);
 int kernacc(void *, int, int);
 int useracc(void *, int, int);
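The prototypes added above are the whole new kernel-internal surface. As a hypothetical usage sketch (the helper below is illustrative and not part of this commit), a caller that already holds a kernel vm_offset_t range would invoke them directly with MS_*/MADV_* constants from sys/mman.h:

/* --- illustrative sketch, hypothetical helper --- */
static int
example_sync_and_drop(struct thread *td, vm_offset_t addr, vm_size_t len)
{
	int error;

	/* Flush the range synchronously, then advise the VM to drop it. */
	error = kern_vm_msync(td, addr, len, MS_SYNC);
	if (error != 0)
		return (error);
	return (kern_vm_madvise(td, addr, len, MADV_DONTNEED));
}
/* --- end of sketch --- */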
sys/vm/vm_mmap.c (171 changed lines)
@@ -187,27 +187,26 @@ struct mmap_args {
  * MPSAFE
  */
 int
-sys_mmap(td, uap)
-	struct thread *td;
-	struct mmap_args *uap;
+sys_mmap(struct thread *td, struct mmap_args *uap)
+{
+
+	return (kern_vm_mmap(td, (vm_offset_t)uap->addr, uap->len,
+	    uap->prot, uap->flags, uap->fd, uap->pos));
+}
+
+int
+kern_vm_mmap(struct thread *td, vm_offset_t addr, vm_size_t size,
+    vm_prot_t prot, int flags, int fd, off_t pos)
 {
 	struct file *fp;
-	vm_offset_t addr;
-	vm_size_t size, pageoff;
+	vm_size_t pageoff;
 	vm_prot_t cap_maxprot;
-	int align, error, flags, prot;
-	off_t pos;
+	int align, error;
 	struct vmspace *vms = td->td_proc->p_vmspace;
 	cap_rights_t rights;
 
-	addr = (vm_offset_t) uap->addr;
-	size = uap->len;
-	prot = uap->prot;
-	flags = uap->flags;
-	pos = uap->pos;
-
 	fp = NULL;
-	AUDIT_ARG_FD(uap->fd);
+	AUDIT_ARG_FD(fd);
 
 	/*
 	 * Ignore old flags that used to be defined but did not do anything.
@@ -224,8 +223,8 @@ sys_mmap(td, uap)
 	 * pos.
 	 */
 	if (!SV_CURPROC_FLAG(SV_AOUT)) {
-		if ((uap->len == 0 && curproc->p_osrel >= P_OSREL_MAP_ANON) ||
-		    ((flags & MAP_ANON) != 0 && (uap->fd != -1 || pos != 0)))
+		if ((size == 0 && curproc->p_osrel >= P_OSREL_MAP_ANON) ||
+		    ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0)))
 			return (EINVAL);
 	} else {
 		if ((flags & MAP_ANON) != 0)
@@ -233,7 +232,7 @@ sys_mmap(td, uap)
 	}
 
 	if (flags & MAP_STACK) {
-		if ((uap->fd != -1) ||
+		if ((fd != -1) ||
 		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
 			return (EINVAL);
 		flags |= MAP_ANON;
@@ -353,7 +352,7 @@ sys_mmap(td, uap)
 	}
 	if (prot & PROT_EXEC)
 		cap_rights_set(&rights, CAP_MMAP_X);
-	error = fget_mmap(td, uap->fd, &rights, &cap_maxprot, &fp);
+	error = fget_mmap(td, fd, &rights, &cap_maxprot, &fp);
 	if (error != 0)
 		goto done;
 	if ((flags & (MAP_SHARED | MAP_PRIVATE)) == 0 &&
@@ -380,15 +379,9 @@ done:
 int
 freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
 {
-	struct mmap_args oargs;
-
-	oargs.addr = uap->addr;
-	oargs.len = uap->len;
-	oargs.prot = uap->prot;
-	oargs.flags = uap->flags;
-	oargs.fd = uap->fd;
-	oargs.pos = uap->pos;
-	return (sys_mmap(td, &oargs));
+
+	return (kern_vm_mmap(td, (vm_offset_t)uap->addr, uap->len,
+	    uap->prot, uap->flags, uap->fd, uap->pos));
 }
 #endif
 
@@ -404,11 +397,8 @@ struct ommap_args {
 };
 #endif
 int
-ommap(td, uap)
-	struct thread *td;
-	struct ommap_args *uap;
+ommap(struct thread *td, struct ommap_args *uap)
 {
-	struct mmap_args nargs;
 	static const char cvtbsdprot[8] = {
 		0,
 		PROT_EXEC,
@@ -419,36 +409,34 @@ ommap(td, uap)
 		PROT_WRITE | PROT_READ,
 		PROT_EXEC | PROT_WRITE | PROT_READ,
 	};
+	int flags, prot;
 
 #define	OMAP_ANON	0x0002
 #define	OMAP_COPY	0x0020
 #define	OMAP_SHARED	0x0010
 #define	OMAP_FIXED	0x0100
 
-	nargs.addr = uap->addr;
-	nargs.len = uap->len;
-	nargs.prot = cvtbsdprot[uap->prot & 0x7];
+	prot = cvtbsdprot[uap->prot & 0x7];
 #ifdef COMPAT_FREEBSD32
 #if defined(__amd64__)
 	if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
-	    nargs.prot != 0)
-		nargs.prot |= PROT_EXEC;
+	    prot != 0)
+		prot |= PROT_EXEC;
 #endif
 #endif
-	nargs.flags = 0;
+	flags = 0;
 	if (uap->flags & OMAP_ANON)
-		nargs.flags |= MAP_ANON;
+		flags |= MAP_ANON;
 	if (uap->flags & OMAP_COPY)
-		nargs.flags |= MAP_COPY;
+		flags |= MAP_COPY;
 	if (uap->flags & OMAP_SHARED)
-		nargs.flags |= MAP_SHARED;
+		flags |= MAP_SHARED;
 	else
-		nargs.flags |= MAP_PRIVATE;
+		flags |= MAP_PRIVATE;
 	if (uap->flags & OMAP_FIXED)
-		nargs.flags |= MAP_FIXED;
-	nargs.fd = uap->fd;
-	nargs.pos = uap->pos;
-	return (sys_mmap(td, &nargs));
+		flags |= MAP_FIXED;
+	return (kern_vm_mmap(td, (vm_offset_t)uap->addr, uap->len,
+	    prot, flags, uap->fd, uap->pos));
 }
 #endif				/* COMPAT_43 */
 
@@ -464,20 +452,20 @@ struct msync_args {
  * MPSAFE
  */
 int
-sys_msync(td, uap)
-	struct thread *td;
-	struct msync_args *uap;
+sys_msync(struct thread *td, struct msync_args *uap)
 {
-	vm_offset_t addr;
-	vm_size_t size, pageoff;
-	int flags;
+
+	return (kern_vm_msync(td, (vm_offset_t)uap->addr, uap->len,
+	    uap->flags));
+}
+
+int
+kern_vm_msync(struct thread *td, vm_offset_t addr, vm_size_t size, int flags)
+{
+	vm_size_t pageoff;
 	vm_map_t map;
 	int rv;
 
-	addr = (vm_offset_t) uap->addr;
-	size = uap->len;
-	flags = uap->flags;
-
 	pageoff = (addr & PAGE_MASK);
 	addr -= pageoff;
 	size += pageoff;
|
|||||||
* MPSAFE
|
* MPSAFE
|
||||||
*/
|
*/
|
||||||
int
|
int
|
||||||
sys_munmap(td, uap)
|
sys_munmap(struct thread *td, struct munmap_args *uap)
|
||||||
struct thread *td;
|
{
|
||||||
struct munmap_args *uap;
|
|
||||||
|
return (kern_vm_munmap(td, (vm_offset_t)uap->addr, uap->len));
|
||||||
|
}
|
||||||
|
|
||||||
|
int
|
||||||
|
kern_vm_munmap(struct thread *td, vm_offset_t addr, vm_size_t size)
|
||||||
{
|
{
|
||||||
#ifdef HWPMC_HOOKS
|
#ifdef HWPMC_HOOKS
|
||||||
struct pmckern_map_out pkm;
|
struct pmckern_map_out pkm;
|
||||||
vm_map_entry_t entry;
|
vm_map_entry_t entry;
|
||||||
bool pmc_handled;
|
bool pmc_handled;
|
||||||
#endif
|
#endif
|
||||||
vm_offset_t addr;
|
vm_size_t pageoff;
|
||||||
vm_size_t size, pageoff;
|
|
||||||
vm_map_t map;
|
vm_map_t map;
|
||||||
|
|
||||||
addr = (vm_offset_t) uap->addr;
|
|
||||||
size = uap->len;
|
|
||||||
if (size == 0)
|
if (size == 0)
|
||||||
return (EINVAL);
|
return (EINVAL);
|
||||||
|
|
||||||
@@ -602,18 +592,20 @@ struct mprotect_args {
 * MPSAFE
 */
 int
-sys_mprotect(td, uap)
-	struct thread *td;
-	struct mprotect_args *uap;
+sys_mprotect(struct thread *td, struct mprotect_args *uap)
 {
-	vm_offset_t addr;
-	vm_size_t size, pageoff;
-	vm_prot_t prot;
 
-	addr = (vm_offset_t) uap->addr;
-	size = uap->len;
-	prot = uap->prot & VM_PROT_ALL;
+	return (kern_vm_mprotect(td, (vm_offset_t)uap->addr, uap->len,
+	    uap->prot));
+}
+
+int
+kern_vm_mprotect(struct thread *td, vm_offset_t addr, vm_size_t size,
+    vm_prot_t prot)
+{
+	vm_size_t pageoff;
 
+	prot = (prot & VM_PROT_ALL);
 	pageoff = (addr & PAGE_MASK);
 	addr -= pageoff;
 	size += pageoff;
@@ -688,6 +680,14 @@ int
 sys_madvise(td, uap)
 	struct thread *td;
 	struct madvise_args *uap;
+{
+
+	return (kern_vm_madvise(td, (vm_offset_t)uap->addr, uap->len,
+	    uap->behav));
+}
+
+int
+kern_vm_madvise(struct thread *td, vm_offset_t addr, vm_size_t len, int behav)
 {
 	vm_offset_t start, end;
 	vm_map_t map;
@@ -697,7 +697,7 @@ sys_madvise(td, uap)
 	 * Check for our special case, advising the swap pager we are
 	 * "immortal."
 	 */
-	if (uap->behav == MADV_PROTECT) {
+	if (behav == MADV_PROTECT) {
 		flags = PPROT_SET;
 		return (kern_procctl(td, P_PID, td->td_proc->p_pid,
 		    PROC_SPROTECT, &flags));
@@ -706,27 +706,26 @@ sys_madvise(td, uap)
 	/*
 	 * Check for illegal behavior
 	 */
-	if (uap->behav < 0 || uap->behav > MADV_CORE)
+	if (behav < 0 || behav > MADV_CORE)
 		return (EINVAL);
 	/*
 	 * Check for illegal addresses.  Watch out for address wrap... Note
 	 * that VM_*_ADDRESS are not constants due to casts (argh).
 	 */
 	map = &td->td_proc->p_vmspace->vm_map;
-	if ((vm_offset_t)uap->addr < vm_map_min(map) ||
-	    (vm_offset_t)uap->addr + uap->len > vm_map_max(map))
+	if (addr < vm_map_min(map) || addr + len > vm_map_max(map))
 		return (EINVAL);
-	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
+	if ((addr + len) < addr)
 		return (EINVAL);
 
 	/*
 	 * Since this routine is only advisory, we default to conservative
 	 * behavior.
 	 */
-	start = trunc_page((vm_offset_t) uap->addr);
-	end = round_page((vm_offset_t) uap->addr + uap->len);
+	start = trunc_page(addr);
+	end = round_page(addr + len);
 
-	if (vm_map_madvise(map, start, end, uap->behav))
+	if (vm_map_madvise(map, start, end, behav))
 		return (EINVAL);
 	return (0);
 }
@@ -1189,12 +1188,16 @@ struct munlock_args {
 * MPSAFE
 */
 int
-sys_munlock(td, uap)
-	struct thread *td;
-	struct munlock_args *uap;
+sys_munlock(struct thread *td, struct munlock_args *uap)
 {
-	vm_offset_t addr, end, last, start;
-	vm_size_t size;
+
+	return (kern_vm_munlock(td, (vm_offset_t)uap->addr, uap->len));
+}
+
+int
+kern_vm_munlock(struct thread *td, vm_offset_t addr, vm_size_t size)
+{
+	vm_offset_t end, last, start;
 #ifdef RACCT
 	vm_map_t map;
 #endif
@@ -1203,8 +1206,6 @@ sys_munlock(td, uap)
 	error = priv_check(td, PRIV_VM_MUNLOCK);
 	if (error)
 		return (error);
-	addr = (vm_offset_t)uap->addr;
-	size = uap->len;
 	last = addr + size;
 	start = trunc_page(addr);
 	end = round_page(last);