Rework r313352.

Rename kern_vm_* functions to kern_*.  Move the prototypes to
syscallsubr.h.  Also change Mach VM types to uintptr_t/size_t as
needed, to avoid header pollution.

Requested by:	alc, jhb
Reviewed by:	alc
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
Differential revision:	https://reviews.freebsd.org/D9535

parent f719d5d44f
commit 0dac2c5955
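
The pattern behind the rename, condensed from the kern_msync() and kern_mmap() hunks below: the kern_*() entry points now take plain uintptr_t/size_t arguments, and the Mach VM types become locals of the implementation, which is what lets the prototypes live in sys/syscallsubr.h without dragging the VM headers into every consumer. A simplified sketch of that shape, not the committed function body:

int
kern_msync(struct thread *td, uintptr_t addr0, size_t size, int flags)
{
	vm_offset_t addr;	/* Mach VM type stays internal */
	vm_size_t pageoff;

	addr = addr0;		/* convert at the kernel-internal boundary */
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	/* ... alignment checks and the vm_map_sync() call are elided ... */
	return (0);
}
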
@@ -29,8 +29,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/param.h>
 #include <sys/mman.h>
 #include <sys/proc.h>
-
-#include <vm/vm_extern.h>
+#include <sys/syscallsubr.h>
 
 #include <contrib/cloudabi/cloudabi_types_common.h>
 
@@ -86,16 +85,16 @@ cloudabi_sys_mem_advise(struct thread *td,
 		return (EINVAL);
 	}
 
-	return (kern_vm_madvise(td, (vm_offset_t)uap->mapping,
-	    uap->mapping_len, behav));
+	return (kern_madvise(td, (uintptr_t)uap->mapping, uap->mapping_len,
+	    behav));
 }
 
 int
 cloudabi_sys_mem_lock(struct thread *td, struct cloudabi_sys_mem_lock_args *uap)
 {
 
-	return (vm_mlock(td->td_proc, td->td_ucred, uap->mapping,
-	    uap->mapping_len));
+	return (kern_mlock(td->td_proc, td->td_ucred,
+	    __DECONST(uintptr_t, uap->mapping), uap->mapping_len));
 }
 
 int
@@ -119,8 +118,8 @@ cloudabi_sys_mem_map(struct thread *td, struct cloudabi_sys_mem_map_args *uap)
 	if (error != 0)
 		return (error);
 
-	return (kern_vm_mmap(td, (vm_offset_t)uap->addr, uap->len, prot,
-	    flags, uap->fd, uap->off));
+	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, prot, flags,
+	    uap->fd, uap->off));
 }
 
 int
@@ -134,8 +133,8 @@ cloudabi_sys_mem_protect(struct thread *td,
 	if (error != 0)
 		return (error);
 
-	return (kern_vm_mprotect(td, (vm_offset_t)uap->mapping,
-	    uap->mapping_len, prot));
+	return (kern_mprotect(td, (uintptr_t)uap->mapping, uap->mapping_len,
+	    prot));
 }
 
 int
@@ -157,8 +156,8 @@ cloudabi_sys_mem_sync(struct thread *td, struct cloudabi_sys_mem_sync_args *uap)
 	if ((uap->flags & CLOUDABI_MS_INVALIDATE) != 0)
 		flags |= MS_INVALIDATE;
 
-	return (kern_vm_msync(td, (vm_offset_t)uap->mapping,
-	    uap->mapping_len, flags));
+	return (kern_msync(td, (uintptr_t)uap->mapping, uap->mapping_len,
+	    flags));
 }
 
 int
@@ -166,7 +165,8 @@ cloudabi_sys_mem_unlock(struct thread *td,
     struct cloudabi_sys_mem_unlock_args *uap)
 {
 
-	return (kern_vm_munlock(td, (vm_offset_t)uap->mapping, uap->mapping_len));
+	return (kern_munlock(td, __DECONST(uintptr_t, uap->mapping),
+	    uap->mapping_len));
 }
 
 int
@@ -174,5 +174,5 @@ cloudabi_sys_mem_unmap(struct thread *td,
     struct cloudabi_sys_mem_unmap_args *uap)
 {
 
-	return (kern_vm_munmap(td, (vm_offset_t)uap->mapping, uap->mapping_len));
+	return (kern_munmap(td, (uintptr_t)uap->mapping, uap->mapping_len));
 }

@@ -456,8 +456,8 @@ freebsd32_mprotect(struct thread *td, struct freebsd32_mprotect_args *uap)
 	if (i386_read_exec && (prot & PROT_READ) != 0)
 		prot |= PROT_EXEC;
 #endif
-	return (kern_vm_mprotect(td, (vm_offset_t)PTRIN(uap->addr),
-	    uap->len, prot));
+	return (kern_mprotect(td, (uintptr_t)PTRIN(uap->addr), uap->len,
+	    prot));
 }
 
 int
@@ -471,7 +471,7 @@ freebsd32_mmap(struct thread *td, struct freebsd32_mmap_args *uap)
 		prot |= PROT_EXEC;
 #endif
 
-	return (kern_vm_mmap(td, (vm_offset_t)uap->addr, uap->len, prot,
+	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, prot,
 	    uap->flags, uap->fd, PAIR32TO64(off_t, uap->pos)));
 }
 
@@ -480,17 +480,16 @@ int
 freebsd6_freebsd32_mmap(struct thread *td,
     struct freebsd6_freebsd32_mmap_args *uap)
 {
-	struct freebsd32_mmap_args ap;
+	int prot;
 
-	ap.addr = uap->addr;
-	ap.len = uap->len;
-	ap.prot = uap->prot;
-	ap.flags = uap->flags;
-	ap.fd = uap->fd;
-	ap.pos1 = uap->pos1;
-	ap.pos2 = uap->pos2;
+	prot = uap->prot;
+#if defined(__amd64__)
+	if (i386_read_exec && (prot & PROT_READ))
+		prot |= PROT_EXEC;
+#endif
 
-	return (freebsd32_mmap(td, &ap));
+	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, prot,
+	    uap->flags, uap->fd, PAIR32TO64(off_t, uap->pos)));
 }
 #endif
 

@@ -623,7 +623,7 @@ linux_mremap(struct thread *td, struct linux_mremap_args *args)
 	if (args->new_len < args->old_len) {
 		addr = args->addr + args->new_len;
 		len = args->old_len - args->new_len;
-		error = kern_vm_munmap(td, addr, len);
+		error = kern_munmap(td, addr, len);
 	}
 
 	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
@@ -638,7 +638,7 @@ int
 linux_msync(struct thread *td, struct linux_msync_args *args)
 {
 
-	return (kern_vm_msync(td, args->addr, args->len,
+	return (kern_msync(td, args->addr, args->len,
 	    args->fl & ~LINUX_MS_SYNC));
 }
 

@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/mman.h>
 #include <sys/proc.h>
 #include <sys/resourcevar.h>
+#include <sys/syscallsubr.h>
 #include <sys/sysent.h>
 #include <sys/sysproto.h>
 
@@ -202,7 +203,7 @@ linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
 		}
 	}
 
-	error = kern_vm_mmap(td, addr, len, prot, bsd_flags, fd, pos);
+	error = kern_mmap(td, addr, len, prot, bsd_flags, fd, pos);
 
 	LINUX_CTR2(mmap2, "return: %d (%p)", error, td->td_retval[0]);
 
@@ -216,7 +217,7 @@ linux_mprotect_common(struct thread *td, uintptr_t addr, size_t len, int prot)
 #if defined(__amd64__)
 	linux_fixup_prot(td, &prot);
 #endif
-	return (kern_vm_mprotect(td, addr, len, prot));
+	return (kern_mprotect(td, addr, len, prot));
 }
 
 #if defined(__amd64__)

@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/proc.h>
 #include <sys/resourcevar.h>
 #include <sys/signalvar.h>
+#include <sys/syscallsubr.h>
 #include <sys/protosw.h>
 #include <sys/rwlock.h>
 #include <sys/sema.h>
@@ -858,12 +859,9 @@ aio_process_mlock(struct kaiocb *job)
 	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
 
 	aio_switch_vmspace(job);
-	error = vm_mlock(job->userproc, job->cred,
-	    __DEVOLATILE(void *, cb->aio_buf), cb->aio_nbytes);
-	if (error)
-		aio_complete(job, -1, error);
-	else
-		aio_complete(job, 0, 0);
+	error = kern_mlock(job->userproc, job->cred,
+	    __DEVOLATILE(uintptr_t, cb->aio_buf), cb->aio_nbytes);
+	aio_complete(job, error != 0 ? -1 : 0, error);
 }
 
 static void

@@ -149,15 +149,24 @@ int kern_listen(struct thread *td, int s, int backlog);
 int kern_lseek(struct thread *td, int fd, off_t offset, int whence);
 int kern_lutimes(struct thread *td, char *path, enum uio_seg pathseg,
     struct timeval *tptr, enum uio_seg tptrseg);
+int kern_madvise(struct thread *td, uintptr_t addr, size_t len, int behav);
 int kern_mkdirat(struct thread *td, int fd, char *path,
     enum uio_seg segflg, int mode);
 int kern_mkfifoat(struct thread *td, int fd, char *path,
     enum uio_seg pathseg, int mode);
 int kern_mknodat(struct thread *td, int fd, char *path,
     enum uio_seg pathseg, int mode, int dev);
+int kern_mlock(struct proc *proc, struct ucred *cred, uintptr_t addr,
+    size_t len);
+int kern_mmap(struct thread *td, uintptr_t addr, size_t size, int prot,
+    int flags, int fd, off_t pos);
+int kern_mprotect(struct thread *td, uintptr_t addr, size_t size, int prot);
 int kern_msgctl(struct thread *, int, int, struct msqid_ds *);
 int kern_msgsnd(struct thread *, int, const void *, size_t, int, long);
 int kern_msgrcv(struct thread *, int, void *, size_t, long, int, long *);
+int kern_msync(struct thread *td, uintptr_t addr, size_t size, int flags);
+int kern_munlock(struct thread *td, uintptr_t addr, size_t size);
+int kern_munmap(struct thread *td, uintptr_t addr, size_t size);
 int kern_nanosleep(struct thread *td, struct timespec *rqt,
     struct timespec *rmt);
 int kern_ogetdirentries(struct thread *td, struct ogetdirentries_args *uap,

@@ -71,16 +71,6 @@ void kmem_init(vm_offset_t, vm_offset_t);
 void kmem_init_zero_region(void);
 void kmeminit(void);
 
-int kern_vm_mmap(struct thread *td, vm_offset_t addr, vm_size_t size,
-    int prot, int flags, int fd, off_t pos);
-int kern_vm_mprotect(struct thread *td, vm_offset_t addr, vm_size_t size,
-    vm_prot_t prot);
-int kern_vm_msync(struct thread *td, vm_offset_t addr, vm_size_t size,
-    int flags);
-int kern_vm_munlock(struct thread *td, vm_offset_t addr, vm_size_t size);
-int kern_vm_munmap(struct thread *td, vm_offset_t addr, vm_size_t size);
-int kern_vm_madvise(struct thread *td, vm_offset_t addr, vm_size_t len,
-    int behav);
 void swapout_procs(int);
 int kernacc(void *, int, int);
 int useracc(void *, int, int);
@@ -124,6 +114,5 @@ struct sf_buf *vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset);
 void vm_imgact_unmap_page(struct sf_buf *sf);
 void vm_thread_dispose(struct thread *td);
 int vm_thread_new(struct thread *td, int pages);
-int vm_mlock(struct proc *, struct ucred *, const void *, size_t);
 #endif /* _KERNEL */
 #endif /* !_VM_EXTERN_H_ */

@@ -173,23 +173,26 @@ int
 sys_mmap(struct thread *td, struct mmap_args *uap)
 {
 
-	return (kern_vm_mmap(td, (vm_offset_t)uap->addr, uap->len,
-	    uap->prot, uap->flags, uap->fd, uap->pos));
+	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
+	    uap->flags, uap->fd, uap->pos));
 }
 
 int
-kern_vm_mmap(struct thread *td, vm_offset_t addr, vm_size_t size,
-    int prot, int flags, int fd, off_t pos)
+kern_mmap(struct thread *td, uintptr_t addr0, size_t size, int prot, int flags,
+    int fd, off_t pos)
 {
+	struct vmspace *vms;
 	struct file *fp;
+	vm_offset_t addr;
 	vm_size_t pageoff;
 	vm_prot_t cap_maxprot;
 	int align, error;
-	struct vmspace *vms = td->td_proc->p_vmspace;
 	cap_rights_t rights;
 
+	vms = td->td_proc->p_vmspace;
 	fp = NULL;
 	AUDIT_ARG_FD(fd);
+	addr = addr0;
 
 	/*
 	 * Ignore old flags that used to be defined but did not do anything.
@@ -363,8 +366,8 @@ int
 freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
 {
 
-	return (kern_vm_mmap(td, (vm_offset_t)uap->addr, uap->len,
-	    uap->prot, uap->flags, uap->fd, uap->pos));
+	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
+	    uap->flags, uap->fd, uap->pos));
 }
 #endif
 
@@ -418,8 +421,8 @@ ommap(struct thread *td, struct ommap_args *uap)
 		flags |= MAP_PRIVATE;
 	if (uap->flags & OMAP_FIXED)
 		flags |= MAP_FIXED;
-	return (kern_vm_mmap(td, (vm_offset_t)uap->addr, uap->len,
-	    prot, flags, uap->fd, uap->pos));
+	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, prot, flags,
+	    uap->fd, uap->pos));
 }
 #endif /* COMPAT_43 */
 
@@ -435,17 +438,18 @@ int
 sys_msync(struct thread *td, struct msync_args *uap)
 {
 
-	return (kern_vm_msync(td, (vm_offset_t)uap->addr, uap->len,
-	    uap->flags));
+	return (kern_msync(td, (uintptr_t)uap->addr, uap->len, uap->flags));
 }
 
 int
-kern_vm_msync(struct thread *td, vm_offset_t addr, vm_size_t size, int flags)
+kern_msync(struct thread *td, uintptr_t addr0, size_t size, int flags)
 {
+	vm_offset_t addr;
 	vm_size_t pageoff;
 	vm_map_t map;
 	int rv;
 
+	addr = addr0;
 	pageoff = (addr & PAGE_MASK);
 	addr -= pageoff;
 	size += pageoff;
@@ -487,23 +491,25 @@ int
 sys_munmap(struct thread *td, struct munmap_args *uap)
 {
 
-	return (kern_vm_munmap(td, (vm_offset_t)uap->addr, uap->len));
+	return (kern_munmap(td, (uintptr_t)uap->addr, uap->len));
 }
 
 int
-kern_vm_munmap(struct thread *td, vm_offset_t addr, vm_size_t size)
+kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
 {
 #ifdef HWPMC_HOOKS
 	struct pmckern_map_out pkm;
 	vm_map_entry_t entry;
 	bool pmc_handled;
 #endif
+	vm_offset_t addr;
 	vm_size_t pageoff;
 	vm_map_t map;
 
 	if (size == 0)
 		return (EINVAL);
 
+	addr = addr0;
 	pageoff = (addr & PAGE_MASK);
 	addr -= pageoff;
 	size += pageoff;
@@ -569,16 +575,16 @@ int
 sys_mprotect(struct thread *td, struct mprotect_args *uap)
 {
 
-	return (kern_vm_mprotect(td, (vm_offset_t)uap->addr, uap->len,
-	    uap->prot));
+	return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len, uap->prot));
 }
 
 int
-kern_vm_mprotect(struct thread *td, vm_offset_t addr, vm_size_t size,
-    vm_prot_t prot)
+kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot)
 {
+	vm_offset_t addr;
 	vm_size_t pageoff;
 
+	addr = addr0;
 	prot = (prot & VM_PROT_ALL);
 	pageoff = (addr & PAGE_MASK);
 	addr -= pageoff;
@@ -646,15 +652,14 @@ int
 sys_madvise(struct thread *td, struct madvise_args *uap)
 {
 
-	return (kern_vm_madvise(td, (vm_offset_t)uap->addr, uap->len,
-	    uap->behav));
+	return (kern_madvise(td, (uintptr_t)uap->addr, uap->len, uap->behav));
 }
 
 int
-kern_vm_madvise(struct thread *td, vm_offset_t addr, vm_size_t len, int behav)
+kern_madvise(struct thread *td, uintptr_t addr0, size_t len, int behav)
 {
-	vm_offset_t start, end;
 	vm_map_t map;
+	vm_offset_t addr, end, start;
 	int flags;
 
 	/*
@@ -677,6 +682,7 @@ kern_vm_madvise(struct thread *td, vm_offset_t addr, vm_size_t len, int behav)
 	 * that VM_*_ADDRESS are not constants due to casts (argh).
 	 */
 	map = &td->td_proc->p_vmspace->vm_map;
+	addr = addr0;
 	if (addr < vm_map_min(map) || addr + len > vm_map_max(map))
 		return (EINVAL);
 	if ((addr + len) < addr)
@@ -957,11 +963,12 @@ int
 sys_mlock(struct thread *td, struct mlock_args *uap)
 {
 
-	return (vm_mlock(td->td_proc, td->td_ucred, uap->addr, uap->len));
+	return (kern_mlock(td->td_proc, td->td_ucred,
+	    __DECONST(uintptr_t, uap->addr), uap->len));
 }
 
 int
-vm_mlock(struct proc *proc, struct ucred *cred, const void *addr0, size_t len)
+kern_mlock(struct proc *proc, struct ucred *cred, uintptr_t addr0, size_t len)
 {
 	vm_offset_t addr, end, last, start;
 	vm_size_t npages, size;
@@ -972,7 +979,7 @@ vm_mlock(struct proc *proc, struct ucred *cred, const void *addr0, size_t len)
 	error = priv_check_cred(cred, PRIV_VM_MLOCK, 0);
 	if (error)
 		return (error);
-	addr = (vm_offset_t)addr0;
+	addr = addr0;
 	size = len;
 	last = addr + size;
 	start = trunc_page(addr);
@@ -1132,13 +1139,13 @@ int
 sys_munlock(struct thread *td, struct munlock_args *uap)
 {
 
-	return (kern_vm_munlock(td, (vm_offset_t)uap->addr, uap->len));
+	return (kern_munlock(td, (uintptr_t)uap->addr, uap->len));
 }
 
 int
-kern_vm_munlock(struct thread *td, vm_offset_t addr, vm_size_t size)
+kern_munlock(struct thread *td, uintptr_t addr0, size_t size)
 {
-	vm_offset_t end, last, start;
+	vm_offset_t addr, end, last, start;
 #ifdef RACCT
 	vm_map_t map;
 #endif
@@ -1147,6 +1154,7 @@ kern_vm_munlock(struct thread *td, vm_offset_t addr, vm_size_t size)
 	error = priv_check(td, PRIV_VM_MUNLOCK);
 	if (error)
 		return (error);
+	addr = addr0;
 	last = addr + size;
 	start = trunc_page(addr);
 	end = round_page(last);
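
For reference, a hypothetical caller-side sketch (not part of this commit) of what moving the prototypes to syscallsubr.h buys a compat module: the VM syscall helpers become reachable with plain C types, without including <vm/vm.h> or <vm/vm_extern.h>.

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/syscallsubr.h>	/* kern_mmap() and friends */

/* Hypothetical consumer; illustrative only. */
static int
example_map_anon(struct thread *td, uintptr_t hint, size_t len)
{

	/* Plain integer types at the call site; vm_offset_t stays in vm_mmap.c. */
	return (kern_mmap(td, hint, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0));
}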