Add a facility to disable processing of page faults. When activated,
uiomove generates EFAULT if any accessed address is not mapped, as
opposed to handling the fault.

Sponsored by:	The FreeBSD Foundation
Reviewed by:	alc (previous version)
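As a sketch of the intended usage pattern (the function, lock, and buffer names below are invented for illustration, not part of this commit): a caller holding a non-sleepable lock can attempt the copy with faults disabled, then retry with a normal, possibly-sleeping copyin() once the lock has been dropped.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * Hypothetical caller: copyin() may sleep to service a page fault,
 * which is illegal while a mutex is held; copyin_nofault() returns
 * EFAULT instead of faulting, so it is safe under the lock.
 */
static int
fetch_locked(struct mtx *m, const void *uaddr, void *kbuf, size_t len)
{
	int error;

	mtx_lock(m);
	error = copyin_nofault(uaddr, kbuf, len);
	mtx_unlock(m);
	if (error != 0)
		error = copyin(uaddr, kbuf, len);	/* may sleep */
	return (error);
}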
Konstantin Belousov 2011-07-09 15:21:10 +00:00
parent 58f9394c50
commit 2801687d56
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=223889
6 changed files with 77 additions and 11 deletions

sys/kern/subr_uio.c

@@ -64,6 +64,8 @@ __FBSDID("$FreeBSD$");
 SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
 	"Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");
 
+static int	uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault);
+
 #ifdef ZERO_COPY_SOCKETS
 /* Declared in uipc_socket.c */
 extern int so_zero_copy_receive;
@@ -128,24 +130,66 @@ vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
 }
 #endif /* ZERO_COPY_SOCKETS */
 
+int
+copyin_nofault(const void *udaddr, void *kaddr, size_t len)
+{
+	int error, save;
+
+	save = vm_fault_disable_pagefaults();
+	error = copyin(udaddr, kaddr, len);
+	vm_fault_enable_pagefaults(save);
+	return (error);
+}
+
+int
+copyout_nofault(const void *kaddr, void *udaddr, size_t len)
+{
+	int error, save;
+
+	save = vm_fault_disable_pagefaults();
+	error = copyout(kaddr, udaddr, len);
+	vm_fault_enable_pagefaults(save);
+	return (error);
+}
+
 int
 uiomove(void *cp, int n, struct uio *uio)
 {
-	struct thread *td = curthread;
+
+	return (uiomove_faultflag(cp, n, uio, 0));
+}
+
+int
+uiomove_nofault(void *cp, int n, struct uio *uio)
+{
+
+	return (uiomove_faultflag(cp, n, uio, 1));
+}
+
+static int
+uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault)
+{
+	struct thread *td;
 	struct iovec *iov;
 	u_int cnt;
-	int error = 0;
-	int save = 0;
+	int error, newflags, save;
+
+	td = curthread;
+	error = 0;
 
 	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
 	    ("uiomove: mode"));
-	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
+	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == td,
 	    ("uiomove proc"));
-	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
-	    "Calling uiomove()");
+	if (!nofault)
+		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+		    "Calling uiomove()");
 
-	save = td->td_pflags & TDP_DEADLKTREAT;
-	td->td_pflags |= TDP_DEADLKTREAT;
+	/* XXX does it make a sense to set TDP_DEADLKTREAT for UIO_SYSSPACE ? */
+	newflags = TDP_DEADLKTREAT;
+	if (uio->uio_segflg == UIO_USERSPACE && nofault)
+		newflags |= TDP_NOFAULTING;
+	save = curthread_pflags_set(newflags);
 
 	while (n > 0 && uio->uio_resid) {
 		iov = uio->uio_iov;
@@ -187,8 +231,7 @@ uiomove(void *cp, int n, struct uio *uio)
 		n -= cnt;
 	}
 out:
-	if (save == 0)
-		td->td_pflags &= ~TDP_DEADLKTREAT;
+	curthread_pflags_restore(save);
 	return (error);
 }
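The save/restore idiom above works because curthread_pflags_set() reports which of the requested bits were already set, so curthread_pflags_restore() clears only the bits that this particular call set. A rough sketch of the helpers, shown here for reference (they live in sys/proc.h and are not added by this commit):

static __inline int
curthread_pflags_set(int flags)
{
	struct thread *td;
	int save;

	td = curthread;
	save = ~flags | (td->td_pflags & flags);	/* 0-bits: set by us */
	td->td_pflags |= flags;
	return (save);
}

static __inline void
curthread_pflags_restore(int save)
{

	curthread->td_pflags &= save;	/* clear only the bits we set */
}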

sys/sys/proc.h

@@ -393,7 +393,7 @@ do { \
 #define	TDP_COWINPROGRESS 0x00000010 /* Snapshot copy-on-write in progress. */
 #define	TDP_ALTSTACK	0x00000020 /* Have alternate signal stack. */
 #define	TDP_DEADLKTREAT	0x00000040 /* Lock aquisition - deadlock treatment. */
-#define	TDP_UNUSED80	0x00000080 /* available. */
+#define	TDP_NOFAULTING	0x00000080 /* Do not handle page faults. */
 #define	TDP_NOSLEEPING	0x00000100 /* Thread is not allowed to sleep on a sq. */
 #define	TDP_OWEUPC	0x00000200 /* Call addupc() at next AST. */
 #define	TDP_ITHREAD	0x00000400 /* Thread is an interrupt thread. */

sys/sys/systm.h

@@ -217,8 +217,12 @@ int	copyinstr(const void * __restrict udaddr, void * __restrict kaddr,
 	    size_t len, size_t * __restrict lencopied)
 	    __nonnull(1) __nonnull(2);
 int	copyin(const void * __restrict udaddr, void * __restrict kaddr,
 	    size_t len) __nonnull(1) __nonnull(2);
+int	copyin_nofault(const void * __restrict udaddr, void * __restrict kaddr,
+	    size_t len) __nonnull(1) __nonnull(2);
 int	copyout(const void * __restrict kaddr, void * __restrict udaddr,
 	    size_t len) __nonnull(1) __nonnull(2);
+int	copyout_nofault(const void * __restrict kaddr, void * __restrict udaddr,
+	    size_t len) __nonnull(1) __nonnull(2);
 int	fubyte(const void *base);
 long	fuword(const void *base);

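Symmetrically, copyout_nofault() lets data be published to user space from a context that must not sleep. A minimal sketch, with the rwlock and structure invented for illustration:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

struct stats_snapshot {
	uint64_t	reads;
	uint64_t	writes;
};

/* Hypothetical: export counters without dropping the read lock. */
static int
export_stats(struct rwlock *lk, const struct stats_snapshot *st, void *uaddr)
{
	int error;

	rw_rlock(lk);
	error = copyout_nofault(st, uaddr, sizeof(*st));
	rw_runlock(lk);
	return (error);	/* EFAULT if the copy would have faulted */
}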
sys/sys/uio.h

@@ -100,6 +100,7 @@ int	uiomove(void *cp, int n, struct uio *uio);
 int	uiomove_frombuf(void *buf, int buflen, struct uio *uio);
 int	uiomove_fromphys(struct vm_page *ma[], vm_offset_t offset, int n,
 	    struct uio *uio);
+int	uiomove_nofault(void *cp, int n, struct uio *uio);
 int	uiomoveco(void *cp, int n, struct uio *uio, int disposable);
 
 #else /* !_KERNEL */

sys/vm/vm_extern.h

@@ -61,6 +61,8 @@ int useracc(void *, int, int);
 int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int);
 void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t,
     vm_ooffset_t *);
+int vm_fault_disable_pagefaults(void);
+void vm_fault_enable_pagefaults(int save);
 int vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
     int fault_flags, vm_page_t *m_hold);
 int vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,

sys/vm/vm_fault.c

@@ -209,6 +209,8 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
     int fault_flags)
 {
 
+	if ((curthread->td_pflags & TDP_NOFAULTING) != 0)
+		return (KERN_PROTECTION_FAILURE);
 	return (vm_fault_hold(map, vaddr, fault_type, fault_flags, NULL));
 }
 
@@ -1475,3 +1477,17 @@ vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
 	/* return number of pages */
 	return i;
 }
+
+int
+vm_fault_disable_pagefaults(void)
+{
+
+	return (curthread_pflags_set(TDP_NOFAULTING));
+}
+
+void
+vm_fault_enable_pagefaults(int save)
+{
+
+	curthread_pflags_restore(save);
+}
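Because the helpers save and restore the previous TDP_NOFAULTING state rather than clearing it unconditionally, disable/enable pairs nest safely. A sketch, with the function and buffer names invented for illustration:

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

static int
nested_nofault_copy(const void *uaddr, void *kbuf, size_t len)
{
	int error, inner, outer;

	outer = vm_fault_disable_pagefaults();
	inner = vm_fault_disable_pagefaults();	/* nested: bit already set */
	error = copyin(uaddr, kbuf, len);	/* any fault yields EFAULT */
	vm_fault_enable_pagefaults(inner);	/* TDP_NOFAULTING stays set */
	vm_fault_enable_pagefaults(outer);	/* cleared only here */
	return (error);
}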