freebsd-nq/sys/kern/sys_generic.c

/*
* Copyright (c) 1982, 1986, 1989, 1993
* The Regents of the University of California. All rights reserved.
* (c) UNIX System Laboratories, Inc.
* All or some portions of this file are derived from material licensed
* to the University of California by American Telephone and Telegraph
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
* the permission of UNIX System Laboratories, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)sys_generic.c 8.5 (Berkeley) 1/21/94
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_ktrace.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/socketvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/resourcevar.h>
#include <sys/selinfo.h>
#include <sys/sleepqueue.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <vm/vm.h>
#include <vm/vm_page.h>
static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");
static int pollscan(struct thread *, struct pollfd *, u_int);
static int selscan(struct thread *, fd_mask **, fd_mask **, int);
static int dofileread(struct thread *, struct file *, int, void *,
size_t, off_t, int);
static int dofilewrite(struct thread *, struct file *, int,
const void *, size_t, off_t, int);
static void doselwakeup(struct selinfo *, int);
/*
* Read system call.
*/
#ifndef _SYS_SYSPROTO_H_
struct read_args {
int fd;
void *buf;
size_t nbyte;
};
#endif
/*
* MPSAFE
*/
int
read(td, uap)
struct thread *td;
struct read_args *uap;
{
struct file *fp;
int error;
if ((error = fget_read(td, uap->fd, &fp)) == 0) {
error = dofileread(td, fp, uap->fd, uap->buf,
uap->nbyte, (off_t)-1, 0);
fdrop(fp, td);
}
return(error);
}
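/*
 * Illustrative only (not part of this file): a userland caller reaches
 * read() above through the libc syscall stub, e.g.
 *
 *	char buf[512];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * The arguments arrive here packaged as struct read_args.
 */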
/*
 * Pread system call.
*/
#ifndef _SYS_SYSPROTO_H_
struct pread_args {
int fd;
void *buf;
size_t nbyte;
int pad;
off_t offset;
};
#endif
/*
* MPSAFE
*/
int
pread(td, uap)
struct thread *td;
struct pread_args *uap;
{
struct file *fp;
int error;
if ((error = fget_read(td, uap->fd, &fp)) != 0)
return (error);
if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
error = ESPIPE;
else if (uap->offset < 0 && fp->f_vnode->v_type != VCHR)
error = EINVAL;
else {
error = dofileread(td, fp, uap->fd, uap->buf, uap->nbyte,
uap->offset, FOF_OFFSET);
}
fdrop(fp, td);
return(error);
}
/*
* Code common for read and pread
*/
static int
dofileread(td, fp, fd, buf, nbyte, offset, flags)
struct thread *td;
struct file *fp;
int fd, flags;
void *buf;
size_t nbyte;
off_t offset;
{
struct uio auio;
struct iovec aiov;
long cnt, error = 0;
#ifdef KTRACE
struct iovec ktriov;
struct uio ktruio;
int didktr = 0;
#endif
aiov.iov_base = buf;
aiov.iov_len = nbyte;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_offset = offset;
if (nbyte > INT_MAX)
return (EINVAL);
auio.uio_resid = nbyte;
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_USERSPACE;
auio.uio_td = td;
#ifdef KTRACE
/*
* if tracing, save a copy of iovec
*/
if (KTRPOINT(td, KTR_GENIO)) {
ktriov = aiov;
ktruio = auio;
didktr = 1;
}
#endif
cnt = nbyte;
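	/*
	 * If the transfer is interrupted after some bytes have already
	 * been moved, report the partial count as success rather than
	 * returning the error.
	 */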
if ((error = fo_read(fp, &auio, td->td_ucred, flags, td))) {
if (auio.uio_resid != cnt && (error == ERESTART ||
error == EINTR || error == EWOULDBLOCK))
error = 0;
}
cnt -= auio.uio_resid;
#ifdef KTRACE
if (didktr && error == 0) {
ktruio.uio_iov = &ktriov;
ktruio.uio_resid = cnt;
ktrgenio(fd, UIO_READ, &ktruio, error);
}
#endif
td->td_retval[0] = cnt;
return (error);
}
/*
* Scatter read system call.
*/
#ifndef _SYS_SYSPROTO_H_
struct readv_args {
int fd;
struct iovec *iovp;
u_int iovcnt;
};
#endif
/*
* MPSAFE
*/
int
readv(td, uap)
struct thread *td;
struct readv_args *uap;
{
struct file *fp;
struct uio auio;
struct iovec *iov;
struct iovec *needfree;
struct iovec aiov[UIO_SMALLIOV];
long i, cnt;
int error;
u_int iovlen;
#ifdef KTRACE
struct iovec *ktriov = NULL;
struct uio ktruio;
#endif
if ((error = fget_read(td, uap->fd, &fp)) != 0)
return (error);
needfree = NULL;
/* note: can't use iovlen until iovcnt is validated */
iovlen = uap->iovcnt * sizeof (struct iovec);
if (uap->iovcnt > UIO_SMALLIOV) {
if (uap->iovcnt > UIO_MAXIOV) {
error = EINVAL;
goto done;
}
MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
needfree = iov;
} else
iov = aiov;
auio.uio_iov = iov;
auio.uio_iovcnt = uap->iovcnt;
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_USERSPACE;
auio.uio_td = td;
auio.uio_offset = -1;
if ((error = copyin(uap->iovp, iov, iovlen)))
goto done;
auio.uio_resid = 0;
for (i = 0; i < uap->iovcnt; i++) {
if (iov->iov_len > INT_MAX - auio.uio_resid) {
error = EINVAL;
goto done;
}
auio.uio_resid += iov->iov_len;
iov++;
}
#ifdef KTRACE
/*
* if tracing, save a copy of iovec
*/
if (KTRPOINT(td, KTR_GENIO)) {
MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
bcopy(auio.uio_iov, ktriov, iovlen);
ktruio = auio;
}
#endif
cnt = auio.uio_resid;
if ((error = fo_read(fp, &auio, td->td_ucred, 0, td))) {
if (auio.uio_resid != cnt && (error == ERESTART ||
error == EINTR || error == EWOULDBLOCK))
error = 0;
}
cnt -= auio.uio_resid;
#ifdef KTRACE
if (ktriov != NULL) {
if (error == 0) {
ktruio.uio_iov = ktriov;
ktruio.uio_resid = cnt;
ktrgenio(uap->fd, UIO_READ, &ktruio, error);
}
FREE(ktriov, M_TEMP);
}
#endif
td->td_retval[0] = cnt;
done:
fdrop(fp, td);
if (needfree)
FREE(needfree, M_IOV);
return (error);
}
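/*
 * Illustrative only: a scatter read fills several buffers in one call,
 * e.g. from userland (hdr/body and their lengths are hypothetical):
 *
 *	struct iovec iov[2];
 *
 *	iov[0].iov_base = hdr;  iov[0].iov_len = hdrlen;
 *	iov[1].iov_base = body; iov[1].iov_len = bodylen;
 *	n = readv(fd, iov, 2);
 *
 * The validation loop above rejects requests whose total length
 * would exceed INT_MAX.
 */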
/*
 * Write system call.
*/
#ifndef _SYS_SYSPROTO_H_
struct write_args {
int fd;
const void *buf;
size_t nbyte;
};
#endif
/*
* MPSAFE
*/
int
write(td, uap)
struct thread *td;
struct write_args *uap;
{
struct file *fp;
int error;
if ((error = fget_write(td, uap->fd, &fp)) == 0) {
error = dofilewrite(td, fp, uap->fd, uap->buf, uap->nbyte,
(off_t)-1, 0);
fdrop(fp, td);
} else {
error = EBADF; /* XXX this can't be right */
}
return(error);
}
/*
 * Pwrite system call.
*/
#ifndef _SYS_SYSPROTO_H_
struct pwrite_args {
int fd;
const void *buf;
size_t nbyte;
int pad;
off_t offset;
};
#endif
/*
* MPSAFE
*/
int
pwrite(td, uap)
struct thread *td;
struct pwrite_args *uap;
{
struct file *fp;
int error;
if ((error = fget_write(td, uap->fd, &fp)) == 0) {
if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
error = ESPIPE;
else if (uap->offset < 0 && fp->f_vnode->v_type != VCHR)
error = EINVAL;
else {
error = dofilewrite(td, fp, uap->fd, uap->buf,
uap->nbyte, uap->offset, FOF_OFFSET);
}
fdrop(fp, td);
} else {
error = EBADF; /* this can't be right */
}
return(error);
}
static int
dofilewrite(td, fp, fd, buf, nbyte, offset, flags)
struct thread *td;
struct file *fp;
int fd, flags;
const void *buf;
size_t nbyte;
off_t offset;
{
struct uio auio;
struct iovec aiov;
long cnt, error = 0;
#ifdef KTRACE
struct iovec ktriov;
struct uio ktruio;
int didktr = 0;
#endif
aiov.iov_base = (void *)(uintptr_t)buf;
aiov.iov_len = nbyte;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_offset = offset;
if (nbyte > INT_MAX)
return (EINVAL);
auio.uio_resid = nbyte;
auio.uio_rw = UIO_WRITE;
auio.uio_segflg = UIO_USERSPACE;
auio.uio_td = td;
#ifdef KTRACE
/*
* if tracing, save a copy of iovec and uio
*/
if (KTRPOINT(td, KTR_GENIO)) {
ktriov = aiov;
ktruio = auio;
didktr = 1;
}
#endif
cnt = nbyte;
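	/*
	 * For vnode-backed files, bwillwrite() may block the caller
	 * until the buffer cache has drained some dirty buffers; this
	 * happens before any locks are taken for the write itself.
	 */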
if (fp->f_type == DTYPE_VNODE)
bwillwrite();
if ((error = fo_write(fp, &auio, td->td_ucred, flags, td))) {
if (auio.uio_resid != cnt && (error == ERESTART ||
error == EINTR || error == EWOULDBLOCK))
error = 0;
/* Socket layer is responsible for issuing SIGPIPE. */
if (error == EPIPE && fp->f_type != DTYPE_SOCKET) {
PROC_LOCK(td->td_proc);
psignal(td->td_proc, SIGPIPE);
PROC_UNLOCK(td->td_proc);
}
}
cnt -= auio.uio_resid;
#ifdef KTRACE
if (didktr && error == 0) {
ktruio.uio_iov = &ktriov;
ktruio.uio_resid = cnt;
ktrgenio(fd, UIO_WRITE, &ktruio, error);
}
#endif
td->td_retval[0] = cnt;
return (error);
}
/*
 * Gather write system call.
*/
#ifndef _SYS_SYSPROTO_H_
struct writev_args {
int fd;
struct iovec *iovp;
u_int iovcnt;
};
#endif
/*
* MPSAFE
*/
int
writev(td, uap)
struct thread *td;
register struct writev_args *uap;
{
struct file *fp;
struct uio auio;
register struct iovec *iov;
struct iovec *needfree;
struct iovec aiov[UIO_SMALLIOV];
long i, cnt, error = 0;
u_int iovlen;
#ifdef KTRACE
struct iovec *ktriov = NULL;
struct uio ktruio;
#endif
if ((error = fget_write(td, uap->fd, &fp)) != 0)
return (EBADF);
needfree = NULL;
/* note: can't use iovlen until iovcnt is validated */
iovlen = uap->iovcnt * sizeof (struct iovec);
if (uap->iovcnt > UIO_SMALLIOV) {
if (uap->iovcnt > UIO_MAXIOV) {
error = EINVAL;
goto done;
}
MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
needfree = iov;
} else
iov = aiov;
auio.uio_iov = iov;
auio.uio_iovcnt = uap->iovcnt;
auio.uio_rw = UIO_WRITE;
auio.uio_segflg = UIO_USERSPACE;
auio.uio_td = td;
auio.uio_offset = -1;
if ((error = copyin(uap->iovp, iov, iovlen)))
goto done;
auio.uio_resid = 0;
for (i = 0; i < uap->iovcnt; i++) {
if (iov->iov_len > INT_MAX - auio.uio_resid) {
error = EINVAL;
goto done;
}
auio.uio_resid += iov->iov_len;
iov++;
}
#ifdef KTRACE
/*
* if tracing, save a copy of iovec and uio
*/
if (KTRPOINT(td, KTR_GENIO)) {
MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
bcopy(auio.uio_iov, ktriov, iovlen);
ktruio = auio;
}
#endif
cnt = auio.uio_resid;
if (fp->f_type == DTYPE_VNODE)
bwillwrite();
if ((error = fo_write(fp, &auio, td->td_ucred, 0, td))) {
if (auio.uio_resid != cnt && (error == ERESTART ||
error == EINTR || error == EWOULDBLOCK))
error = 0;
if (error == EPIPE) {
PROC_LOCK(td->td_proc);
psignal(td->td_proc, SIGPIPE);
PROC_UNLOCK(td->td_proc);
}
}
cnt -= auio.uio_resid;
#ifdef KTRACE
if (ktriov != NULL) {
if (error == 0) {
ktruio.uio_iov = ktriov;
ktruio.uio_resid = cnt;
ktrgenio(uap->fd, UIO_WRITE, &ktruio, error);
}
FREE(ktriov, M_TEMP);
}
#endif
td->td_retval[0] = cnt;
done:
fdrop(fp, td);
if (needfree)
FREE(needfree, M_IOV);
return (error);
}
/*
 * Ioctl system call.
*/
#ifndef _SYS_SYSPROTO_H_
struct ioctl_args {
int fd;
u_long com;
caddr_t data;
};
#endif
/*
* MPSAFE
*/
/* ARGSUSED */
int
ioctl(td, uap)
struct thread *td;
register struct ioctl_args *uap;
{
struct file *fp;
register struct filedesc *fdp;
register u_long com;
int error = 0;
register u_int size;
caddr_t data, memp;
int tmp;
#define STK_PARAMS 128
union {
char stkbuf[STK_PARAMS];
long align;
} ubuf;
if ((error = fget(td, uap->fd, &fp)) != 0)
return (error);
mtx_lock(&Giant);
if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
fdrop(fp, td);
mtx_unlock(&Giant);
return (EBADF);
}
fdp = td->td_proc->p_fd;
switch (com = uap->com) {
case FIONCLEX:
FILEDESC_LOCK(fdp);
fdp->fd_ofileflags[uap->fd] &= ~UF_EXCLOSE;
FILEDESC_UNLOCK(fdp);
fdrop(fp, td);
mtx_unlock(&Giant);
return (0);
case FIOCLEX:
FILEDESC_LOCK(fdp);
fdp->fd_ofileflags[uap->fd] |= UF_EXCLOSE;
FILEDESC_UNLOCK(fdp);
fdrop(fp, td);
mtx_unlock(&Giant);
return (0);
}
/*
* Interpret high order word to find amount of data to be
* copied to/from the user's address space.
*/
size = IOCPARM_LEN(com);
if (size > IOCPARM_MAX) {
fdrop(fp, td);
mtx_unlock(&Giant);
return (ENOTTY);
}
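	/*
	 * For example, FIONBIO is defined as _IOW('f', 126, int), so
	 * IOCPARM_LEN(FIONBIO) is sizeof(int) and its IOC_IN bit is
	 * set, meaning sizeof(int) bytes are copied in from uap->data
	 * below.
	 */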
memp = NULL;
if (size > sizeof (ubuf.stkbuf)) {
memp = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
data = memp;
} else {
data = ubuf.stkbuf;
}
if (com&IOC_IN) {
if (size) {
error = copyin(uap->data, data, (u_int)size);
if (error) {
if (memp)
free(memp, M_IOCTLOPS);
fdrop(fp, td);
goto done;
}
} else {
*(caddr_t *)data = uap->data;
}
} else if ((com&IOC_OUT) && size) {
/*
* Zero the buffer so the user always
* gets back something deterministic.
*/
bzero(data, size);
} else if (com&IOC_VOID) {
*(caddr_t *)data = uap->data;
}
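	/*
	 * FIONBIO and FIOASYNC are intercepted for every file type:
	 * the f_flag bits are updated first, then the request is also
	 * passed down via fo_ioctl() so the object can react.
	 */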
switch (com) {
case FIONBIO:
FILE_LOCK(fp);
if ((tmp = *(int *)data))
fp->f_flag |= FNONBLOCK;
else
fp->f_flag &= ~FNONBLOCK;
FILE_UNLOCK(fp);
error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
break;
case FIOASYNC:
FILE_LOCK(fp);
if ((tmp = *(int *)data))
fp->f_flag |= FASYNC;
else
fp->f_flag &= ~FASYNC;
FILE_UNLOCK(fp);
error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td);
break;
default:
error = fo_ioctl(fp, com, data, td->td_ucred, td);
/*
* Copy any data to user, size was
* already set and checked above.
*/
if (error == 0 && (com&IOC_OUT) && size)
error = copyout(data, uap->data, (u_int)size);
break;
}
if (memp)
free(memp, M_IOCTLOPS);
fdrop(fp, td);
done:
mtx_unlock(&Giant);
return (error);
}
/*
* sellock and selwait are initialized in selectinit() via SYSINIT.
*/
struct mtx sellock;
struct cv selwait;
u_int nselcoll; /* Select collisions since boot */
SYSCTL_UINT(_kern, OID_AUTO, nselcoll, CTLFLAG_RD, &nselcoll, 0, "");
/*
* Select system call.
*/
#ifndef _SYS_SYSPROTO_H_
struct select_args {
int nd;
fd_set *in, *ou, *ex;
struct timeval *tv;
};
#endif
/*
* MPSAFE
*/
int
select(td, uap)
register struct thread *td;
register struct select_args *uap;
{
struct timeval tv, *tvp;
int error;
if (uap->tv != NULL) {
error = copyin(uap->tv, &tv, sizeof(tv));
if (error)
return (error);
tvp = &tv;
} else
tvp = NULL;
return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp));
}
int
kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
fd_set *fd_ex, struct timeval *tvp)
{
struct filedesc *fdp;
/*
* The magic 2048 here is chosen to be just enough for FD_SETSIZE
* infds with the new FD_SETSIZE of 1024, and more than enough for
* FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
* of 256.
*/
fd_mask s_selbits[howmany(2048, NFDBITS)];
fd_mask *ibits[3], *obits[3], *selbits, *sbp;
struct timeval atv, rtv, ttv;
int error, timo;
u_int ncoll, nbufbytes, ncpbytes, nfdbits;
if (nd < 0)
return (EINVAL);
fdp = td->td_proc->p_fd;
/*
* XXX: kern_select() currently requires that we acquire Giant
* even if none of the file descriptors we poll requires Giant.
*/
mtx_lock(&Giant);
FILEDESC_LOCK(fdp);
if (nd > td->td_proc->p_fd->fd_nfiles)
nd = td->td_proc->p_fd->fd_nfiles; /* forgiving; slightly wrong */
FILEDESC_UNLOCK(fdp);
/*
* Allocate just enough bits for the non-null fd_sets. Use the
* preallocated auto buffer if possible.
*/
nfdbits = roundup(nd, NFDBITS);
ncpbytes = nfdbits / NBBY;
nbufbytes = 0;
if (fd_in != NULL)
nbufbytes += 2 * ncpbytes;
if (fd_ou != NULL)
nbufbytes += 2 * ncpbytes;
if (fd_ex != NULL)
nbufbytes += 2 * ncpbytes;
if (nbufbytes <= sizeof s_selbits)
selbits = &s_selbits[0];
else
selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);
/*
* Assign pointers into the bit buffers and fetch the input bits.
* Put the output buffers together so that they can be bzeroed
* together.
*/
sbp = selbits;
#define getbits(name, x) \
do { \
if (name == NULL) \
ibits[x] = NULL; \
else { \
ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp; \
obits[x] = sbp; \
sbp += ncpbytes / sizeof *sbp; \
error = copyin(name, ibits[x], ncpbytes); \
if (error != 0) \
goto done_nosellock; \
} \
} while (0)
getbits(fd_in, 0);
getbits(fd_ou, 1);
getbits(fd_ex, 2);
#undef getbits
if (nbufbytes != 0)
bzero(selbits, nbufbytes / 2);
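	/*
	 * At this point the output halves (obits, the first
	 * nbufbytes/2 bytes of selbits) are zeroed and the input
	 * halves hold the user's fd_sets.  For example, with nd == 256
	 * and all three sets non-NULL on a machine where NFDBITS is
	 * 32: nfdbits = 256, ncpbytes = 32 and nbufbytes = 192, which
	 * fits comfortably in s_selbits.
	 */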
if (tvp != NULL) {
atv = *tvp;
if (itimerfix(&atv)) {
error = EINVAL;
goto done_nosellock;
}
getmicrouptime(&rtv);
timevaladd(&atv, &rtv);
} else {
atv.tv_sec = 0;
atv.tv_usec = 0;
}
timo = 0;
TAILQ_INIT(&td->td_selq);
mtx_lock(&sellock);
retry:
ncoll = nselcoll;
mtx_lock_spin(&sched_lock);
td->td_flags |= TDF_SELECT;
mtx_unlock_spin(&sched_lock);
mtx_unlock(&sellock);
error = selscan(td, ibits, obits, nd);
mtx_lock(&sellock);
if (error || td->td_retval[0])
goto done;
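	/*
	 * Compute how long to sleep: the time remaining until the
	 * deadline, clamped to 24 hours so the conversion to ticks in
	 * tvtohz() stays in range.
	 */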
if (atv.tv_sec || atv.tv_usec) {
getmicrouptime(&rtv);
if (timevalcmp(&rtv, &atv, >=))
goto done;
ttv = atv;
timevalsub(&ttv, &rtv);
timo = ttv.tv_sec > 24 * 60 * 60 ?
24 * 60 * 60 * hz : tvtohz(&ttv);
}
/*
* An event of interest may occur while we do not hold
* sellock, so check TDF_SELECT and the number of
* collisions and rescan the file descriptors if
* necessary.
*/
mtx_lock_spin(&sched_lock);
if ((td->td_flags & TDF_SELECT) == 0 || nselcoll != ncoll) {
mtx_unlock_spin(&sched_lock);
goto retry;
}
mtx_unlock_spin(&sched_lock);
if (timo > 0)
error = cv_timedwait_sig(&selwait, &sellock, timo);
else
error = cv_wait_sig(&selwait, &sellock);
if (error == 0)
goto retry;
done:
clear_selinfo_list(td);
mtx_lock_spin(&sched_lock);
td->td_flags &= ~TDF_SELECT;
mtx_unlock_spin(&sched_lock);
mtx_unlock(&sellock);
done_nosellock:
/* select is not restarted after signals... */
if (error == ERESTART)
error = EINTR;
if (error == EWOULDBLOCK)
error = 0;
#define putbits(name, x) \
if (name && (error2 = copyout(obits[x], name, ncpbytes))) \
error = error2;
if (error == 0) {
int error2;
putbits(fd_in, 0);
putbits(fd_ou, 1);
putbits(fd_ex, 2);
#undef putbits
}
if (selbits != &s_selbits[0])
free(selbits, M_SELECT);
mtx_unlock(&Giant);
return (error);
}
static int
selscan(td, ibits, obits, nfd)
struct thread *td;
fd_mask **ibits, **obits;
int nfd;
{
int msk, i, fd;
fd_mask bits;
struct file *fp;
int n = 0;
/* Note: backend also returns POLLHUP/POLLERR if appropriate. */
static int flag[3] = { POLLRDNORM, POLLWRNORM, POLLRDBAND };
struct filedesc *fdp = td->td_proc->p_fd;
FILEDESC_LOCK(fdp);
for (msk = 0; msk < 3; msk++) {
if (ibits[msk] == NULL)
continue;
for (i = 0; i < nfd; i += NFDBITS) {
bits = ibits[msk][i/NFDBITS];
/* ffs(int mask) not portable, fd_mask is long */
for (fd = i; bits && fd < nfd; fd++, bits >>= 1) {
if (!(bits & 1))
continue;
if ((fp = fget_locked(fdp, fd)) == NULL) {
FILEDESC_UNLOCK(fdp);
return (EBADF);
}
if (fo_poll(fp, flag[msk], td->td_ucred,
td)) {
obits[msk][(fd)/NFDBITS] |=
((fd_mask)1 << ((fd) % NFDBITS));
n++;
}
}
}
}
FILEDESC_UNLOCK(fdp);
td->td_retval[0] = n;
return (0);
}
/*
* Poll system call.
*/
#ifndef _SYS_SYSPROTO_H_
struct poll_args {
struct pollfd *fds;
u_int nfds;
int timeout;
};
#endif
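/*
 * Illustrative only: a typical userland caller passes an array of
 * struct pollfd, e.g.
 *
 *	struct pollfd pfd;
 *
 *	pfd.fd = s;
 *	pfd.events = POLLIN;
 *	n = poll(&pfd, 1, 1000);
 *
 * waits at most one second (1000 ms) for input on s, then inspects
 * pfd.revents.
 */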
/*
* MPSAFE
*/
int
poll(td, uap)
struct thread *td;
struct poll_args *uap;
{
caddr_t bits;
char smallbits[32 * sizeof(struct pollfd)];
struct timeval atv, rtv, ttv;
int error = 0, timo;
u_int ncoll, nfds;
size_t ni;
nfds = uap->nfds;
/*
* XXX: poll() currently requires that we acquire Giant even if
* none of the file descriptors we poll requires Giant.
*/
mtx_lock(&Giant);
/*
* This is kinda bogus. We have fd limits, but that is not
* really related to the size of the pollfd array. Make sure
* we let the process use at least FD_SETSIZE entries and at
* least enough for the current limits. We want to be reasonably
* safe, but not overly restrictive.
*/
PROC_LOCK(td->td_proc);
if ((nfds > lim_cur(td->td_proc, RLIMIT_NOFILE)) &&
(nfds > FD_SETSIZE)) {
PROC_UNLOCK(td->td_proc);
error = EINVAL;
goto done2;
}
PROC_UNLOCK(td->td_proc);
ni = nfds * sizeof(struct pollfd);
if (ni > sizeof(smallbits))
bits = malloc(ni, M_TEMP, M_WAITOK);
else
bits = smallbits;
error = copyin(uap->fds, bits, ni);
if (error)
goto done_nosellock;
if (uap->timeout != INFTIM) {
atv.tv_sec = uap->timeout / 1000;
atv.tv_usec = (uap->timeout % 1000) * 1000;
if (itimerfix(&atv)) {
error = EINVAL;
goto done_nosellock;
}
getmicrouptime(&rtv);
timevaladd(&atv, &rtv);
} else {
atv.tv_sec = 0;
atv.tv_usec = 0;
}
timo = 0;
TAILQ_INIT(&td->td_selq);
mtx_lock(&sellock);
retry:
ncoll = nselcoll;
mtx_lock_spin(&sched_lock);
td->td_flags |= TDF_SELECT;
mtx_unlock_spin(&sched_lock);
mtx_unlock(&sellock);
error = pollscan(td, (struct pollfd *)bits, nfds);
mtx_lock(&sellock);
if (error || td->td_retval[0])
goto done;
if (atv.tv_sec || atv.tv_usec) {
getmicrouptime(&rtv);
if (timevalcmp(&rtv, &atv, >=))
goto done;
ttv = atv;
timevalsub(&ttv, &rtv);
timo = ttv.tv_sec > 24 * 60 * 60 ?
24 * 60 * 60 * hz : tvtohz(&ttv);
}
/*
* An event of interest may occur while we do not hold
* sellock, so check TDF_SELECT and the number of collisions
* and rescan the file descriptors if necessary.
*/
mtx_lock_spin(&sched_lock);
if ((td->td_flags & TDF_SELECT) == 0 || nselcoll != ncoll) {
mtx_unlock_spin(&sched_lock);
goto retry;
}
mtx_unlock_spin(&sched_lock);
if (timo > 0)
error = cv_timedwait_sig(&selwait, &sellock, timo);
else
error = cv_wait_sig(&selwait, &sellock);
if (error == 0)
goto retry;
done:
clear_selinfo_list(td);
mtx_lock_spin(&sched_lock);
td->td_flags &= ~TDF_SELECT;
mtx_unlock_spin(&sched_lock);
mtx_unlock(&sellock);
done_nosellock:
/* poll is not restarted after signals... */
if (error == ERESTART)
error = EINTR;
if (error == EWOULDBLOCK)
error = 0;
if (error == 0) {
error = copyout(bits, uap->fds, ni);
if (error)
goto out;
}
out:
if (ni > sizeof(smallbits))
free(bits, M_TEMP);
done2:
mtx_unlock(&Giant);
return (error);
}
static int
pollscan(td, fds, nfd)
struct thread *td;
struct pollfd *fds;
u_int nfd;
{
register struct filedesc *fdp = td->td_proc->p_fd;
int i;
struct file *fp;
int n = 0;
FILEDESC_LOCK(fdp);
for (i = 0; i < nfd; i++, fds++) {
if (fds->fd >= fdp->fd_nfiles) {
fds->revents = POLLNVAL;
n++;
} else if (fds->fd < 0) {
fds->revents = 0;
} else {
fp = fdp->fd_ofiles[fds->fd];
if (fp == NULL) {
fds->revents = POLLNVAL;
n++;
} else {
/*
* Note: backend also returns POLLHUP and
* POLLERR if appropriate.
*/
fds->revents = fo_poll(fp, fds->events,
td->td_ucred, td);
if (fds->revents != 0)
n++;
}
}
}
FILEDESC_UNLOCK(fdp);
td->td_retval[0] = n;
return (0);
}
/*
* OpenBSD poll system call.
 * XXX this isn't quite a true representation; OpenBSD uses select ops.
*/
#ifndef _SYS_SYSPROTO_H_
struct openbsd_poll_args {
struct pollfd *fds;
u_int nfds;
int timeout;
};
#endif
/*
* MPSAFE
*/
int
openbsd_poll(td, uap)
register struct thread *td;
register struct openbsd_poll_args *uap;
{
return (poll(td, (struct poll_args *)uap));
}
/*
* Remove the references to the thread from all of the objects
* we were polling.
*
* This code assumes that the underlying owner of the selinfo
* structure will hold sellock before it changes it, and that
* it will unlink itself from our list if it goes away.
*/
void
clear_selinfo_list(td)
struct thread *td;
{
struct selinfo *si;
mtx_assert(&sellock, MA_OWNED);
TAILQ_FOREACH(si, &td->td_selq, si_thrlist)
si->si_thread = NULL;
TAILQ_INIT(&td->td_selq);
}
/*
* Record a select request.
*/
void
selrecord(selector, sip)
struct thread *selector;
struct selinfo *sip;
{
mtx_lock(&sellock);
/*
* If the selinfo's thread pointer is NULL then take ownership of it.
*
* If the thread pointer is not NULL and it points to another
* thread, then we have a collision.
*
* If the thread pointer is not NULL and points back to us then leave
 * it alone, as we have already pointed it at us and added it to
 * our list.
*/
if (sip->si_thread == NULL) {
sip->si_thread = selector;
TAILQ_INSERT_TAIL(&selector->td_selq, sip, si_thrlist);
} else if (sip->si_thread != selector) {
sip->si_flags |= SI_COLL;
}
mtx_unlock(&sellock);
}
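/*
 * Illustrative sketch of the expected caller, assuming a hypothetical
 * driver that keeps a struct selinfo in its softc: its poll method
 * does something like
 *
 *	if (data_ready(sc))
 *		revents |= events & (POLLIN | POLLRDNORM);
 *	else
 *		selrecord(td, &sc->sc_rsel);
 *
 * and its interrupt path calls selwakeup(&sc->sc_rsel) when data
 * arrives, which lands in doselwakeup() below.
 */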
/* Wake up a selecting thread. */
void
selwakeup(sip)
struct selinfo *sip;
{
doselwakeup(sip, -1);
}
/* Wake up a selecting thread, and set its priority. */
void
selwakeuppri(sip, pri)
struct selinfo *sip;
int pri;
{
doselwakeup(sip, pri);
}
/*
* Do a wakeup when a selectable event occurs.
*/
static void
doselwakeup(sip, pri)
struct selinfo *sip;
int pri;
1994-05-24 10:09:53 +00:00
{
struct thread *td;
mtx_lock(&sellock);
td = sip->si_thread;
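	/*
	 * A collision means several threads selected on this selinfo;
	 * only one could be recorded, so wake all waiters and let them
	 * rescan their descriptors.
	 */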
if ((sip->si_flags & SI_COLL) != 0) {
nselcoll++;
sip->si_flags &= ~SI_COLL;
cv_broadcastpri(&selwait, pri);
}
if (td == NULL) {
mtx_unlock(&sellock);
return;
}
TAILQ_REMOVE(&td->td_selq, sip, si_thrlist);
sip->si_thread = NULL;
mtx_lock_spin(&sched_lock);
td->td_flags &= ~TDF_SELECT;
mtx_unlock_spin(&sched_lock);
sleepq_remove(td, &selwait);
mtx_unlock(&sellock);
}
static void selectinit(void *);
SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, selectinit, NULL)
/* ARGSUSED*/
static void
selectinit(dummy)
void *dummy;
{
cv_init(&selwait, "select");
mtx_init(&sellock, "sellck", NULL, MTX_DEF);
}