2005-01-06 23:35:40 +00:00
|
|
|
/*-
|
1994-05-24 10:09:53 +00:00
|
|
|
* Copyright (c) 1982, 1986, 1989, 1993
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
|
|
|
* (c) UNIX System Laboratories, Inc.
|
|
|
|
* All or some portions of this file are derived from material licensed
|
|
|
|
* to the University of California by American Telephone and Telegraph
|
|
|
|
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
|
|
|
|
* the permission of UNIX System Laboratories, Inc.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 4. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* @(#)sys_generic.c 8.5 (Berkeley) 1/21/94
|
|
|
|
*/
|
|
|
|
|
2003-06-11 00:56:59 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
2005-06-30 00:19:08 +00:00
|
|
|
#include "opt_compat.h"
|
1996-01-03 21:42:35 +00:00
|
|
|
#include "opt_ktrace.h"
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
1995-11-12 06:43:28 +00:00
|
|
|
#include <sys/sysproto.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/filedesc.h>
|
1997-03-24 11:52:29 +00:00
|
|
|
#include <sys/filio.h>
|
1997-03-23 03:37:54 +00:00
|
|
|
#include <sys/fcntl.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/file.h>
|
|
|
|
#include <sys/proc.h>
|
1994-10-02 17:35:40 +00:00
|
|
|
#include <sys/signalvar.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/socketvar.h>
|
|
|
|
#include <sys/uio.h>
|
|
|
|
#include <sys/kernel.h>
|
2003-04-29 13:36:06 +00:00
|
|
|
#include <sys/limits.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/malloc.h>
|
1997-09-14 02:30:32 +00:00
|
|
|
#include <sys/poll.h>
|
2001-02-07 23:28:01 +00:00
|
|
|
#include <sys/resourcevar.h>
|
2001-01-09 04:33:49 +00:00
|
|
|
#include <sys/selinfo.h>
|
Switch the sleep/wakeup and condition variable implementations to use the
sleep queue interface:
- Sleep queues attempt to merge some of the benefits of both sleep queues
and condition variables. Having sleep qeueus in a hash table avoids
having to allocate a queue head for each wait channel. Thus, struct cv
has shrunk down to just a single char * pointer now. However, the
hash table does not hold threads directly, but queue heads. This means
that once you have located a queue in the hash bucket, you no longer have
to walk the rest of the hash chain looking for threads. Instead, you have
a list of all the threads sleeping on that wait channel.
- Outside of the sleepq code and the sleep/cv code the kernel no longer
differentiates between cv's and sleep/wakeup. For example, calls to
abortsleep() and cv_abort() are replaced with a call to sleepq_abort().
Thus, the TDF_CVWAITQ flag is removed. Also, calls to unsleep() and
cv_waitq_remove() have been replaced with calls to sleepq_remove().
- The sched_sleep() function no longer accepts a priority argument as
sleep's no longer inherently bump the priority. Instead, this is soley
a propery of msleep() which explicitly calls sched_prio() before
blocking.
- The TDF_ONSLEEPQ flag has been dropped as it was never used. The
associated TDF_SET_ONSLEEPQ and TDF_CLR_ON_SLEEPQ macros have also been
dropped and replaced with a single explicit clearing of td_wchan.
TD_SET_ONSLEEPQ() would really have only made sense if it had taken
the wait channel and message as arguments anyway. Now that that only
happens in one place, a macro would be overkill.
2004-02-27 18:52:44 +00:00
|
|
|
#include <sys/sleepqueue.h>
|
2002-09-01 20:37:28 +00:00
|
|
|
#include <sys/syscallsubr.h>
|
2000-01-05 19:40:17 +00:00
|
|
|
#include <sys/sysctl.h>
|
1997-09-14 02:30:32 +00:00
|
|
|
#include <sys/sysent.h>
|
2004-01-20 01:27:42 +00:00
|
|
|
#include <sys/vnode.h>
|
2000-11-18 21:01:04 +00:00
|
|
|
#include <sys/bio.h>
|
|
|
|
#include <sys/buf.h>
|
2001-05-14 05:26:48 +00:00
|
|
|
#include <sys/condvar.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#ifdef KTRACE
|
|
|
|
#include <sys/ktrace.h>
|
|
|
|
#endif
|
|
|
|
|
1997-10-12 20:26:33 +00:00
|
|
|
static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
|
|
|
|
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
|
|
|
|
MALLOC_DEFINE(M_IOV, "iov", "large iov's");
|
1997-10-11 18:31:40 +00:00
|
|
|
|
2002-03-09 22:44:37 +00:00
|
|
|
static int pollscan(struct thread *, struct pollfd *, u_int);
|
|
|
|
static int selscan(struct thread *, fd_mask **, fd_mask **, int);
|
2005-07-07 18:17:55 +00:00
|
|
|
static int dofileread(struct thread *, int, struct file *, struct uio *,
|
|
|
|
off_t, int);
|
|
|
|
static int dofilewrite(struct thread *, int, struct file *, struct uio *,
|
|
|
|
off_t, int);
|
2003-11-09 09:17:26 +00:00
|
|
|
static void doselwakeup(struct selinfo *, int);
|
1999-04-04 21:41:28 +00:00
|
|
|
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct read_args {
|
|
|
|
int fd;
|
1998-09-05 14:30:11 +00:00
|
|
|
void *buf;
|
|
|
|
size_t nbyte;
|
1994-05-24 10:09:53 +00:00
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
read(td, uap)
|
|
|
|
struct thread *td;
|
2001-11-14 06:30:36 +00:00
|
|
|
struct read_args *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2005-07-07 18:17:55 +00:00
|
|
|
struct uio auio;
|
|
|
|
struct iovec aiov;
|
2000-11-18 21:01:04 +00:00
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2005-07-07 18:17:55 +00:00
|
|
|
if (uap->nbyte > INT_MAX)
|
|
|
|
return (EINVAL);
|
|
|
|
aiov.iov_base = uap->buf;
|
|
|
|
aiov.iov_len = uap->nbyte;
|
|
|
|
auio.uio_iov = &aiov;
|
|
|
|
auio.uio_iovcnt = 1;
|
|
|
|
auio.uio_resid = uap->nbyte;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
|
|
|
error = kern_readv(td, uap->fd, &auio);
|
2000-11-18 21:01:04 +00:00
|
|
|
return(error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
1999-03-27 21:16:58 +00:00
|
|
|
/*
|
2005-07-07 18:17:55 +00:00
|
|
|
* Positioned read system call
|
1999-03-27 21:16:58 +00:00
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct pread_args {
|
|
|
|
int fd;
|
|
|
|
void *buf;
|
|
|
|
size_t nbyte;
|
1999-04-04 21:41:28 +00:00
|
|
|
int pad;
|
|
|
|
off_t offset;
|
1999-03-27 21:16:58 +00:00
|
|
|
};
|
|
|
|
#endif
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
pread(td, uap)
|
|
|
|
struct thread *td;
|
2001-11-14 06:30:36 +00:00
|
|
|
struct pread_args *uap;
|
1999-04-04 21:41:28 +00:00
|
|
|
{
|
1999-03-27 21:16:58 +00:00
|
|
|
struct uio auio;
|
|
|
|
struct iovec aiov;
|
2005-07-07 18:17:55 +00:00
|
|
|
int error;
|
1999-03-27 21:16:58 +00:00
|
|
|
|
2005-07-07 18:17:55 +00:00
|
|
|
if (uap->nbyte > INT_MAX)
|
|
|
|
return (EINVAL);
|
|
|
|
aiov.iov_base = uap->buf;
|
|
|
|
aiov.iov_len = uap->nbyte;
|
1999-03-27 21:16:58 +00:00
|
|
|
auio.uio_iov = &aiov;
|
|
|
|
auio.uio_iovcnt = 1;
|
2005-07-07 18:17:55 +00:00
|
|
|
auio.uio_resid = uap->nbyte;
|
1999-03-27 21:16:58 +00:00
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
2005-07-07 18:17:55 +00:00
|
|
|
error = kern_preadv(td, uap->fd, &auio, uap->offset);
|
|
|
|
return(error);
|
1999-03-27 21:16:58 +00:00
|
|
|
}
|
|
|
|
|
2007-07-04 22:57:21 +00:00
|
|
|
int
|
|
|
|
freebsd6_pread(td, uap)
|
|
|
|
struct thread *td;
|
|
|
|
struct freebsd6_pread_args *uap;
|
|
|
|
{
|
|
|
|
struct pread_args oargs;
|
|
|
|
|
|
|
|
oargs.fd = uap->fd;
|
|
|
|
oargs.buf = uap->buf;
|
|
|
|
oargs.nbyte = uap->nbyte;
|
|
|
|
oargs.offset = uap->offset;
|
|
|
|
return (pread(td, &oargs));
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Scatter read system call.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct readv_args {
|
1995-11-11 06:57:34 +00:00
|
|
|
int fd;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct iovec *iovp;
|
|
|
|
u_int iovcnt;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2004-07-10 15:42:16 +00:00
|
|
|
readv(struct thread *td, struct readv_args *uap)
|
2005-03-31 22:51:18 +00:00
|
|
|
{
|
|
|
|
struct uio *auio;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = copyinuio(uap->iovp, uap->iovcnt, &auio);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
error = kern_readv(td, uap->fd, auio);
|
|
|
|
free(auio, M_IOV);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
kern_readv(struct thread *td, int fd, struct uio *auio)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-11-14 06:30:36 +00:00
|
|
|
struct file *fp;
|
2002-05-09 02:30:41 +00:00
|
|
|
int error;
|
2005-07-07 18:17:55 +00:00
|
|
|
|
|
|
|
error = fget_read(td, fd, &fp);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
error = dofileread(td, fd, fp, auio, (off_t)-1, 0);
|
|
|
|
fdrop(fp, td);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Scatter positioned read system call.
|
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct preadv_args {
|
|
|
|
int fd;
|
|
|
|
struct iovec *iovp;
|
|
|
|
u_int iovcnt;
|
|
|
|
off_t offset;
|
|
|
|
};
|
1994-05-24 10:09:53 +00:00
|
|
|
#endif
|
2005-07-07 18:17:55 +00:00
|
|
|
int
|
|
|
|
preadv(struct thread *td, struct preadv_args *uap)
|
|
|
|
{
|
|
|
|
struct uio *auio;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = copyinuio(uap->iovp, uap->iovcnt, &auio);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
error = kern_preadv(td, uap->fd, auio, uap->offset);
|
|
|
|
free(auio, M_IOV);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
kern_preadv(td, fd, auio, offset)
|
|
|
|
struct thread *td;
|
|
|
|
int fd;
|
|
|
|
struct uio *auio;
|
|
|
|
off_t offset;
|
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2005-03-31 22:51:18 +00:00
|
|
|
error = fget_read(td, fd, &fp);
|
2004-07-10 15:42:16 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2005-07-07 18:17:55 +00:00
|
|
|
if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
|
|
|
|
error = ESPIPE;
|
|
|
|
else if (offset < 0 && fp->f_vnode->v_type != VCHR)
|
|
|
|
error = EINVAL;
|
|
|
|
else
|
|
|
|
error = dofileread(td, fd, fp, auio, offset, FOF_OFFSET);
|
|
|
|
fdrop(fp, td);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Common code for readv and preadv that reads data in
|
|
|
|
* from a file using the passed in uio, offset, and flags.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
dofileread(td, fd, fp, auio, offset, flags)
|
|
|
|
struct thread *td;
|
|
|
|
int fd;
|
|
|
|
struct file *fp;
|
|
|
|
struct uio *auio;
|
|
|
|
off_t offset;
|
|
|
|
int flags;
|
|
|
|
{
|
|
|
|
ssize_t cnt;
|
|
|
|
int error;
|
|
|
|
#ifdef KTRACE
|
|
|
|
struct uio *ktruio = NULL;
|
|
|
|
#endif
|
|
|
|
|
2005-01-25 09:15:32 +00:00
|
|
|
/* Finish zero length reads right here */
|
|
|
|
if (auio->uio_resid == 0) {
|
|
|
|
td->td_retval[0] = 0;
|
|
|
|
return(0);
|
|
|
|
}
|
2004-07-10 15:42:16 +00:00
|
|
|
auio->uio_rw = UIO_READ;
|
2005-07-07 18:17:55 +00:00
|
|
|
auio->uio_offset = offset;
|
2004-07-10 15:42:16 +00:00
|
|
|
auio->uio_td = td;
|
1994-05-24 10:09:53 +00:00
|
|
|
#ifdef KTRACE
|
2004-07-10 15:42:16 +00:00
|
|
|
if (KTRPOINT(td, KTR_GENIO))
|
|
|
|
ktruio = cloneuio(auio);
|
1994-05-24 10:09:53 +00:00
|
|
|
#endif
|
2004-07-10 15:42:16 +00:00
|
|
|
cnt = auio->uio_resid;
|
2005-07-07 18:17:55 +00:00
|
|
|
if ((error = fo_read(fp, auio, td->td_ucred, flags, td))) {
|
2004-07-10 15:42:16 +00:00
|
|
|
if (auio->uio_resid != cnt && (error == ERESTART ||
|
1994-05-24 10:09:53 +00:00
|
|
|
error == EINTR || error == EWOULDBLOCK))
|
|
|
|
error = 0;
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
2004-07-10 15:42:16 +00:00
|
|
|
cnt -= auio->uio_resid;
|
1994-05-24 10:09:53 +00:00
|
|
|
#ifdef KTRACE
|
2004-07-10 15:42:16 +00:00
|
|
|
if (ktruio != NULL) {
|
|
|
|
ktruio->uio_resid = cnt;
|
2005-03-31 22:51:18 +00:00
|
|
|
ktrgenio(fd, UIO_READ, ktruio, error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
#endif
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = cnt;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct write_args {
|
|
|
|
int fd;
|
1998-09-05 14:30:11 +00:00
|
|
|
const void *buf;
|
|
|
|
size_t nbyte;
|
1994-05-24 10:09:53 +00:00
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
write(td, uap)
|
|
|
|
struct thread *td;
|
2001-11-14 06:30:36 +00:00
|
|
|
struct write_args *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2005-07-07 18:17:55 +00:00
|
|
|
struct uio auio;
|
|
|
|
struct iovec aiov;
|
2000-11-18 21:01:04 +00:00
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2005-07-07 18:17:55 +00:00
|
|
|
if (uap->nbyte > INT_MAX)
|
|
|
|
return (EINVAL);
|
|
|
|
aiov.iov_base = (void *)(uintptr_t)uap->buf;
|
|
|
|
aiov.iov_len = uap->nbyte;
|
|
|
|
auio.uio_iov = &aiov;
|
|
|
|
auio.uio_iovcnt = 1;
|
|
|
|
auio.uio_resid = uap->nbyte;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
|
|
|
error = kern_writev(td, uap->fd, &auio);
|
2000-11-18 21:01:04 +00:00
|
|
|
return(error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
1999-03-27 21:16:58 +00:00
|
|
|
/*
|
2007-03-04 22:36:48 +00:00
|
|
|
* Positioned write system call.
|
1999-03-27 21:16:58 +00:00
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct pwrite_args {
|
|
|
|
int fd;
|
|
|
|
const void *buf;
|
|
|
|
size_t nbyte;
|
1999-04-04 21:41:28 +00:00
|
|
|
int pad;
|
|
|
|
off_t offset;
|
1999-03-27 21:16:58 +00:00
|
|
|
};
|
|
|
|
#endif
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
pwrite(td, uap)
|
|
|
|
struct thread *td;
|
2001-11-14 06:30:36 +00:00
|
|
|
struct pwrite_args *uap;
|
1999-04-04 21:41:28 +00:00
|
|
|
{
|
1999-03-27 21:16:58 +00:00
|
|
|
struct uio auio;
|
|
|
|
struct iovec aiov;
|
2005-07-07 18:17:55 +00:00
|
|
|
int error;
|
1999-03-27 21:16:58 +00:00
|
|
|
|
2005-07-07 18:17:55 +00:00
|
|
|
if (uap->nbyte > INT_MAX)
|
|
|
|
return (EINVAL);
|
|
|
|
aiov.iov_base = (void *)(uintptr_t)uap->buf;
|
|
|
|
aiov.iov_len = uap->nbyte;
|
1999-03-27 21:16:58 +00:00
|
|
|
auio.uio_iov = &aiov;
|
|
|
|
auio.uio_iovcnt = 1;
|
2005-07-07 18:17:55 +00:00
|
|
|
auio.uio_resid = uap->nbyte;
|
1999-03-27 21:16:58 +00:00
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
2005-07-07 18:17:55 +00:00
|
|
|
error = kern_pwritev(td, uap->fd, &auio, uap->offset);
|
|
|
|
return(error);
|
1999-03-27 21:16:58 +00:00
|
|
|
}
|
|
|
|
|
2007-07-04 22:57:21 +00:00
|
|
|
int
|
|
|
|
freebsd6_pwrite(td, uap)
|
|
|
|
struct thread *td;
|
|
|
|
struct freebsd6_pwrite_args *uap;
|
|
|
|
{
|
|
|
|
struct pwrite_args oargs;
|
|
|
|
|
|
|
|
oargs.fd = uap->fd;
|
|
|
|
oargs.buf = uap->buf;
|
|
|
|
oargs.nbyte = uap->nbyte;
|
|
|
|
oargs.offset = uap->offset;
|
|
|
|
return (pwrite(td, &oargs));
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
2007-03-04 22:36:48 +00:00
|
|
|
* Gather write system call.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct writev_args {
|
|
|
|
int fd;
|
|
|
|
struct iovec *iovp;
|
|
|
|
u_int iovcnt;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2004-07-10 15:42:16 +00:00
|
|
|
writev(struct thread *td, struct writev_args *uap)
|
2005-03-31 22:51:18 +00:00
|
|
|
{
|
|
|
|
struct uio *auio;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = copyinuio(uap->iovp, uap->iovcnt, &auio);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
error = kern_writev(td, uap->fd, auio);
|
|
|
|
free(auio, M_IOV);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
kern_writev(struct thread *td, int fd, struct uio *auio)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-11-14 06:30:36 +00:00
|
|
|
struct file *fp;
|
2004-07-10 15:42:16 +00:00
|
|
|
int error;
|
2005-07-07 18:17:55 +00:00
|
|
|
|
|
|
|
error = fget_write(td, fd, &fp);
|
|
|
|
if (error)
|
2006-01-06 16:34:22 +00:00
|
|
|
return (error);
|
2005-07-07 18:17:55 +00:00
|
|
|
error = dofilewrite(td, fd, fp, auio, (off_t)-1, 0);
|
|
|
|
fdrop(fp, td);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2007-03-04 22:36:48 +00:00
|
|
|
* Gather positioned write system call.
|
2005-07-07 18:17:55 +00:00
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct pwritev_args {
|
|
|
|
int fd;
|
|
|
|
struct iovec *iovp;
|
|
|
|
u_int iovcnt;
|
|
|
|
off_t offset;
|
|
|
|
};
|
1994-05-24 10:09:53 +00:00
|
|
|
#endif
|
2005-07-07 18:17:55 +00:00
|
|
|
int
|
|
|
|
pwritev(struct thread *td, struct pwritev_args *uap)
|
|
|
|
{
|
|
|
|
struct uio *auio;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = copyinuio(uap->iovp, uap->iovcnt, &auio);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
error = kern_pwritev(td, uap->fd, auio, uap->offset);
|
|
|
|
free(auio, M_IOV);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
kern_pwritev(td, fd, auio, offset)
|
|
|
|
struct thread *td;
|
|
|
|
struct uio *auio;
|
|
|
|
int fd;
|
|
|
|
off_t offset;
|
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2005-03-31 22:51:18 +00:00
|
|
|
error = fget_write(td, fd, &fp);
|
2004-07-10 15:42:16 +00:00
|
|
|
if (error)
|
2006-01-06 16:34:22 +00:00
|
|
|
return (error);
|
2005-07-07 18:17:55 +00:00
|
|
|
if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
|
|
|
|
error = ESPIPE;
|
|
|
|
else if (offset < 0 && fp->f_vnode->v_type != VCHR)
|
|
|
|
error = EINVAL;
|
|
|
|
else
|
|
|
|
error = dofilewrite(td, fd, fp, auio, offset, FOF_OFFSET);
|
|
|
|
fdrop(fp, td);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Common code for writev and pwritev that writes data to
|
|
|
|
* a file using the passed in uio, offset, and flags.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
dofilewrite(td, fd, fp, auio, offset, flags)
|
|
|
|
struct thread *td;
|
|
|
|
int fd;
|
|
|
|
struct file *fp;
|
|
|
|
struct uio *auio;
|
|
|
|
off_t offset;
|
|
|
|
int flags;
|
|
|
|
{
|
|
|
|
ssize_t cnt;
|
|
|
|
int error;
|
|
|
|
#ifdef KTRACE
|
|
|
|
struct uio *ktruio = NULL;
|
|
|
|
#endif
|
|
|
|
|
2004-07-10 15:42:16 +00:00
|
|
|
auio->uio_rw = UIO_WRITE;
|
|
|
|
auio->uio_td = td;
|
2005-07-07 18:17:55 +00:00
|
|
|
auio->uio_offset = offset;
|
1994-05-24 10:09:53 +00:00
|
|
|
#ifdef KTRACE
|
2004-07-10 15:42:16 +00:00
|
|
|
if (KTRPOINT(td, KTR_GENIO))
|
|
|
|
ktruio = cloneuio(auio);
|
1994-05-24 10:09:53 +00:00
|
|
|
#endif
|
2004-07-10 15:42:16 +00:00
|
|
|
cnt = auio->uio_resid;
|
2000-12-07 23:45:57 +00:00
|
|
|
if (fp->f_type == DTYPE_VNODE)
|
|
|
|
bwillwrite();
|
2005-07-07 18:17:55 +00:00
|
|
|
if ((error = fo_write(fp, auio, td->td_ucred, flags, td))) {
|
2004-07-10 15:42:16 +00:00
|
|
|
if (auio->uio_resid != cnt && (error == ERESTART ||
|
1994-05-24 10:09:53 +00:00
|
|
|
error == EINTR || error == EWOULDBLOCK))
|
|
|
|
error = 0;
|
2005-07-07 18:17:55 +00:00
|
|
|
/* Socket layer is responsible for issuing SIGPIPE. */
|
2007-03-01 19:20:25 +00:00
|
|
|
if (fp->f_type != DTYPE_SOCKET && error == EPIPE) {
|
2001-09-12 08:38:13 +00:00
|
|
|
PROC_LOCK(td->td_proc);
|
|
|
|
psignal(td->td_proc, SIGPIPE);
|
|
|
|
PROC_UNLOCK(td->td_proc);
|
2001-03-07 03:37:06 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2004-07-10 15:42:16 +00:00
|
|
|
cnt -= auio->uio_resid;
|
1994-05-24 10:09:53 +00:00
|
|
|
#ifdef KTRACE
|
2004-07-10 15:42:16 +00:00
|
|
|
if (ktruio != NULL) {
|
|
|
|
ktruio->uio_resid = cnt;
|
2005-03-31 22:51:18 +00:00
|
|
|
ktrgenio(fd, UIO_WRITE, ktruio, error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
#endif
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = cnt;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct ioctl_args {
|
|
|
|
int fd;
|
1998-08-24 08:39:39 +00:00
|
|
|
u_long com;
|
1994-05-24 10:09:53 +00:00
|
|
|
caddr_t data;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2004-11-14 12:04:34 +00:00
|
|
|
ioctl(struct thread *td, struct ioctl_args *uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2004-11-14 12:04:34 +00:00
|
|
|
u_long com;
|
2006-09-27 19:57:02 +00:00
|
|
|
int arg, error;
|
2004-11-14 12:04:34 +00:00
|
|
|
u_int size;
|
2006-09-27 19:57:02 +00:00
|
|
|
caddr_t data;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2005-01-18 07:37:05 +00:00
|
|
|
if (uap->com > 0xffffffff) {
|
|
|
|
printf(
|
|
|
|
"WARNING pid %d (%s): ioctl sign-extension ioctl %lx\n",
|
|
|
|
td->td_proc->p_pid, td->td_proc->p_comm, uap->com);
|
|
|
|
uap->com &= 0xffffffff;
|
|
|
|
}
|
2006-07-08 20:12:14 +00:00
|
|
|
com = uap->com;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Interpret high order word to find amount of data to be
|
|
|
|
* copied to/from the user's address space.
|
|
|
|
*/
|
|
|
|
size = IOCPARM_LEN(com);
|
2004-11-14 14:34:12 +00:00
|
|
|
if ((size > IOCPARM_MAX) ||
|
|
|
|
((com & (IOC_VOID | IOC_IN | IOC_OUT)) == 0) ||
|
2005-06-30 00:19:08 +00:00
|
|
|
#if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
|
|
|
|
((com & IOC_OUT) && size == 0) ||
|
|
|
|
#else
|
|
|
|
((com & (IOC_IN | IOC_OUT)) && size == 0) ||
|
|
|
|
#endif
|
2006-09-27 19:57:02 +00:00
|
|
|
((com & IOC_VOID) && size > 0 && size != sizeof(int)))
|
2002-01-13 11:58:06 +00:00
|
|
|
return (ENOTTY);
|
2000-11-18 21:01:04 +00:00
|
|
|
|
2004-11-14 14:34:12 +00:00
|
|
|
if (size > 0) {
|
2006-09-27 19:57:02 +00:00
|
|
|
if (!(com & IOC_VOID))
|
|
|
|
data = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
|
|
|
|
else {
|
|
|
|
/* Integer argument. */
|
|
|
|
arg = (intptr_t)uap->data;
|
|
|
|
data = (void *)&arg;
|
|
|
|
size = 0;
|
|
|
|
}
|
|
|
|
} else
|
2004-11-14 14:34:12 +00:00
|
|
|
data = (void *)&uap->data;
|
|
|
|
if (com & IOC_IN) {
|
|
|
|
error = copyin(uap->data, data, (u_int)size);
|
|
|
|
if (error) {
|
2006-10-14 19:01:55 +00:00
|
|
|
if (size > 0)
|
|
|
|
free(data, M_IOCTLOPS);
|
2004-11-14 14:34:12 +00:00
|
|
|
return (error);
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
2004-11-14 14:34:12 +00:00
|
|
|
} else if (com & IOC_OUT) {
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Zero the buffer so the user always
|
|
|
|
* gets back something deterministic.
|
|
|
|
*/
|
|
|
|
bzero(data, size);
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2006-07-08 20:12:14 +00:00
|
|
|
error = kern_ioctl(td, uap->fd, com, data);
|
|
|
|
|
|
|
|
if (error == 0 && (com & IOC_OUT))
|
|
|
|
error = copyout(data, uap->data, (u_int)size);
|
|
|
|
|
2006-09-27 19:57:02 +00:00
|
|
|
if (size > 0)
|
|
|
|
free(data, M_IOCTLOPS);
|
2006-07-08 20:12:14 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
|
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
struct filedesc *fdp;
|
|
|
|
int error;
|
|
|
|
int tmp;
|
|
|
|
|
|
|
|
if ((error = fget(td, fd, &fp)) != 0)
|
|
|
|
return (error);
|
|
|
|
if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
|
|
|
|
fdrop(fp, td);
|
|
|
|
return (EBADF);
|
|
|
|
}
|
|
|
|
fdp = td->td_proc->p_fd;
|
|
|
|
switch (com) {
|
|
|
|
case FIONCLEX:
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XLOCK(fdp);
|
2006-07-08 20:12:14 +00:00
|
|
|
fdp->fd_ofileflags[fd] &= ~UF_EXCLOSE;
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XUNLOCK(fdp);
|
2006-07-08 20:12:14 +00:00
|
|
|
goto out;
|
|
|
|
case FIOCLEX:
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XLOCK(fdp);
|
2006-07-08 20:12:14 +00:00
|
|
|
fdp->fd_ofileflags[fd] |= UF_EXCLOSE;
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XUNLOCK(fdp);
|
2006-07-08 20:12:14 +00:00
|
|
|
goto out;
|
|
|
|
case FIONBIO:
|
2002-01-13 11:58:06 +00:00
|
|
|
FILE_LOCK(fp);
|
1994-09-25 19:34:02 +00:00
|
|
|
if ((tmp = *(int *)data))
|
1994-05-24 10:09:53 +00:00
|
|
|
fp->f_flag |= FNONBLOCK;
|
|
|
|
else
|
|
|
|
fp->f_flag &= ~FNONBLOCK;
|
2002-01-13 11:58:06 +00:00
|
|
|
FILE_UNLOCK(fp);
|
2004-11-16 14:47:04 +00:00
|
|
|
data = (void *)&tmp;
|
2006-07-08 20:12:14 +00:00
|
|
|
break;
|
|
|
|
case FIOASYNC:
|
2002-01-13 11:58:06 +00:00
|
|
|
FILE_LOCK(fp);
|
1994-09-25 19:34:02 +00:00
|
|
|
if ((tmp = *(int *)data))
|
1994-05-24 10:09:53 +00:00
|
|
|
fp->f_flag |= FASYNC;
|
|
|
|
else
|
|
|
|
fp->f_flag &= ~FASYNC;
|
2002-01-13 11:58:06 +00:00
|
|
|
FILE_UNLOCK(fp);
|
2004-11-16 14:47:04 +00:00
|
|
|
data = (void *)&tmp;
|
2006-07-08 20:12:14 +00:00
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2004-11-16 14:47:04 +00:00
|
|
|
|
|
|
|
error = fo_ioctl(fp, com, data, td->td_ucred, td);
|
2006-07-08 20:12:14 +00:00
|
|
|
out:
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2002-03-14 01:32:30 +00:00
|
|
|
/*
 * sellock and selwait are initialized in selectinit() via SYSINIT.
 */
struct mtx sellock;		/* serializes the select/poll sleep protocol */
struct cv selwait;		/* condition variable select/poll sleepers wait on */
u_int nselcoll;			/* Select collisions since boot */
SYSCTL_UINT(_kern, OID_AUTO, nselcoll, CTLFLAG_RD, &nselcoll, 0, "");
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
/* Argument structure for select(2), used when sysproto.h is absent. */
struct select_args {
	int	nd;			/* number of descriptors to examine */
	fd_set	*in, *ou, *ex;		/* read / write / exception input sets */
	struct	timeval *tv;		/* optional timeout; NULL blocks forever */
};
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
select(td, uap)
|
|
|
|
register struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
register struct select_args *uap;
|
2002-09-01 20:37:28 +00:00
|
|
|
{
|
|
|
|
struct timeval tv, *tvp;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
if (uap->tv != NULL) {
|
|
|
|
error = copyin(uap->tv, &tv, sizeof(tv));
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
tvp = &tv;
|
|
|
|
} else
|
|
|
|
tvp = NULL;
|
|
|
|
|
|
|
|
return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * kern_select(): common backend for select(2).
 *
 * Examines up to nd descriptors named by the (possibly NULL) user-space
 * sets fd_in, fd_ou and fd_ex.  tvp, if non-NULL, is a relative timeout;
 * a NULL tvp blocks until an event.  On success the count of ready
 * descriptors is left in td->td_retval[0] and the result bits are
 * copied back out to the non-NULL user sets.  Returns 0 or an errno.
 */
int
kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
    fd_set *fd_ex, struct timeval *tvp)
{
	struct filedesc *fdp;
	/*
	 * The magic 2048 here is chosen to be just enough for FD_SETSIZE
	 * infds with the new FD_SETSIZE of 1024, and more than enough for
	 * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
	 * of 256.
	 */
	fd_mask s_selbits[howmany(2048, NFDBITS)];
	fd_mask *ibits[3], *obits[3], *selbits, *sbp;
	struct timeval atv, rtv, ttv;
	int error, timo;
	u_int ncoll, nbufbytes, ncpbytes, nfdbits;

	if (nd < 0)
		return (EINVAL);
	fdp = td->td_proc->p_fd;

	/* Clamp nd to the current size of the descriptor table. */
	FILEDESC_SLOCK(fdp);
	if (nd > td->td_proc->p_fd->fd_nfiles)
		nd = td->td_proc->p_fd->fd_nfiles; /* forgiving; slightly wrong */
	FILEDESC_SUNLOCK(fdp);

	/*
	 * Allocate just enough bits for the non-null fd_sets.  Use the
	 * preallocated auto buffer if possible.
	 */
	nfdbits = roundup(nd, NFDBITS);
	ncpbytes = nfdbits / NBBY;
	nbufbytes = 0;
	if (fd_in != NULL)
		nbufbytes += 2 * ncpbytes;	/* input copy + output copy */
	if (fd_ou != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ex != NULL)
		nbufbytes += 2 * ncpbytes;
	if (nbufbytes <= sizeof s_selbits)
		selbits = &s_selbits[0];
	else
		selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);

	/*
	 * Assign pointers into the bit buffers and fetch the input bits.
	 * Put the output buffers together so that they can be bzeroed
	 * together.
	 */
	sbp = selbits;
#define	getbits(name, x) \
	do { \
		if (name == NULL) \
			ibits[x] = NULL; \
		else { \
			ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp; \
			obits[x] = sbp; \
			sbp += ncpbytes / sizeof *sbp; \
			error = copyin(name, ibits[x], ncpbytes); \
			if (error != 0) \
				goto done_nosellock; \
		} \
	} while (0)
	getbits(fd_in, 0);
	getbits(fd_ou, 1);
	getbits(fd_ex, 2);
#undef	getbits
	/* Zero the (contiguous) output half of the bit buffer. */
	if (nbufbytes != 0)
		bzero(selbits, nbufbytes / 2);

	/* Convert the relative timeout into an absolute uptime deadline. */
	if (tvp != NULL) {
		atv = *tvp;
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done_nosellock;
		}
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		/* atv == 0 means "no deadline": block indefinitely. */
		atv.tv_sec = 0;
		atv.tv_usec = 0;
	}
	timo = 0;
	TAILQ_INIT(&td->td_selq);
	mtx_lock(&sellock);
retry:
	/* Snapshot the collision count to detect wakeups raced with the scan. */
	ncoll = nselcoll;
	thread_lock(td);
	td->td_flags |= TDF_SELECT;
	thread_unlock(td);
	mtx_unlock(&sellock);

	error = selscan(td, ibits, obits, nd);
	mtx_lock(&sellock);
	if (error || td->td_retval[0])
		goto done;
	if (atv.tv_sec || atv.tv_usec) {
		getmicrouptime(&rtv);
		if (timevalcmp(&rtv, &atv, >=))
			goto done;	/* deadline already passed */
		ttv = atv;
		timevalsub(&ttv, &rtv);
		/* Clamp the sleep to 24 hours' worth of ticks. */
		timo = ttv.tv_sec > 24 * 60 * 60 ?
		    24 * 60 * 60 * hz : tvtohz(&ttv);
	}

	/*
	 * An event of interest may occur while we do not hold
	 * sellock, so check TDF_SELECT and the number of
	 * collisions and rescan the file descriptors if
	 * necessary.
	 */
	thread_lock(td);
	if ((td->td_flags & TDF_SELECT) == 0 || nselcoll != ncoll) {
		thread_unlock(td);
		goto retry;
	}
	thread_unlock(td);

	/* Sleep (interruptibly) until selwakeup(), timeout, or signal. */
	if (timo > 0)
		error = cv_timedwait_sig(&selwait, &sellock, timo);
	else
		error = cv_wait_sig(&selwait, &sellock);

	if (error == 0)
		goto retry;

done:
	clear_selinfo_list(td);
	thread_lock(td);
	td->td_flags &= ~TDF_SELECT;
	thread_unlock(td);
	mtx_unlock(&sellock);

done_nosellock:
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	/*
	 * Copy the result bits back to the non-NULL user sets; a copyout
	 * failure replaces the (zero) error.
	 */
#define	putbits(name, x) \
	if (name && (error2 = copyout(obits[x], name, ncpbytes))) \
		error = error2;
	if (error == 0) {
		int error2;

		putbits(fd_in, 0);
		putbits(fd_ou, 1);
		putbits(fd_ex, 2);
#undef putbits
	}
	if (selbits != &s_selbits[0])
		free(selbits, M_SELECT);

	return (error);
}
|
|
|
|
|
1995-12-14 08:32:45 +00:00
|
|
|
/*
 * Scan the three input bit vectors (read / write / exception) covering
 * nfd descriptors, fo_poll() each descriptor whose bit is set, and
 * record ready descriptors in the matching output vector.  Returns
 * EBADF if a set bit names a closed descriptor; otherwise 0, with the
 * ready count stored in td->td_retval[0].
 */
static int
selscan(td, ibits, obits, nfd)
	struct thread *td;
	fd_mask **ibits, **obits;
	int nfd;
{
	int msk, i, fd;
	fd_mask bits;
	struct file *fp;
	int n = 0;	/* number of ready descriptors found */
	/* Note: backend also returns POLLHUP/POLLERR if appropriate. */
	static int flag[3] = { POLLRDNORM, POLLWRNORM, POLLRDBAND };
	struct filedesc *fdp = td->td_proc->p_fd;

	/* Hold the filedesc lock shared across the whole scan. */
	FILEDESC_SLOCK(fdp);
	for (msk = 0; msk < 3; msk++) {
		if (ibits[msk] == NULL)
			continue;
		for (i = 0; i < nfd; i += NFDBITS) {
			bits = ibits[msk][i/NFDBITS];
			/* ffs(int mask) not portable, fd_mask is long */
			for (fd = i; bits && fd < nfd; fd++, bits >>= 1) {
				if (!(bits & 1))
					continue;
				if ((fp = fget_locked(fdp, fd)) == NULL) {
					FILEDESC_SUNLOCK(fdp);
					return (EBADF);
				}
				if (fo_poll(fp, flag[msk], td->td_ucred,
				    td)) {
					/* Descriptor is ready: set its output bit. */
					obits[msk][(fd)/NFDBITS] |=
					    ((fd_mask)1 << ((fd) % NFDBITS));
					n++;
				}
			}
		}
	}
	FILEDESC_SUNLOCK(fdp);
	td->td_retval[0] = n;
	return (0);
}
|
|
|
|
|
1997-09-14 02:30:32 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
/* Argument structure for poll(2), used when sysproto.h is absent. */
struct poll_args {
	struct pollfd *fds;	/* user array of descriptors to poll */
	u_int	nfds;		/* number of elements in fds */
	int	timeout;	/* timeout in milliseconds, or INFTIM */
};
#endif
|
|
|
|
/*
 * poll(2) system call.
 *
 * Copies the user pollfd array in, repeatedly scans it with pollscan()
 * under the same sellock/collision retry protocol used by select(2),
 * and copies the revents results back out.  Returns 0 or an errno.
 */
int
poll(td, uap)
	struct thread *td;
	struct poll_args *uap;
{
	struct pollfd *bits;
	struct pollfd smallbits[32];	/* stack buffer for small arrays */
	struct timeval atv, rtv, ttv;
	int error = 0, timo;
	u_int ncoll, nfds;
	size_t ni;

	nfds = uap->nfds;

	/*
	 * This is kinda bogus.  We have fd limits, but that is not
	 * really related to the size of the pollfd array.  Make sure
	 * we let the process use at least FD_SETSIZE entries and at
	 * least enough for the current limits.  We want to be reasonably
	 * safe, but not overly restrictive.
	 */
	PROC_LOCK(td->td_proc);
	if ((nfds > lim_cur(td->td_proc, RLIMIT_NOFILE)) &&
	    (nfds > FD_SETSIZE)) {
		PROC_UNLOCK(td->td_proc);
		error = EINVAL;
		goto done2;
	}
	PROC_UNLOCK(td->td_proc);
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallbits))
		bits = malloc(ni, M_TEMP, M_WAITOK);
	else
		bits = smallbits;
	error = copyin(uap->fds, bits, ni);
	if (error)
		goto done_nosellock;
	/* Convert the millisecond timeout into an absolute uptime deadline. */
	if (uap->timeout != INFTIM) {
		atv.tv_sec = uap->timeout / 1000;
		atv.tv_usec = (uap->timeout % 1000) * 1000;
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done_nosellock;
		}
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		/* atv == 0 means "no deadline": block indefinitely. */
		atv.tv_sec = 0;
		atv.tv_usec = 0;
	}
	timo = 0;
	TAILQ_INIT(&td->td_selq);
	mtx_lock(&sellock);
retry:
	/* Snapshot the collision count to detect wakeups raced with the scan. */
	ncoll = nselcoll;
	thread_lock(td);
	td->td_flags |= TDF_SELECT;
	thread_unlock(td);
	mtx_unlock(&sellock);

	error = pollscan(td, bits, nfds);
	mtx_lock(&sellock);
	if (error || td->td_retval[0])
		goto done;
	if (atv.tv_sec || atv.tv_usec) {
		getmicrouptime(&rtv);
		if (timevalcmp(&rtv, &atv, >=))
			goto done;	/* deadline already passed */
		ttv = atv;
		timevalsub(&ttv, &rtv);
		/* Clamp the sleep to 24 hours' worth of ticks. */
		timo = ttv.tv_sec > 24 * 60 * 60 ?
		    24 * 60 * 60 * hz : tvtohz(&ttv);
	}
	/*
	 * An event of interest may occur while we do not hold
	 * sellock, so check TDF_SELECT and the number of collisions
	 * and rescan the file descriptors if necessary.
	 */
	thread_lock(td);
	if ((td->td_flags & TDF_SELECT) == 0 || nselcoll != ncoll) {
		thread_unlock(td);
		goto retry;
	}
	thread_unlock(td);

	/* Sleep (interruptibly) until selwakeup(), timeout, or signal. */
	if (timo > 0)
		error = cv_timedwait_sig(&selwait, &sellock, timo);
	else
		error = cv_wait_sig(&selwait, &sellock);

	if (error == 0)
		goto retry;

done:
	clear_selinfo_list(td);
	thread_lock(td);
	td->td_flags &= ~TDF_SELECT;
	thread_unlock(td);
	mtx_unlock(&sellock);

done_nosellock:
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0) {
		error = copyout(bits, uap->fds, ni);
		/* NOTE(review): this goto is redundant — control falls
		 * through to "out" either way. */
		if (error)
			goto out;
	}
out:
	if (ni > sizeof(smallbits))
		free(bits, M_TEMP);
done2:
	return (error);
}
|
|
|
|
|
|
|
|
static int
|
2001-09-12 08:38:13 +00:00
|
|
|
pollscan(td, fds, nfd)
|
|
|
|
struct thread *td;
|
1997-09-14 02:30:32 +00:00
|
|
|
struct pollfd *fds;
|
2001-02-27 15:11:31 +00:00
|
|
|
u_int nfd;
|
1997-09-14 02:30:32 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
register struct filedesc *fdp = td->td_proc->p_fd;
|
1997-09-14 02:30:32 +00:00
|
|
|
int i;
|
|
|
|
struct file *fp;
|
|
|
|
int n = 0;
|
|
|
|
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_SLOCK(fdp);
|
1997-09-14 02:30:32 +00:00
|
|
|
for (i = 0; i < nfd; i++, fds++) {
|
1998-12-10 01:53:26 +00:00
|
|
|
if (fds->fd >= fdp->fd_nfiles) {
|
1997-09-14 02:30:32 +00:00
|
|
|
fds->revents = POLLNVAL;
|
|
|
|
n++;
|
1998-12-10 01:53:26 +00:00
|
|
|
} else if (fds->fd < 0) {
|
|
|
|
fds->revents = 0;
|
1997-09-14 02:30:32 +00:00
|
|
|
} else {
|
|
|
|
fp = fdp->fd_ofiles[fds->fd];
|
2000-11-18 21:01:04 +00:00
|
|
|
if (fp == NULL) {
|
1997-09-14 02:30:32 +00:00
|
|
|
fds->revents = POLLNVAL;
|
|
|
|
n++;
|
|
|
|
} else {
|
1997-11-23 10:30:50 +00:00
|
|
|
/*
|
|
|
|
* Note: backend also returns POLLHUP and
|
|
|
|
* POLLERR if appropriate.
|
|
|
|
*/
|
This is what was "fdfix2.patch," a fix for fd sharing. It's pretty
far-reaching in fd-land, so you'll want to consult the code for
changes. The biggest change is that now, you don't use
fp->f_ops->fo_foo(fp, bar)
but instead
fo_foo(fp, bar),
which increments and decrements the fp refcount upon entry and exit.
Two new calls, fhold() and fdrop(), are provided. Each does what it
seems like it should, and if fdrop() brings the refcount to zero, the
fd is freed as well.
Thanks to peter ("to hell with it, it looks ok to me.") for his review.
Thanks to msmith for keeping me from putting locks everywhere :)
Reviewed by: peter
1999-09-19 17:00:25 +00:00
|
|
|
fds->revents = fo_poll(fp, fds->events,
|
Make similar changes to fo_stat() and fo_poll() as made earlier to
fo_read() and fo_write(): explicitly use the cred argument to fo_poll()
as "active_cred" using the passed file descriptor's f_cred reference
to provide access to the file credential. Add an active_cred
argument to fo_stat() so that implementers have access to the active
credential as well as the file credential. Generally modify callers
of fo_stat() to pass in td->td_ucred rather than fp->f_cred, which
was redundantly provided via the fp argument. This set of modifications
also permits threads to perform these operations on behalf of another
thread without modifying their credential.
Trickle this change down into fo_stat/poll() implementations:
- badfo_poll(), badfo_stat(): modify/add arguments.
- kqueue_poll(), kqueue_stat(): modify arguments.
- pipe_poll(), pipe_stat(): modify/add arguments, pass active_cred to
MAC checks rather than td->td_ucred.
- soo_poll(), soo_stat(): modify/add arguments, pass fp->f_cred rather
than cred to pru_sopoll() to maintain current semantics.
- sopoll(): moidfy arguments.
- vn_poll(), vn_statfile(): modify/add arguments, pass new arguments
to vn_stat(). Pass active_cred to MAC and fp->f_cred to VOP_POLL()
to maintian current semantics.
- vn_close(): rename cred to file_cred to reflect reality while I'm here.
- vn_stat(): Add active_cred and file_cred arguments to vn_stat()
and consumers so that this distinction is maintained at the VFS
as well as 'struct file' layer. Pass active_cred instead of
td->td_ucred to MAC and to VOP_GETATTR() to maintain current semantics.
- fifofs: modify the creation of a "filetemp" so that the file
credential is properly initialized and can be used in the socket
code if desired. Pass ap->a_td->td_ucred as the active
credential to soo_poll(). If we teach the vnop interface about
the distinction between file and active credentials, we would use
the active credential here.
Note that current inconsistent passing of active_cred vs. file_cred to
VOP's is maintained. It's not clear why GETATTR would be authorized
using active_cred while POLL would be authorized using file_cred at
the file system level.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-08-16 12:52:03 +00:00
|
|
|
td->td_ucred, td);
|
1997-09-14 02:30:32 +00:00
|
|
|
if (fds->revents != 0)
|
|
|
|
n++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = n;
|
1997-09-14 02:30:32 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * OpenBSD poll system call.
 *
 * XXX this isn't quite a true representation.. OpenBSD uses select ops.
 */
#ifndef _SYS_SYSPROTO_H_
struct openbsd_poll_args {
	struct pollfd *fds;	/* array of descriptors to poll */
	u_int	nfds;		/* number of entries in fds */
	int	timeout;	/* timeout in milliseconds, or INFTIM */
};
#endif
int
openbsd_poll(td, uap)
	register struct thread *td;
	register struct openbsd_poll_args *uap;
{
	/*
	 * Hand off directly to poll(); the cast relies on
	 * struct openbsd_poll_args and struct poll_args having
	 * identical member layout.
	 */
	return (poll(td, (struct poll_args *)uap));
}
|
|
|
|
|
2002-03-14 01:32:30 +00:00
|
|
|
/*
|
2007-03-04 22:36:48 +00:00
|
|
|
* Remove the references to the thread from all of the objects we were
|
|
|
|
* polling.
|
2002-03-14 01:32:30 +00:00
|
|
|
*
|
2007-03-04 22:36:48 +00:00
|
|
|
* This code assumes that the underlying owner of the selinfo structure will
|
|
|
|
* hold sellock before it changes it, and that it will unlink itself from our
|
|
|
|
* list if it goes away.
|
2002-03-14 01:32:30 +00:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
clear_selinfo_list(td)
|
|
|
|
struct thread *td;
|
|
|
|
{
|
|
|
|
struct selinfo *si;
|
|
|
|
|
|
|
|
mtx_assert(&sellock, MA_OWNED);
|
|
|
|
TAILQ_FOREACH(si, &td->td_selq, si_thrlist)
|
|
|
|
si->si_thread = NULL;
|
|
|
|
TAILQ_INIT(&td->td_selq);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Record a select request.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
selrecord(selector, sip)
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *selector;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct selinfo *sip;
|
|
|
|
{
|
|
|
|
|
2002-03-14 01:32:30 +00:00
|
|
|
mtx_lock(&sellock);
|
|
|
|
/*
|
2002-07-24 00:29:22 +00:00
|
|
|
* If the selinfo's thread pointer is NULL then take ownership of it.
|
|
|
|
*
|
|
|
|
* If the thread pointer is not NULL and it points to another
|
|
|
|
* thread, then we have a collision.
|
|
|
|
*
|
|
|
|
* If the thread pointer is not NULL and points back to us then leave
|
|
|
|
* it alone as we've already added pointed it at us and added it to
|
|
|
|
* our list.
|
2002-03-14 01:32:30 +00:00
|
|
|
*/
|
|
|
|
if (sip->si_thread == NULL) {
|
|
|
|
sip->si_thread = selector;
|
|
|
|
TAILQ_INSERT_TAIL(&selector->td_selq, sip, si_thrlist);
|
|
|
|
} else if (sip->si_thread != selector) {
|
|
|
|
sip->si_flags |= SI_COLL;
|
2001-01-24 11:12:37 +00:00
|
|
|
}
|
2002-03-14 01:32:30 +00:00
|
|
|
|
|
|
|
mtx_unlock(&sellock);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
2003-11-09 09:17:26 +00:00
|
|
|
/* Wake up a selecting thread. */
void
selwakeup(sip)
	struct selinfo *sip;
{
	/* pri of -1 requests no priority adjustment on wakeup. */
	doselwakeup(sip, -1);
}
|
|
|
|
|
|
|
|
/* Wake up a selecting thread, and set its priority. */
void
selwakeuppri(sip, pri)
	struct selinfo *sip;
	int pri;
{
	doselwakeup(sip, pri);
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
 * Do a wakeup when a selectable event occurs.
 *
 * sip - the selinfo on which the event occurred
 * pri - priority passed through to cv_broadcastpri(); callers use -1 to
 *       request no priority adjustment (see selwakeup()).
 *
 * Lock order here matters: sellock protects the selinfo/thread linkage,
 * while the thread lock protects td_flags; sellock is held across the
 * whole operation.
 */
static void
doselwakeup(sip, pri)
	struct selinfo *sip;
	int pri;
{
	struct thread *td;

	mtx_lock(&sellock);
	td = sip->si_thread;
	if ((sip->si_flags & SI_COLL) != 0) {
		/*
		 * Multiple threads collided on this selinfo: we cannot tell
		 * which one cares, so wake them all.
		 */
		nselcoll++;
		sip->si_flags &= ~SI_COLL;
		cv_broadcastpri(&selwait, pri);
	}
	if (td == NULL) {
		/* Nobody owns this selinfo; the broadcast above (if any)
		 * already handled the colliding waiters. */
		mtx_unlock(&sellock);
		return;
	}
	/* Detach the selinfo from its owning thread's list. */
	TAILQ_REMOVE(&td->td_selq, sip, si_thrlist);
	sip->si_thread = NULL;
	thread_lock(td);
	td->td_flags &= ~TDF_SELECT;
	thread_unlock(td);
	/* Pull the thread off the select sleep queue so it re-scans. */
	sleepq_remove(td, &selwait);
	mtx_unlock(&sellock);
}
|
2001-05-14 05:26:48 +00:00
|
|
|
|
2002-03-19 21:25:46 +00:00
|
|
|
static void selectinit(void *);
SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, selectinit, NULL)

/* ARGSUSED*/
/*
 * Initialize the global select/poll synchronization objects (the selwait
 * condition variable and sellock mutex).  Run once at boot via SYSINIT.
 */
static void
selectinit(dummy)
	void *dummy;
{
	cv_init(&selwait, "select");
	mtx_init(&sellock, "sellck", NULL, MTX_DEF);
}
|