1994-05-24 10:09:53 +00:00
|
|
|
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD$
 */
|
|
|
|
|
1996-01-03 21:42:35 +00:00
|
|
|
#include "opt_ktrace.h"
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
1995-11-12 06:43:28 +00:00
|
|
|
#include <sys/sysproto.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/filedesc.h>
|
1997-03-24 11:52:29 +00:00
|
|
|
#include <sys/filio.h>
|
1997-03-23 03:37:54 +00:00
|
|
|
#include <sys/fcntl.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/file.h>
|
|
|
|
#include <sys/proc.h>
|
1994-10-02 17:35:40 +00:00
|
|
|
#include <sys/signalvar.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/socketvar.h>
|
|
|
|
#include <sys/uio.h>
|
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/malloc.h>
|
1997-09-14 02:30:32 +00:00
|
|
|
#include <sys/poll.h>
|
2001-02-07 23:28:01 +00:00
|
|
|
#include <sys/resourcevar.h>
|
2001-01-09 04:33:49 +00:00
|
|
|
#include <sys/selinfo.h>
|
2000-01-05 19:40:17 +00:00
|
|
|
#include <sys/sysctl.h>
|
1997-09-14 02:30:32 +00:00
|
|
|
#include <sys/sysent.h>
|
2000-11-18 21:01:04 +00:00
|
|
|
#include <sys/bio.h>
|
|
|
|
#include <sys/buf.h>
|
2001-05-14 05:26:48 +00:00
|
|
|
#include <sys/condvar.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#ifdef KTRACE
|
|
|
|
#include <sys/ktrace.h>
|
|
|
|
#endif
|
2000-11-18 21:01:04 +00:00
|
|
|
#include <vm/vm.h>
|
|
|
|
#include <vm/vm_page.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1998-08-24 08:39:39 +00:00
|
|
|
#include <machine/limits.h>
|
|
|
|
|
1997-10-12 20:26:33 +00:00
|
|
|
static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
|
|
|
|
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
|
|
|
|
MALLOC_DEFINE(M_IOV, "iov", "large iov's");
|
1997-10-11 18:31:40 +00:00
|
|
|
|
2001-02-27 15:11:31 +00:00
|
|
|
static int pollscan __P((struct proc *, struct pollfd *, u_int));
|
2001-05-14 05:26:48 +00:00
|
|
|
static int pollholddrop __P((struct proc *, struct pollfd *, u_int, int));
|
1997-11-23 10:30:50 +00:00
|
|
|
static int selscan __P((struct proc *, fd_mask **, fd_mask **, int));
|
2001-05-14 05:26:48 +00:00
|
|
|
static int selholddrop __P((struct proc *, fd_mask *, fd_mask *, int, int));
|
1999-04-04 21:41:28 +00:00
|
|
|
static int dofileread __P((struct proc *, struct file *, int, void *,
|
|
|
|
size_t, off_t, int));
|
|
|
|
static int dofilewrite __P((struct proc *, struct file *, int,
|
|
|
|
const void *, size_t, off_t, int));
|
|
|
|
|
2000-06-12 18:06:12 +00:00
|
|
|
struct file*
|
2000-11-18 21:01:04 +00:00
|
|
|
holdfp(fdp, fd, flag)
|
1999-04-04 21:41:28 +00:00
|
|
|
struct filedesc* fdp;
|
|
|
|
int fd, flag;
|
|
|
|
{
|
|
|
|
struct file* fp;
|
|
|
|
|
|
|
|
if (((u_int)fd) >= fdp->fd_nfiles ||
|
|
|
|
(fp = fdp->fd_ofiles[fd]) == NULL ||
|
2000-11-18 21:01:04 +00:00
|
|
|
(fp->f_flag & flag) == 0) {
|
1999-04-04 21:41:28 +00:00
|
|
|
return (NULL);
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
|
|
|
fhold(fp);
|
1999-04-04 21:41:28 +00:00
|
|
|
return (fp);
|
|
|
|
}
|
1994-10-10 01:00:49 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Read system call.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct read_args {
|
|
|
|
int fd;
|
1998-09-05 14:30:11 +00:00
|
|
|
void *buf;
|
|
|
|
size_t nbyte;
|
1994-05-24 10:09:53 +00:00
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
read(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
|
|
|
register struct read_args *uap;
|
|
|
|
{
|
|
|
|
register struct file *fp;
|
2000-11-18 21:01:04 +00:00
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2000-11-18 21:01:04 +00:00
|
|
|
if ((fp = holdfp(p->p_fd, uap->fd, FREAD)) == NULL)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (EBADF);
|
2000-11-18 21:01:04 +00:00
|
|
|
error = dofileread(p, fp, uap->fd, uap->buf, uap->nbyte, (off_t)-1, 0);
|
|
|
|
fdrop(fp, p);
|
|
|
|
return(error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
1999-03-27 21:16:58 +00:00
|
|
|
/*
|
1999-04-04 21:41:28 +00:00
|
|
|
* Pread system call
|
1999-03-27 21:16:58 +00:00
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct pread_args {
|
|
|
|
int fd;
|
|
|
|
void *buf;
|
|
|
|
size_t nbyte;
|
1999-04-04 21:41:28 +00:00
|
|
|
int pad;
|
|
|
|
off_t offset;
|
1999-03-27 21:16:58 +00:00
|
|
|
};
|
|
|
|
#endif
|
|
|
|
int
|
|
|
|
pread(p, uap)
|
|
|
|
struct proc *p;
|
|
|
|
register struct pread_args *uap;
|
|
|
|
{
|
|
|
|
register struct file *fp;
|
2000-11-18 21:01:04 +00:00
|
|
|
int error;
|
1999-04-04 21:41:28 +00:00
|
|
|
|
2000-11-18 21:01:04 +00:00
|
|
|
if ((fp = holdfp(p->p_fd, uap->fd, FREAD)) == NULL)
|
1999-04-04 21:41:28 +00:00
|
|
|
return (EBADF);
|
2000-11-18 21:01:04 +00:00
|
|
|
if (fp->f_type != DTYPE_VNODE) {
|
|
|
|
error = ESPIPE;
|
|
|
|
} else {
|
|
|
|
error = dofileread(p, fp, uap->fd, uap->buf, uap->nbyte,
|
|
|
|
uap->offset, FOF_OFFSET);
|
|
|
|
}
|
|
|
|
fdrop(fp, p);
|
|
|
|
return(error);
|
1999-04-04 21:41:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Code common for read and pread
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
dofileread(p, fp, fd, buf, nbyte, offset, flags)
|
|
|
|
struct proc *p;
|
|
|
|
struct file *fp;
|
|
|
|
int fd, flags;
|
|
|
|
void *buf;
|
|
|
|
size_t nbyte;
|
|
|
|
off_t offset;
|
|
|
|
{
|
1999-03-27 21:16:58 +00:00
|
|
|
struct uio auio;
|
|
|
|
struct iovec aiov;
|
|
|
|
long cnt, error = 0;
|
|
|
|
#ifdef KTRACE
|
|
|
|
struct iovec ktriov;
|
2000-07-02 08:08:09 +00:00
|
|
|
struct uio ktruio;
|
2000-07-27 03:45:18 +00:00
|
|
|
int didktr = 0;
|
1999-03-27 21:16:58 +00:00
|
|
|
#endif
|
|
|
|
|
1999-04-04 21:41:28 +00:00
|
|
|
aiov.iov_base = (caddr_t)buf;
|
|
|
|
aiov.iov_len = nbyte;
|
1999-03-27 21:16:58 +00:00
|
|
|
auio.uio_iov = &aiov;
|
|
|
|
auio.uio_iovcnt = 1;
|
1999-04-04 21:41:28 +00:00
|
|
|
auio.uio_offset = offset;
|
|
|
|
if (nbyte > INT_MAX)
|
1999-03-27 21:16:58 +00:00
|
|
|
return (EINVAL);
|
1999-04-04 21:41:28 +00:00
|
|
|
auio.uio_resid = nbyte;
|
1999-03-27 21:16:58 +00:00
|
|
|
auio.uio_rw = UIO_READ;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
|
|
|
auio.uio_procp = p;
|
|
|
|
#ifdef KTRACE
|
|
|
|
/*
|
|
|
|
* if tracing, save a copy of iovec
|
|
|
|
*/
|
2000-07-02 08:08:09 +00:00
|
|
|
if (KTRPOINT(p, KTR_GENIO)) {
|
1999-03-27 21:16:58 +00:00
|
|
|
ktriov = aiov;
|
2000-07-02 08:08:09 +00:00
|
|
|
ktruio = auio;
|
2000-07-27 03:45:18 +00:00
|
|
|
didktr = 1;
|
2000-07-02 08:08:09 +00:00
|
|
|
}
|
1999-03-27 21:16:58 +00:00
|
|
|
#endif
|
1999-04-04 21:41:28 +00:00
|
|
|
cnt = nbyte;
|
2000-11-18 21:01:04 +00:00
|
|
|
|
|
|
|
if ((error = fo_read(fp, &auio, fp->f_cred, flags, p))) {
|
1999-03-27 21:16:58 +00:00
|
|
|
if (auio.uio_resid != cnt && (error == ERESTART ||
|
|
|
|
error == EINTR || error == EWOULDBLOCK))
|
|
|
|
error = 0;
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
1999-03-27 21:16:58 +00:00
|
|
|
cnt -= auio.uio_resid;
|
|
|
|
#ifdef KTRACE
|
2000-07-27 03:45:18 +00:00
|
|
|
if (didktr && error == 0) {
|
2000-07-02 08:08:09 +00:00
|
|
|
ktruio.uio_iov = &ktriov;
|
|
|
|
ktruio.uio_resid = cnt;
|
|
|
|
ktrgenio(p->p_tracep, fd, UIO_READ, &ktruio, error);
|
|
|
|
}
|
1999-03-27 21:16:58 +00:00
|
|
|
#endif
|
|
|
|
p->p_retval[0] = cnt;
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Scatter read system call.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct readv_args {
|
1995-11-11 06:57:34 +00:00
|
|
|
int fd;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct iovec *iovp;
|
|
|
|
u_int iovcnt;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
readv(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
|
|
|
register struct readv_args *uap;
|
|
|
|
{
|
|
|
|
register struct file *fp;
|
|
|
|
register struct filedesc *fdp = p->p_fd;
|
|
|
|
struct uio auio;
|
|
|
|
register struct iovec *iov;
|
|
|
|
struct iovec *needfree;
|
|
|
|
struct iovec aiov[UIO_SMALLIOV];
|
|
|
|
long i, cnt, error = 0;
|
|
|
|
u_int iovlen;
|
|
|
|
#ifdef KTRACE
|
|
|
|
struct iovec *ktriov = NULL;
|
2000-07-02 08:08:09 +00:00
|
|
|
struct uio ktruio;
|
1994-05-24 10:09:53 +00:00
|
|
|
#endif
|
|
|
|
|
2000-11-18 21:01:04 +00:00
|
|
|
if ((fp = holdfp(fdp, uap->fd, FREAD)) == NULL)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (EBADF);
|
|
|
|
/* note: can't use iovlen until iovcnt is validated */
|
|
|
|
iovlen = uap->iovcnt * sizeof (struct iovec);
|
|
|
|
if (uap->iovcnt > UIO_SMALLIOV) {
|
|
|
|
if (uap->iovcnt > UIO_MAXIOV)
|
|
|
|
return (EINVAL);
|
|
|
|
MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
|
|
|
|
needfree = iov;
|
|
|
|
} else {
|
|
|
|
iov = aiov;
|
|
|
|
needfree = NULL;
|
|
|
|
}
|
|
|
|
auio.uio_iov = iov;
|
|
|
|
auio.uio_iovcnt = uap->iovcnt;
|
|
|
|
auio.uio_rw = UIO_READ;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
|
|
|
auio.uio_procp = p;
|
1997-06-16 00:29:36 +00:00
|
|
|
auio.uio_offset = -1;
|
1994-09-25 19:34:02 +00:00
|
|
|
if ((error = copyin((caddr_t)uap->iovp, (caddr_t)iov, iovlen)))
|
1994-05-24 10:09:53 +00:00
|
|
|
goto done;
|
|
|
|
auio.uio_resid = 0;
|
|
|
|
for (i = 0; i < uap->iovcnt; i++) {
|
1998-08-24 08:39:39 +00:00
|
|
|
if (iov->iov_len > INT_MAX - auio.uio_resid) {
|
1994-05-24 10:09:53 +00:00
|
|
|
error = EINVAL;
|
|
|
|
goto done;
|
|
|
|
}
|
1998-08-24 08:39:39 +00:00
|
|
|
auio.uio_resid += iov->iov_len;
|
1994-05-24 10:09:53 +00:00
|
|
|
iov++;
|
|
|
|
}
|
|
|
|
#ifdef KTRACE
|
|
|
|
/*
|
|
|
|
* if tracing, save a copy of iovec
|
|
|
|
*/
|
|
|
|
if (KTRPOINT(p, KTR_GENIO)) {
|
|
|
|
MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
|
|
|
|
bcopy((caddr_t)auio.uio_iov, (caddr_t)ktriov, iovlen);
|
2000-07-02 08:08:09 +00:00
|
|
|
ktruio = auio;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
cnt = auio.uio_resid;
|
2000-11-18 21:01:04 +00:00
|
|
|
if ((error = fo_read(fp, &auio, fp->f_cred, 0, p))) {
|
1994-05-24 10:09:53 +00:00
|
|
|
if (auio.uio_resid != cnt && (error == ERESTART ||
|
|
|
|
error == EINTR || error == EWOULDBLOCK))
|
|
|
|
error = 0;
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
cnt -= auio.uio_resid;
|
|
|
|
#ifdef KTRACE
|
|
|
|
if (ktriov != NULL) {
|
2000-07-02 08:08:09 +00:00
|
|
|
if (error == 0) {
|
|
|
|
ktruio.uio_iov = ktriov;
|
|
|
|
ktruio.uio_resid = cnt;
|
|
|
|
ktrgenio(p->p_tracep, uap->fd, UIO_READ, &ktruio,
|
|
|
|
error);
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
FREE(ktriov, M_TEMP);
|
|
|
|
}
|
|
|
|
#endif
|
1997-11-06 19:29:57 +00:00
|
|
|
p->p_retval[0] = cnt;
|
1994-05-24 10:09:53 +00:00
|
|
|
done:
|
2000-11-18 21:01:04 +00:00
|
|
|
fdrop(fp, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (needfree)
|
|
|
|
FREE(needfree, M_IOV);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Write system call
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct write_args {
|
|
|
|
int fd;
|
1998-09-05 14:30:11 +00:00
|
|
|
const void *buf;
|
|
|
|
size_t nbyte;
|
1994-05-24 10:09:53 +00:00
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
write(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
|
|
|
register struct write_args *uap;
|
|
|
|
{
|
|
|
|
register struct file *fp;
|
2000-11-18 21:01:04 +00:00
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2000-11-18 21:01:04 +00:00
|
|
|
if ((fp = holdfp(p->p_fd, uap->fd, FWRITE)) == NULL)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (EBADF);
|
2000-11-18 21:01:04 +00:00
|
|
|
error = dofilewrite(p, fp, uap->fd, uap->buf, uap->nbyte, (off_t)-1, 0);
|
|
|
|
fdrop(fp, p);
|
|
|
|
return(error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
1999-03-27 21:16:58 +00:00
|
|
|
/*
|
1999-04-04 21:41:28 +00:00
|
|
|
* Pwrite system call
|
1999-03-27 21:16:58 +00:00
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct pwrite_args {
|
|
|
|
int fd;
|
|
|
|
const void *buf;
|
|
|
|
size_t nbyte;
|
1999-04-04 21:41:28 +00:00
|
|
|
int pad;
|
|
|
|
off_t offset;
|
1999-03-27 21:16:58 +00:00
|
|
|
};
|
|
|
|
#endif
|
|
|
|
int
|
|
|
|
pwrite(p, uap)
|
|
|
|
struct proc *p;
|
|
|
|
register struct pwrite_args *uap;
|
|
|
|
{
|
|
|
|
register struct file *fp;
|
2000-11-18 21:01:04 +00:00
|
|
|
int error;
|
1999-04-04 21:41:28 +00:00
|
|
|
|
2000-11-18 21:01:04 +00:00
|
|
|
if ((fp = holdfp(p->p_fd, uap->fd, FWRITE)) == NULL)
|
1999-04-04 21:41:28 +00:00
|
|
|
return (EBADF);
|
2000-11-18 21:01:04 +00:00
|
|
|
if (fp->f_type != DTYPE_VNODE) {
|
|
|
|
error = ESPIPE;
|
|
|
|
} else {
|
|
|
|
error = dofilewrite(p, fp, uap->fd, uap->buf, uap->nbyte,
|
|
|
|
uap->offset, FOF_OFFSET);
|
|
|
|
}
|
|
|
|
fdrop(fp, p);
|
|
|
|
return(error);
|
1999-04-04 21:41:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
dofilewrite(p, fp, fd, buf, nbyte, offset, flags)
|
|
|
|
struct proc *p;
|
|
|
|
struct file *fp;
|
|
|
|
int fd, flags;
|
|
|
|
const void *buf;
|
|
|
|
size_t nbyte;
|
|
|
|
off_t offset;
|
|
|
|
{
|
1999-03-27 21:16:58 +00:00
|
|
|
struct uio auio;
|
|
|
|
struct iovec aiov;
|
|
|
|
long cnt, error = 0;
|
|
|
|
#ifdef KTRACE
|
|
|
|
struct iovec ktriov;
|
2000-07-02 08:08:09 +00:00
|
|
|
struct uio ktruio;
|
2000-07-27 03:45:18 +00:00
|
|
|
int didktr = 0;
|
1999-03-27 21:16:58 +00:00
|
|
|
#endif
|
|
|
|
|
2000-07-28 22:17:42 +00:00
|
|
|
aiov.iov_base = (void *)(uintptr_t)buf;
|
1999-04-04 21:41:28 +00:00
|
|
|
aiov.iov_len = nbyte;
|
1999-03-27 21:16:58 +00:00
|
|
|
auio.uio_iov = &aiov;
|
|
|
|
auio.uio_iovcnt = 1;
|
1999-04-04 21:41:28 +00:00
|
|
|
auio.uio_offset = offset;
|
|
|
|
if (nbyte > INT_MAX)
|
1999-03-27 21:16:58 +00:00
|
|
|
return (EINVAL);
|
1999-04-04 21:41:28 +00:00
|
|
|
auio.uio_resid = nbyte;
|
1999-03-27 21:16:58 +00:00
|
|
|
auio.uio_rw = UIO_WRITE;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
|
|
|
auio.uio_procp = p;
|
|
|
|
#ifdef KTRACE
|
|
|
|
/*
|
2000-07-02 08:08:09 +00:00
|
|
|
* if tracing, save a copy of iovec and uio
|
1999-03-27 21:16:58 +00:00
|
|
|
*/
|
2000-07-02 08:08:09 +00:00
|
|
|
if (KTRPOINT(p, KTR_GENIO)) {
|
1999-03-27 21:16:58 +00:00
|
|
|
ktriov = aiov;
|
2000-07-02 08:08:09 +00:00
|
|
|
ktruio = auio;
|
2000-07-27 03:45:18 +00:00
|
|
|
didktr = 1;
|
2000-07-02 08:08:09 +00:00
|
|
|
}
|
1999-03-27 21:16:58 +00:00
|
|
|
#endif
|
1999-04-04 21:41:28 +00:00
|
|
|
cnt = nbyte;
|
2000-11-30 20:23:14 +00:00
|
|
|
if (fp->f_type == DTYPE_VNODE)
|
|
|
|
bwillwrite();
|
This is what was "fdfix2.patch," a fix for fd sharing. It's pretty
far-reaching in fd-land, so you'll want to consult the code for
changes. The biggest change is that now, you don't use
fp->f_ops->fo_foo(fp, bar)
but instead
fo_foo(fp, bar),
which increments and decrements the fp refcount upon entry and exit.
Two new calls, fhold() and fdrop(), are provided. Each does what it
seems like it should, and if fdrop() brings the refcount to zero, the
fd is freed as well.
Thanks to peter ("to hell with it, it looks ok to me.") for his review.
Thanks to msmith for keeping me from putting locks everywhere :)
Reviewed by: peter
1999-09-19 17:00:25 +00:00
|
|
|
if ((error = fo_write(fp, &auio, fp->f_cred, flags, p))) {
|
1999-03-27 21:16:58 +00:00
|
|
|
if (auio.uio_resid != cnt && (error == ERESTART ||
|
|
|
|
error == EINTR || error == EWOULDBLOCK))
|
|
|
|
error = 0;
|
2001-03-07 03:37:06 +00:00
|
|
|
if (error == EPIPE) {
|
|
|
|
PROC_LOCK(p);
|
1999-03-27 21:16:58 +00:00
|
|
|
psignal(p, SIGPIPE);
|
2001-03-07 03:37:06 +00:00
|
|
|
PROC_UNLOCK(p);
|
|
|
|
}
|
1999-03-27 21:16:58 +00:00
|
|
|
}
|
|
|
|
cnt -= auio.uio_resid;
|
|
|
|
#ifdef KTRACE
|
2000-07-27 03:45:18 +00:00
|
|
|
if (didktr && error == 0) {
|
2000-07-02 08:08:09 +00:00
|
|
|
ktruio.uio_iov = &ktriov;
|
|
|
|
ktruio.uio_resid = cnt;
|
|
|
|
ktrgenio(p->p_tracep, fd, UIO_WRITE, &ktruio, error);
|
|
|
|
}
|
1999-03-27 21:16:58 +00:00
|
|
|
#endif
|
|
|
|
p->p_retval[0] = cnt;
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Gather write system call
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct writev_args {
|
|
|
|
int fd;
|
|
|
|
struct iovec *iovp;
|
|
|
|
u_int iovcnt;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
writev(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
|
|
|
register struct writev_args *uap;
|
|
|
|
{
|
|
|
|
register struct file *fp;
|
|
|
|
register struct filedesc *fdp = p->p_fd;
|
|
|
|
struct uio auio;
|
|
|
|
register struct iovec *iov;
|
|
|
|
struct iovec *needfree;
|
|
|
|
struct iovec aiov[UIO_SMALLIOV];
|
|
|
|
long i, cnt, error = 0;
|
|
|
|
u_int iovlen;
|
|
|
|
#ifdef KTRACE
|
|
|
|
struct iovec *ktriov = NULL;
|
2000-07-02 08:08:09 +00:00
|
|
|
struct uio ktruio;
|
1994-05-24 10:09:53 +00:00
|
|
|
#endif
|
|
|
|
|
2000-11-18 21:01:04 +00:00
|
|
|
if ((fp = holdfp(fdp, uap->fd, FWRITE)) == NULL)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (EBADF);
|
|
|
|
/* note: can't use iovlen until iovcnt is validated */
|
|
|
|
iovlen = uap->iovcnt * sizeof (struct iovec);
|
|
|
|
if (uap->iovcnt > UIO_SMALLIOV) {
|
1999-10-14 05:37:52 +00:00
|
|
|
if (uap->iovcnt > UIO_MAXIOV) {
|
|
|
|
needfree = NULL;
|
|
|
|
error = EINVAL;
|
|
|
|
goto done;
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
|
|
|
|
needfree = iov;
|
|
|
|
} else {
|
|
|
|
iov = aiov;
|
|
|
|
needfree = NULL;
|
|
|
|
}
|
|
|
|
auio.uio_iov = iov;
|
|
|
|
auio.uio_iovcnt = uap->iovcnt;
|
|
|
|
auio.uio_rw = UIO_WRITE;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
|
|
|
auio.uio_procp = p;
|
1997-06-16 00:29:36 +00:00
|
|
|
auio.uio_offset = -1;
|
1994-09-25 19:34:02 +00:00
|
|
|
if ((error = copyin((caddr_t)uap->iovp, (caddr_t)iov, iovlen)))
|
1994-05-24 10:09:53 +00:00
|
|
|
goto done;
|
|
|
|
auio.uio_resid = 0;
|
|
|
|
for (i = 0; i < uap->iovcnt; i++) {
|
1998-08-24 08:39:39 +00:00
|
|
|
if (iov->iov_len > INT_MAX - auio.uio_resid) {
|
1994-05-24 10:09:53 +00:00
|
|
|
error = EINVAL;
|
|
|
|
goto done;
|
|
|
|
}
|
1998-08-24 08:39:39 +00:00
|
|
|
auio.uio_resid += iov->iov_len;
|
1994-05-24 10:09:53 +00:00
|
|
|
iov++;
|
|
|
|
}
|
|
|
|
#ifdef KTRACE
|
|
|
|
/*
|
2000-07-02 08:08:09 +00:00
|
|
|
* if tracing, save a copy of iovec and uio
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
if (KTRPOINT(p, KTR_GENIO)) {
|
|
|
|
MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
|
|
|
|
bcopy((caddr_t)auio.uio_iov, (caddr_t)ktriov, iovlen);
|
2000-07-02 08:08:09 +00:00
|
|
|
ktruio = auio;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
cnt = auio.uio_resid;
|
2000-12-07 23:45:57 +00:00
|
|
|
if (fp->f_type == DTYPE_VNODE)
|
|
|
|
bwillwrite();
|
This is what was "fdfix2.patch," a fix for fd sharing. It's pretty
far-reaching in fd-land, so you'll want to consult the code for
changes. The biggest change is that now, you don't use
fp->f_ops->fo_foo(fp, bar)
but instead
fo_foo(fp, bar),
which increments and decrements the fp refcount upon entry and exit.
Two new calls, fhold() and fdrop(), are provided. Each does what it
seems like it should, and if fdrop() brings the refcount to zero, the
fd is freed as well.
Thanks to peter ("to hell with it, it looks ok to me.") for his review.
Thanks to msmith for keeping me from putting locks everywhere :)
Reviewed by: peter
1999-09-19 17:00:25 +00:00
|
|
|
if ((error = fo_write(fp, &auio, fp->f_cred, 0, p))) {
|
1994-05-24 10:09:53 +00:00
|
|
|
if (auio.uio_resid != cnt && (error == ERESTART ||
|
|
|
|
error == EINTR || error == EWOULDBLOCK))
|
|
|
|
error = 0;
|
2001-03-07 03:37:06 +00:00
|
|
|
if (error == EPIPE) {
|
|
|
|
PROC_LOCK(p);
|
1994-05-24 10:09:53 +00:00
|
|
|
psignal(p, SIGPIPE);
|
2001-03-07 03:37:06 +00:00
|
|
|
PROC_UNLOCK(p);
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
cnt -= auio.uio_resid;
|
|
|
|
#ifdef KTRACE
|
|
|
|
if (ktriov != NULL) {
|
2000-07-02 08:08:09 +00:00
|
|
|
if (error == 0) {
|
|
|
|
ktruio.uio_iov = ktriov;
|
|
|
|
ktruio.uio_resid = cnt;
|
|
|
|
ktrgenio(p->p_tracep, uap->fd, UIO_WRITE, &ktruio,
|
|
|
|
error);
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
FREE(ktriov, M_TEMP);
|
|
|
|
}
|
|
|
|
#endif
|
1997-11-06 19:29:57 +00:00
|
|
|
p->p_retval[0] = cnt;
|
1994-05-24 10:09:53 +00:00
|
|
|
done:
|
1999-10-14 10:50:06 +00:00
|
|
|
fdrop(fp, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (needfree)
|
|
|
|
FREE(needfree, M_IOV);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ioctl system call
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct ioctl_args {
|
|
|
|
int fd;
|
1998-08-24 08:39:39 +00:00
|
|
|
u_long com;
|
1994-05-24 10:09:53 +00:00
|
|
|
caddr_t data;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
ioctl(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
|
|
|
register struct ioctl_args *uap;
|
|
|
|
{
|
|
|
|
register struct file *fp;
|
|
|
|
register struct filedesc *fdp;
|
1998-06-10 10:29:31 +00:00
|
|
|
register u_long com;
|
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
register u_int size;
|
|
|
|
caddr_t data, memp;
|
|
|
|
int tmp;
|
|
|
|
#define STK_PARAMS 128
|
2000-05-09 17:43:21 +00:00
|
|
|
union {
|
|
|
|
char stkbuf[STK_PARAMS];
|
|
|
|
long align;
|
|
|
|
} ubuf;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
fdp = p->p_fd;
|
|
|
|
if ((u_int)uap->fd >= fdp->fd_nfiles ||
|
|
|
|
(fp = fdp->fd_ofiles[uap->fd]) == NULL)
|
|
|
|
return (EBADF);
|
|
|
|
|
|
|
|
if ((fp->f_flag & (FREAD | FWRITE)) == 0)
|
|
|
|
return (EBADF);
|
|
|
|
|
|
|
|
switch (com = uap->com) {
|
|
|
|
case FIONCLEX:
|
|
|
|
fdp->fd_ofileflags[uap->fd] &= ~UF_EXCLOSE;
|
|
|
|
return (0);
|
|
|
|
case FIOCLEX:
|
|
|
|
fdp->fd_ofileflags[uap->fd] |= UF_EXCLOSE;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Interpret high order word to find amount of data to be
|
|
|
|
* copied to/from the user's address space.
|
|
|
|
*/
|
|
|
|
size = IOCPARM_LEN(com);
|
|
|
|
if (size > IOCPARM_MAX)
|
|
|
|
return (ENOTTY);
|
2000-11-18 21:01:04 +00:00
|
|
|
|
|
|
|
fhold(fp);
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
memp = NULL;
|
2000-05-09 17:43:21 +00:00
|
|
|
if (size > sizeof (ubuf.stkbuf)) {
|
1994-05-24 10:09:53 +00:00
|
|
|
memp = (caddr_t)malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
|
|
|
|
data = memp;
|
2000-11-18 21:01:04 +00:00
|
|
|
} else {
|
2000-05-09 17:43:21 +00:00
|
|
|
data = ubuf.stkbuf;
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
if (com&IOC_IN) {
|
|
|
|
if (size) {
|
|
|
|
error = copyin(uap->data, data, (u_int)size);
|
|
|
|
if (error) {
|
|
|
|
if (memp)
|
|
|
|
free(memp, M_IOCTLOPS);
|
2000-11-18 21:01:04 +00:00
|
|
|
fdrop(fp, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
2000-11-18 21:01:04 +00:00
|
|
|
} else {
|
1994-05-24 10:09:53 +00:00
|
|
|
*(caddr_t *)data = uap->data;
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
|
|
|
} else if ((com&IOC_OUT) && size) {
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Zero the buffer so the user always
|
|
|
|
* gets back something deterministic.
|
|
|
|
*/
|
|
|
|
bzero(data, size);
|
2000-11-18 21:01:04 +00:00
|
|
|
} else if (com&IOC_VOID) {
|
1994-05-24 10:09:53 +00:00
|
|
|
*(caddr_t *)data = uap->data;
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
switch (com) {
|
|
|
|
|
|
|
|
case FIONBIO:
|
1994-09-25 19:34:02 +00:00
|
|
|
if ((tmp = *(int *)data))
|
1994-05-24 10:09:53 +00:00
|
|
|
fp->f_flag |= FNONBLOCK;
|
|
|
|
else
|
|
|
|
fp->f_flag &= ~FNONBLOCK;
|
This is what was "fdfix2.patch," a fix for fd sharing. It's pretty
far-reaching in fd-land, so you'll want to consult the code for
changes. The biggest change is that now, you don't use
fp->f_ops->fo_foo(fp, bar)
but instead
fo_foo(fp, bar),
which increments and decrements the fp refcount upon entry and exit.
Two new calls, fhold() and fdrop(), are provided. Each does what it
seems like it should, and if fdrop() brings the refcount to zero, the
fd is freed as well.
Thanks to peter ("to hell with it, it looks ok to me.") for his review.
Thanks to msmith for keeping me from putting locks everywhere :)
Reviewed by: peter
1999-09-19 17:00:25 +00:00
|
|
|
error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case FIOASYNC:
|
1994-09-25 19:34:02 +00:00
|
|
|
if ((tmp = *(int *)data))
|
1994-05-24 10:09:53 +00:00
|
|
|
fp->f_flag |= FASYNC;
|
|
|
|
else
|
|
|
|
fp->f_flag &= ~FASYNC;
|
This is what was "fdfix2.patch," a fix for fd sharing. It's pretty
far-reaching in fd-land, so you'll want to consult the code for
changes. The biggest change is that now, you don't use
fp->f_ops->fo_foo(fp, bar)
but instead
fo_foo(fp, bar),
which increments and decrements the fp refcount upon entry and exit.
Two new calls, fhold() and fdrop(), are provided. Each does what it
seems like it should, and if fdrop() brings the refcount to zero, the
fd is freed as well.
Thanks to peter ("to hell with it, it looks ok to me.") for his review.
Thanks to msmith for keeping me from putting locks everywhere :)
Reviewed by: peter
1999-09-19 17:00:25 +00:00
|
|
|
error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
This is what was "fdfix2.patch," a fix for fd sharing. It's pretty
far-reaching in fd-land, so you'll want to consult the code for
changes. The biggest change is that now, you don't use
fp->f_ops->fo_foo(fp, bar)
but instead
fo_foo(fp, bar),
which increments and decrements the fp refcount upon entry and exit.
Two new calls, fhold() and fdrop(), are provided. Each does what it
seems like it should, and if fdrop() brings the refcount to zero, the
fd is freed as well.
Thanks to peter ("to hell with it, it looks ok to me.") for his review.
Thanks to msmith for keeping me from putting locks everywhere :)
Reviewed by: peter
1999-09-19 17:00:25 +00:00
|
|
|
error = fo_ioctl(fp, com, data, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Copy any data to user, size was
|
|
|
|
* already set and checked above.
|
|
|
|
*/
|
|
|
|
if (error == 0 && (com&IOC_OUT) && size)
|
|
|
|
error = copyout(data, uap->data, (u_int)size);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (memp)
|
|
|
|
free(memp, M_IOCTLOPS);
|
2000-11-18 21:01:04 +00:00
|
|
|
fdrop(fp, p);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2000-01-05 19:40:17 +00:00
|
|
|
static int nselcoll; /* Select collisions since boot */
|
2001-05-14 05:26:48 +00:00
|
|
|
struct cv selwait;
|
2000-01-05 19:40:17 +00:00
|
|
|
SYSCTL_INT(_kern, OID_AUTO, nselcoll, CTLFLAG_RD, &nselcoll, 0, "");
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Select system call.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct select_args {
|
1996-08-20 07:18:10 +00:00
|
|
|
int nd;
|
1994-05-24 10:09:53 +00:00
|
|
|
fd_set *in, *ou, *ex;
|
|
|
|
struct timeval *tv;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-11-06 19:29:57 +00:00
|
|
|
select(p, uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
register struct proc *p;
|
|
|
|
register struct select_args *uap;
|
|
|
|
{
|
1997-02-20 11:51:52 +00:00
|
|
|
/*
|
|
|
|
* The magic 2048 here is chosen to be just enough for FD_SETSIZE
|
|
|
|
* infds with the new FD_SETSIZE of 1024, and more than enough for
|
|
|
|
* FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
|
|
|
|
* of 256.
|
|
|
|
*/
|
|
|
|
fd_mask s_selbits[howmany(2048, NFDBITS)];
|
2001-05-14 05:26:48 +00:00
|
|
|
fd_mask s_heldbits[howmany(2048, NFDBITS)];
|
|
|
|
fd_mask *ibits[3], *obits[3], *selbits, *sbp, *heldbits, *hibits, *hobits;
|
1998-04-04 13:26:20 +00:00
|
|
|
struct timeval atv, rtv, ttv;
|
2001-05-14 05:26:48 +00:00
|
|
|
int ncoll, error, timo, i;
|
1997-02-20 11:51:52 +00:00
|
|
|
u_int nbufbytes, ncpbytes, nfdbits;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1996-08-20 07:18:10 +00:00
|
|
|
if (uap->nd < 0)
|
1996-08-20 15:03:41 +00:00
|
|
|
return (EINVAL);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (uap->nd > p->p_fd->fd_nfiles)
|
1996-08-20 07:18:10 +00:00
|
|
|
uap->nd = p->p_fd->fd_nfiles; /* forgiving; slightly wrong */
|
|
|
|
|
|
|
|
/*
|
1997-02-20 11:51:52 +00:00
|
|
|
* Allocate just enough bits for the non-null fd_sets. Use the
|
|
|
|
* preallocated auto buffer if possible.
|
1996-08-20 07:18:10 +00:00
|
|
|
*/
|
1997-02-20 11:51:52 +00:00
|
|
|
nfdbits = roundup(uap->nd, NFDBITS);
|
|
|
|
ncpbytes = nfdbits / NBBY;
|
|
|
|
nbufbytes = 0;
|
|
|
|
if (uap->in != NULL)
|
|
|
|
nbufbytes += 2 * ncpbytes;
|
|
|
|
if (uap->ou != NULL)
|
|
|
|
nbufbytes += 2 * ncpbytes;
|
|
|
|
if (uap->ex != NULL)
|
|
|
|
nbufbytes += 2 * ncpbytes;
|
|
|
|
if (nbufbytes <= sizeof s_selbits)
|
|
|
|
selbits = &s_selbits[0];
|
|
|
|
else
|
|
|
|
selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);
|
2001-05-14 05:26:48 +00:00
|
|
|
if (2 * ncpbytes <= sizeof s_heldbits) {
|
|
|
|
bzero(s_heldbits, sizeof(s_heldbits));
|
|
|
|
heldbits = &s_heldbits[0];
|
|
|
|
} else
|
|
|
|
heldbits = malloc(2 * ncpbytes, M_SELECT, M_WAITOK | M_ZERO);
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1997-02-20 11:51:52 +00:00
|
|
|
/*
|
|
|
|
* Assign pointers into the bit buffers and fetch the input bits.
|
|
|
|
* Put the output buffers together so that they can be bzeroed
|
|
|
|
* together.
|
|
|
|
*/
|
|
|
|
sbp = selbits;
|
2001-05-14 05:26:48 +00:00
|
|
|
hibits = heldbits + ncpbytes / sizeof *heldbits;
|
|
|
|
hobits = heldbits;
|
1994-05-24 10:09:53 +00:00
|
|
|
#define getbits(name, x) \
|
1997-02-20 11:51:52 +00:00
|
|
|
do { \
|
|
|
|
if (uap->name == NULL) \
|
|
|
|
ibits[x] = NULL; \
|
|
|
|
else { \
|
|
|
|
ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp; \
|
|
|
|
obits[x] = sbp; \
|
|
|
|
sbp += ncpbytes / sizeof *sbp; \
|
|
|
|
error = copyin(uap->name, ibits[x], ncpbytes); \
|
2001-05-14 05:26:48 +00:00
|
|
|
if (error != 0) \
|
|
|
|
goto done_noproclock; \
|
|
|
|
for (i = 0; \
|
|
|
|
i < ncpbytes / sizeof ibits[i][0]; \
|
|
|
|
i++) \
|
|
|
|
hibits[i] |= ibits[x][i]; \
|
1997-02-20 11:51:52 +00:00
|
|
|
} \
|
|
|
|
} while (0)
|
1994-05-24 10:09:53 +00:00
|
|
|
getbits(in, 0);
|
|
|
|
getbits(ou, 1);
|
|
|
|
getbits(ex, 2);
|
|
|
|
#undef getbits
|
1997-02-20 11:51:52 +00:00
|
|
|
if (nbufbytes != 0)
|
|
|
|
bzero(selbits, nbufbytes / 2);
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
if (uap->tv) {
|
|
|
|
error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
|
|
|
|
sizeof (atv));
|
2001-05-14 05:26:48 +00:00
|
|
|
if (error)
|
|
|
|
goto done_noproclock;
|
1994-05-24 10:09:53 +00:00
|
|
|
if (itimerfix(&atv)) {
|
|
|
|
error = EINVAL;
|
2001-05-14 05:26:48 +00:00
|
|
|
goto done_noproclock;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
1998-05-17 11:53:46 +00:00
|
|
|
getmicrouptime(&rtv);
|
1998-04-04 13:26:20 +00:00
|
|
|
timevaladd(&atv, &rtv);
|
2000-07-13 02:12:25 +00:00
|
|
|
} else {
|
1998-04-04 13:26:20 +00:00
|
|
|
atv.tv_sec = 0;
|
2000-07-13 02:12:25 +00:00
|
|
|
atv.tv_usec = 0;
|
|
|
|
}
|
2001-05-14 05:26:48 +00:00
|
|
|
selholddrop(p, hibits, hobits, uap->nd, 1);
|
1998-04-04 13:26:20 +00:00
|
|
|
timo = 0;
|
2001-01-24 11:12:37 +00:00
|
|
|
PROC_LOCK(p);
|
1994-05-24 10:09:53 +00:00
|
|
|
retry:
|
|
|
|
ncoll = nselcoll;
|
|
|
|
p->p_flag |= P_SELECT;
|
2001-05-15 10:19:57 +00:00
|
|
|
PROC_UNLOCK(p);
|
1997-11-06 19:29:57 +00:00
|
|
|
error = selscan(p, ibits, obits, uap->nd);
|
2001-05-15 10:19:57 +00:00
|
|
|
PROC_LOCK(p);
|
1997-11-06 19:29:57 +00:00
|
|
|
if (error || p->p_retval[0])
|
1994-05-24 10:09:53 +00:00
|
|
|
goto done;
|
2000-07-12 22:46:40 +00:00
|
|
|
if (atv.tv_sec || atv.tv_usec) {
|
1998-05-17 11:53:46 +00:00
|
|
|
getmicrouptime(&rtv);
|
2001-05-15 10:19:57 +00:00
|
|
|
if (timevalcmp(&rtv, &atv, >=)) {
|
|
|
|
/*
|
|
|
|
* An event of our interest may occur during locking a process.
|
|
|
|
* In order to avoid missing the event that occured during locking
|
|
|
|
* the process, test P_SELECT and rescan file descriptors if
|
|
|
|
* necessary.
|
|
|
|
*/
|
|
|
|
if ((p->p_flag & P_SELECT) == 0 || nselcoll != ncoll) {
|
|
|
|
ncoll = nselcoll;
|
|
|
|
p->p_flag |= P_SELECT;
|
|
|
|
PROC_UNLOCK(p);
|
|
|
|
error = selscan(p, ibits, obits, uap->nd);
|
|
|
|
PROC_LOCK(p);
|
|
|
|
}
|
1998-04-04 13:26:20 +00:00
|
|
|
goto done;
|
2001-05-15 10:19:57 +00:00
|
|
|
}
|
1998-04-04 13:26:20 +00:00
|
|
|
ttv = atv;
|
|
|
|
timevalsub(&ttv, &rtv);
|
|
|
|
timo = ttv.tv_sec > 24 * 60 * 60 ?
|
|
|
|
24 * 60 * 60 * hz : tvtohz(&ttv);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
p->p_flag &= ~P_SELECT;
|
2000-01-14 02:53:29 +00:00
|
|
|
|
2001-05-14 05:26:48 +00:00
|
|
|
if (timo > 0)
|
|
|
|
error = cv_timedwait_sig(&selwait, &p->p_mtx, timo);
|
|
|
|
else
|
|
|
|
error = cv_wait_sig(&selwait, &p->p_mtx);
|
2000-01-14 02:53:29 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
if (error == 0)
|
|
|
|
goto retry;
|
2001-05-14 05:26:48 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
done:
|
|
|
|
p->p_flag &= ~P_SELECT;
|
2001-01-24 11:12:37 +00:00
|
|
|
PROC_UNLOCK(p);
|
2001-05-14 05:26:48 +00:00
|
|
|
selholddrop(p, hibits, hobits, uap->nd, 0);
|
|
|
|
done_noproclock:
|
1994-05-24 10:09:53 +00:00
|
|
|
/* select is not restarted after signals... */
|
|
|
|
if (error == ERESTART)
|
|
|
|
error = EINTR;
|
|
|
|
if (error == EWOULDBLOCK)
|
|
|
|
error = 0;
|
|
|
|
#define putbits(name, x) \
|
1997-02-20 11:51:52 +00:00
|
|
|
if (uap->name && (error2 = copyout(obits[x], uap->name, ncpbytes))) \
|
1994-05-24 10:09:53 +00:00
|
|
|
error = error2;
|
|
|
|
if (error == 0) {
|
|
|
|
int error2;
|
|
|
|
|
|
|
|
putbits(in, 0);
|
|
|
|
putbits(ou, 1);
|
|
|
|
putbits(ex, 2);
|
|
|
|
#undef putbits
|
|
|
|
}
|
1997-02-20 11:51:52 +00:00
|
|
|
if (selbits != &s_selbits[0])
|
|
|
|
free(selbits, M_SELECT);
|
2001-05-14 05:26:48 +00:00
|
|
|
if (heldbits != &s_heldbits[0])
|
|
|
|
free(heldbits, M_SELECT);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2001-05-14 05:26:48 +00:00
|
|
|
/*
 * Acquire (hold != 0) or release (hold == 0) a reference on every file
 * whose bit is set in the given descriptor bitmap, so the files cannot
 * be closed out from under a sleeping select().
 *
 * On hold: scans ibits, fhold()s each open fd found, and records which
 * fds were actually held by setting their bits in obits.  On drop:
 * scans obits and fdrop()s each fd recorded there.
 *
 * Returns EBADF if a set bit refers to a closed descriptor; note that
 * an early EBADF return leaves previously-held references in place —
 * callers are expected to pass obits back for the drop pass.
 */
static int
selholddrop(p, ibits, obits, nfd, hold)
	struct proc *p;
	fd_mask *ibits, *obits;
	int nfd, hold;
{
	struct filedesc *fdp = p->p_fd;
	int i, fd;
	fd_mask bits;
	struct file *fp;

	for (i = 0; i < nfd; i += NFDBITS) {
		/* Hold pass walks the input set; drop pass walks what we held. */
		if (hold)
			bits = ibits[i/NFDBITS];
		else
			bits = obits[i/NFDBITS];
		/* ffs(int mask) not portable, fd_mask is long */
		for (fd = i; bits && fd < nfd; fd++, bits >>= 1) {
			if (!(bits & 1))
				continue;
			fp = fdp->fd_ofiles[fd];
			if (fp == NULL)
				return (EBADF);
			if (hold) {
				fhold(fp);
				/* Remember exactly which fds we held. */
				obits[(fd)/NFDBITS] |=
				    ((fd_mask)1 << ((fd) % NFDBITS));
			} else
				fdrop(fp, p);
		}
	}
	return (0);
}
|
|
|
|
|
1995-12-14 08:32:45 +00:00
|
|
|
/*
 * Scan the three select() descriptor sets (read/write/except) and poll
 * each set descriptor via its file's fo_poll backend.
 *
 * ibits/obits are parallel arrays of three bitmaps (or NULL when the
 * caller passed no set of that kind).  A ready descriptor gets its bit
 * set in the corresponding output bitmap.  The count of ready
 * descriptors is returned to userland via p->p_retval[0]; the function
 * result is 0 or EBADF (set bit on a closed descriptor).
 */
static int
selscan(p, ibits, obits, nfd)
	struct proc *p;
	fd_mask **ibits, **obits;
	int nfd;
{
	struct filedesc *fdp = p->p_fd;
	int msk, i, fd;
	fd_mask bits;
	struct file *fp;
	int n = 0;
	/* Note: backend also returns POLLHUP/POLLERR if appropriate. */
	static int flag[3] = { POLLRDNORM, POLLWRNORM, POLLRDBAND };

	for (msk = 0; msk < 3; msk++) {
		if (ibits[msk] == NULL)
			continue;
		for (i = 0; i < nfd; i += NFDBITS) {
			bits = ibits[msk][i/NFDBITS];
			/* ffs(int mask) not portable, fd_mask is long */
			for (fd = i; bits && fd < nfd; fd++, bits >>= 1) {
				if (!(bits & 1))
					continue;
				fp = fdp->fd_ofiles[fd];
				if (fp == NULL)
					return (EBADF);
				if (fo_poll(fp, flag[msk], fp->f_cred, p)) {
					obits[msk][(fd)/NFDBITS] |=
					    ((fd_mask)1 << ((fd) % NFDBITS));
					n++;
				}
			}
		}
	}
	p->p_retval[0] = n;
	return (0);
}
|
|
|
|
|
1997-09-14 02:30:32 +00:00
|
|
|
/*
 * Poll system call.
 *
 * Copies the user pollfd array in, holds a reference on every named
 * file (via pollholddrop) so descriptors cannot vanish while we sleep,
 * then repeatedly scans the set with pollscan(), sleeping on the
 * global select condition variable between scans until an event,
 * timeout, or signal occurs.  Results (revents) are copied back out.
 */
#ifndef _SYS_SYSPROTO_H_
struct poll_args {
	struct pollfd *fds;
	u_int	nfds;
	int	timeout;
};
#endif
int
poll(p, uap)
	struct proc *p;
	struct poll_args *uap;
{
	caddr_t bits;
	/* Stack buffer avoids malloc for typical (<= 32 fd) calls. */
	char smallbits[32 * sizeof(struct pollfd)];
	struct timeval atv, rtv, ttv;
	int ncoll, error = 0, timo;
	u_int nfds;
	size_t ni;
	struct pollfd p_heldbits[32];
	struct pollfd *heldbits;

	nfds = SCARG(uap, nfds);
	/*
	 * This is kinda bogus. We have fd limits, but that is not
	 * really related to the size of the pollfd array. Make sure
	 * we let the process use at least FD_SETSIZE entries and at
	 * least enough for the current limits. We want to be reasonably
	 * safe, but not overly restrictive.
	 */
	if (nfds > p->p_rlimit[RLIMIT_NOFILE].rlim_cur && nfds > FD_SETSIZE)
		return (EINVAL);
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallbits))
		bits = malloc(ni, M_TEMP, M_WAITOK);
	else
		bits = smallbits;
	if (ni > sizeof(p_heldbits))
		heldbits = malloc(ni, M_TEMP, M_WAITOK);
	else {
		bzero(p_heldbits, sizeof(p_heldbits));
		heldbits = p_heldbits;
	}
	error = copyin(SCARG(uap, fds), bits, ni);
	if (error)
		goto done_noproclock;
	/* heldbits is a private copy used only for hold/drop bookkeeping. */
	bcopy(bits, heldbits, ni);
	if (SCARG(uap, timeout) != INFTIM) {
		/* Convert ms timeout to an absolute uptime deadline in atv. */
		atv.tv_sec = SCARG(uap, timeout) / 1000;
		atv.tv_usec = (SCARG(uap, timeout) % 1000) * 1000;
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done_noproclock;
		}
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		/* Zero atv means "no timeout": sleep until an event/signal. */
		atv.tv_sec = 0;
		atv.tv_usec = 0;
	}
	/* Pin all referenced files before we can sleep. */
	pollholddrop(p, heldbits, nfds, 1);
	timo = 0;
	PROC_LOCK(p);
retry:
	ncoll = nselcoll;
	p->p_flag |= P_SELECT;
	/* Drop the proc lock across the scan; fo_poll may sleep. */
	PROC_UNLOCK(p);
	error = pollscan(p, (struct pollfd *)bits, nfds);
	PROC_LOCK(p);
	if (error || p->p_retval[0])
		goto done;
	if (atv.tv_sec || atv.tv_usec) {
		getmicrouptime(&rtv);
		if (timevalcmp(&rtv, &atv, >=)) {
			/*
			 * An event of our interest may occur during locking a process.
			 * In order to avoid missing the event that occured during locking
			 * the process, test P_SELECT and rescan file descriptors if
			 * necessary.
			 */
			if ((p->p_flag & P_SELECT) == 0 || nselcoll != ncoll) {
				ncoll = nselcoll;
				p->p_flag |= P_SELECT;
				PROC_UNLOCK(p);
				error = pollscan(p, (struct pollfd *)bits, nfds);
				PROC_LOCK(p);
			}
			goto done;
		}
		ttv = atv;
		timevalsub(&ttv, &rtv);
		/* Clamp the tick conversion to one day to avoid overflow. */
		timo = ttv.tv_sec > 24 * 60 * 60 ?
		    24 * 60 * 60 * hz : tvtohz(&ttv);
	}
	p->p_flag &= ~P_SELECT;
	/* Sleep on the shared select condvar; selwakeup() broadcasts it. */
	if (timo > 0)
		error = cv_timedwait_sig(&selwait, &p->p_mtx, timo);
	else
		error = cv_wait_sig(&selwait, &p->p_mtx);
	if (error == 0)
		goto retry;

done:
	p->p_flag &= ~P_SELECT;
	PROC_UNLOCK(p);
	/* Release the file references taken above. */
	pollholddrop(p, heldbits, nfds, 0);
done_noproclock:
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0) {
		error = copyout(bits, SCARG(uap, fds), ni);
		if (error)
			goto out;
	}
out:
	if (ni > sizeof(smallbits))
		free(bits, M_TEMP);
	if (ni > sizeof(p_heldbits))
		free(heldbits, M_TEMP);
	return (error);
}
|
|
|
|
|
2001-05-14 05:26:48 +00:00
|
|
|
/*
 * Hold (hold != 0) or release (hold == 0) a file reference for every
 * valid descriptor in a pollfd array, mirroring selholddrop() for the
 * poll() path.
 *
 * The hold pass reuses fds->revents as a marker (1 = reference held,
 * 0 = fd was closed) on the caller's PRIVATE copy of the array; the
 * drop pass fdrop()s only entries so marked.  The marker never reaches
 * userland because poll() copies out the scan buffer, not this copy.
 * Always returns 0.
 */
static int
pollholddrop(p, fds, nfd, hold)
	struct proc *p;
	struct pollfd *fds;
	u_int nfd;
	int hold;
{
	register struct filedesc *fdp = p->p_fd;
	int i;
	struct file *fp;

	for (i = 0; i < nfd; i++, fds++) {
		/* Silently skip out-of-range fds; pollscan reports POLLNVAL. */
		if (0 <= fds->fd && fds->fd < fdp->fd_nfiles) {
			fp = fdp->fd_ofiles[fds->fd];
			if (hold) {
				if (fp != NULL) {
					fhold(fp);
					fds->revents = 1;
				} else
					fds->revents = 0;
			} else if(fp != NULL && fds->revents)
				fdrop(fp, p);
		}
	}
	return (0);
}
|
|
|
|
|
1997-09-14 02:30:32 +00:00
|
|
|
/*
 * Scan a pollfd array once, querying each open descriptor's fo_poll
 * backend and recording the result in fds->revents.
 *
 * Out-of-range or closed descriptors get POLLNVAL (and count as
 * "ready", per poll semantics); negative fds are ignored with
 * revents = 0.  The number of entries with nonzero revents is returned
 * to userland via p->p_retval[0]; the function itself always returns 0.
 */
static int
pollscan(p, fds, nfd)
	struct proc *p;
	struct pollfd *fds;
	u_int nfd;
{
	register struct filedesc *fdp = p->p_fd;
	int i;
	struct file *fp;
	int n = 0;

	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd >= fdp->fd_nfiles) {
			fds->revents = POLLNVAL;
			n++;
		} else if (fds->fd < 0) {
			/* Negative fd: entry is skipped, not an error. */
			fds->revents = 0;
		} else {
			fp = fdp->fd_ofiles[fds->fd];
			if (fp == NULL) {
				fds->revents = POLLNVAL;
				n++;
			} else {
				/*
				 * Note: backend also returns POLLHUP and
				 * POLLERR if appropriate.
				 */
				fds->revents = fo_poll(fp, fds->events,
				    fp->f_cred, p);
				if (fds->revents != 0)
					n++;
			}
		}
	}
	p->p_retval[0] = n;
	return (0);
}
|
|
|
|
|
|
|
|
/*
 * OpenBSD poll system call.
 * XXX this isn't quite a true representation.. OpenBSD uses select ops.
 *
 * The argument structure is layout-compatible with poll_args, so this
 * simply forwards to the native poll() implementation.
 */
#ifndef _SYS_SYSPROTO_H_
struct openbsd_poll_args {
	struct pollfd *fds;
	u_int	nfds;
	int	timeout;
};
#endif
int
openbsd_poll(p, uap)
	register struct proc *p;
	register struct openbsd_poll_args *uap;
{
	return (poll(p, (struct poll_args *)uap));
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*ARGSUSED*/
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1997-09-14 02:30:32 +00:00
|
|
|
seltrue(dev, events, p)
|
1994-05-24 10:09:53 +00:00
|
|
|
dev_t dev;
|
1997-09-14 02:30:32 +00:00
|
|
|
int events;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct proc *p;
|
|
|
|
{
|
|
|
|
|
1997-09-14 02:30:32 +00:00
|
|
|
return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Record a select request.
 *
 * Registers the calling process in the selinfo so a later selwakeup()
 * can find it.  A selinfo tracks only a single waiter pid; if a
 * DIFFERENT process is already recorded and is currently asleep on
 * selwait, we mark the selinfo SI_COLL (collision) so selwakeup() will
 * broadcast instead, and leave the existing registration alone.
 */
void
selrecord(selector, sip)
	struct proc *selector;
	struct selinfo *sip;
{
	struct proc *p;
	pid_t mypid;

	mypid = selector->p_pid;
	/* Already registered for ourselves: nothing to do. */
	if (sip->si_pid == mypid)
		return;
	if (sip->si_pid && (p = pfind(sip->si_pid))) {
		/*
		 * NOTE(review): the PROC_UNLOCK() calls below imply pfind()
		 * returns the proc locked in this kernel version — confirm.
		 * sched_lock protects the p_wchan check.
		 */
		mtx_lock_spin(&sched_lock);
		if (p->p_wchan == (caddr_t)&selwait) {
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			/* Someone else is sleeping here: flag a collision. */
			sip->si_flags |= SI_COLL;
			return;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	}
	/* Previous waiter is gone or not sleeping: take over the slot. */
	sip->si_pid = mypid;
}
|
|
|
|
|
|
|
|
/*
 * Do a wakeup when a selectable event occurs.
 *
 * Wakes the single process recorded in the selinfo by selrecord().
 * If a collision was flagged (multiple waiters), every select/poll
 * sleeper is woken via a broadcast on the shared condvar and they all
 * rescan.  Clears the registration in either case.
 */
void
selwakeup(sip)
	register struct selinfo *sip;
{
	register struct proc *p;

	if (sip->si_pid == 0)
		return;
	if (sip->si_flags & SI_COLL) {
		/* Collisions force everyone to rescan; count them. */
		nselcoll++;
		sip->si_flags &= ~SI_COLL;
		cv_broadcast(&selwait);
	}
	p = pfind(sip->si_pid);
	sip->si_pid = 0;
	if (p != NULL) {
		mtx_lock_spin(&sched_lock);
		if (p->p_wchan == (caddr_t)&selwait) {
			/*
			 * Asleep (or queued) on selwait: make it runnable or
			 * just pull it off the condvar wait queue.
			 */
			if (p->p_stat == SSLEEP)
				setrunnable(p);
			else
				cv_waitq_remove(p);
		} else
			/* Not asleep yet: clear P_SELECT so it rescans. */
			p->p_flag &= ~P_SELECT;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	}
}
|
2001-05-14 05:26:48 +00:00
|
|
|
|
|
|
|
static void selectinit __P((void *));
SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, selectinit, NULL)

/*
 * One-time boot initialization: set up the condition variable that all
 * select()/poll() sleepers share.  Run early (SI_SUB_LOCK) via SYSINIT.
 */
/* ARGSUSED*/
static void
selectinit(dummy)
	void *dummy;
{
	cv_init(&selwait, "select");
}
|