1996-01-28 23:38:26 +00:00
|
|
|
/*
|
|
|
|
* Copyright (c) 1996 John S. Dyson
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice immediately at the beginning of the file, without modification,
|
|
|
|
* this list of conditions, and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. Absolutely no warranty of function or purpose is made by the author
|
|
|
|
* John S. Dyson.
|
1996-02-11 22:09:50 +00:00
|
|
|
* 4. Modifications may be freely made to this file if the above conditions
|
1996-01-28 23:38:26 +00:00
|
|
|
* are met.
|
|
|
|
*
|
1999-08-28 01:08:13 +00:00
|
|
|
* $FreeBSD$
|
1996-01-28 23:38:26 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This file contains a high-performance replacement for the socket-based
|
|
|
|
* pipes scheme originally used in FreeBSD/4.4Lite. It does not support
|
|
|
|
* all features of sockets, but does do everything that pipes normally
|
|
|
|
* do.
|
|
|
|
*/
|
|
|
|
|
1996-02-04 19:56:35 +00:00
|
|
|
/*
|
|
|
|
* This code has two modes of operation, a small write mode and a large
|
|
|
|
* write mode. The small write mode acts like conventional pipes with
|
|
|
|
* a kernel buffer. If the buffer is less than PIPE_MINDIRECT, then the
|
|
|
|
* "normal" pipe buffering is done. If the buffer is between PIPE_MINDIRECT
|
|
|
|
* and PIPE_SIZE in size, it is fully mapped and wired into the kernel, and
|
|
|
|
* the receiving process can copy it directly from the pages in the sending
|
|
|
|
* process.
|
|
|
|
*
|
|
|
|
* If the sending process receives a signal, it is possible that it will
|
1996-02-05 05:50:34 +00:00
|
|
|
* go away, and certainly its address space can change, because control
|
1996-02-04 19:56:35 +00:00
|
|
|
* is returned back to the user-mode side. In that case, the pipe code
|
|
|
|
* arranges to copy the buffer supplied by the user process, to a pageable
|
|
|
|
* kernel buffer, and the receiving process will grab the data from the
|
|
|
|
* pageable kernel buffer. Since signals don't happen all that often,
|
|
|
|
* the copy operation is normally eliminated.
|
|
|
|
*
|
|
|
|
* The constant PIPE_MINDIRECT is chosen to make sure that buffering will
|
|
|
|
* happen for small transfers so that the system will not spend all of
|
1996-02-05 05:50:34 +00:00
|
|
|
* its time context switching. PIPE_SIZE is constrained by the
|
1996-02-04 19:56:35 +00:00
|
|
|
* amount of kernel virtual memory.
|
|
|
|
*/
|
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
|
|
|
#include <sys/proc.h>
|
1997-03-23 03:37:54 +00:00
|
|
|
#include <sys/fcntl.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
#include <sys/file.h>
|
|
|
|
#include <sys/filedesc.h>
|
1997-03-24 11:52:29 +00:00
|
|
|
#include <sys/filio.h>
|
|
|
|
#include <sys/ttycom.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
#include <sys/stat.h>
|
1997-09-14 02:43:25 +00:00
|
|
|
#include <sys/poll.h>
|
1999-01-27 10:10:03 +00:00
|
|
|
#include <sys/select.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
#include <sys/signalvar.h>
|
|
|
|
#include <sys/sysproto.h>
|
|
|
|
#include <sys/pipe.h>
|
1999-12-26 13:04:52 +00:00
|
|
|
#include <sys/vnode.h>
|
1998-03-28 10:33:27 +00:00
|
|
|
#include <sys/uio.h>
|
2000-04-16 18:53:38 +00:00
|
|
|
#include <sys/event.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
|
|
|
|
#include <vm/vm.h>
|
|
|
|
#include <vm/vm_param.h>
|
1997-02-10 02:22:35 +00:00
|
|
|
#include <sys/lock.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
#include <vm/vm_object.h>
|
|
|
|
#include <vm/vm_kern.h>
|
|
|
|
#include <vm/vm_extern.h>
|
|
|
|
#include <vm/pmap.h>
|
|
|
|
#include <vm/vm_map.h>
|
1996-02-04 19:56:35 +00:00
|
|
|
#include <vm/vm_page.h>
|
1997-08-05 00:02:08 +00:00
|
|
|
#include <vm/vm_zone.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
|
1996-02-11 22:09:50 +00:00
|
|
|
/*
|
|
|
|
* Use this define if you want to disable *fancy* VM things. Expect an
|
|
|
|
* approx 30% decrease in transfer rate. This could be useful for
|
|
|
|
* NetBSD or OpenBSD.
|
|
|
|
*/
|
|
|
|
/* #define PIPE_NODIRECT */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* interfaces to the outside world
|
|
|
|
*/
|
1996-01-28 23:38:26 +00:00
|
|
|
static int pipe_read __P((struct file *fp, struct uio *uio,
|
This is what was "fdfix2.patch," a fix for fd sharing. It's pretty
far-reaching in fd-land, so you'll want to consult the code for
changes. The biggest change is that now, you don't use
fp->f_ops->fo_foo(fp, bar)
but instead
fo_foo(fp, bar),
which increments and decrements the fp refcount upon entry and exit.
Two new calls, fhold() and fdrop(), are provided. Each does what it
seems like it should, and if fdrop() brings the refcount to zero, the
fd is freed as well.
Thanks to peter ("to hell with it, it looks ok to me.") for his review.
Thanks to msmith for keeping me from putting locks everywhere :)
Reviewed by: peter
1999-09-19 17:00:25 +00:00
|
|
|
struct ucred *cred, int flags, struct proc *p));
|
1996-01-28 23:38:26 +00:00
|
|
|
static int pipe_write __P((struct file *fp, struct uio *uio,
|
This is what was "fdfix2.patch," a fix for fd sharing. It's pretty
far-reaching in fd-land, so you'll want to consult the code for
changes. The biggest change is that now, you don't use
fp->f_ops->fo_foo(fp, bar)
but instead
fo_foo(fp, bar),
which increments and decrements the fp refcount upon entry and exit.
Two new calls, fhold() and fdrop(), are provided. Each does what it
seems like it should, and if fdrop() brings the refcount to zero, the
fd is freed as well.
Thanks to peter ("to hell with it, it looks ok to me.") for his review.
Thanks to msmith for keeping me from putting locks everywhere :)
Reviewed by: peter
1999-09-19 17:00:25 +00:00
|
|
|
struct ucred *cred, int flags, struct proc *p));
|
1996-01-28 23:38:26 +00:00
|
|
|
static int pipe_close __P((struct file *fp, struct proc *p));
|
1997-09-14 02:43:25 +00:00
|
|
|
static int pipe_poll __P((struct file *fp, int events, struct ucred *cred,
|
|
|
|
struct proc *p));
|
1999-11-08 03:28:49 +00:00
|
|
|
static int pipe_stat __P((struct file *fp, struct stat *sb, struct proc *p));
|
1998-06-07 17:13:14 +00:00
|
|
|
static int pipe_ioctl __P((struct file *fp, u_long cmd, caddr_t data, struct proc *p));
|
1996-01-28 23:38:26 +00:00
|
|
|
|
|
|
|
static struct fileops pipeops =
|
1999-11-08 03:28:49 +00:00
|
|
|
{ pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_stat, pipe_close };
|
1996-01-28 23:38:26 +00:00
|
|
|
|
2000-04-16 18:53:38 +00:00
|
|
|
static int filt_pipeattach(struct knote *kn);
|
|
|
|
static void filt_pipedetach(struct knote *kn);
|
|
|
|
static int filt_piperead(struct knote *kn, long hint);
|
|
|
|
static int filt_pipewrite(struct knote *kn, long hint);
|
|
|
|
|
|
|
|
struct filterops pipe_rwfiltops[] = {
|
|
|
|
{ 1, filt_pipeattach, filt_pipedetach, filt_piperead },
|
|
|
|
{ 1, filt_pipeattach, filt_pipedetach, filt_pipewrite },
|
|
|
|
};
|
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
/*
|
|
|
|
* Default pipe buffer size(s), this can be kind-of large now because pipe
|
|
|
|
* space is pageable. The pipe code will try to maintain locality of
|
|
|
|
* reference for performance reasons, so small amounts of outstanding I/O
|
|
|
|
* will not wipe the cache.
|
|
|
|
*/
|
1996-02-04 19:56:35 +00:00
|
|
|
#define MINPIPESIZE (PIPE_SIZE/3)
|
|
|
|
#define MAXPIPESIZE (2*PIPE_SIZE/3)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Maximum amount of kva for pipes -- this is kind-of a soft limit, but
|
|
|
|
* is there so that on large systems, we don't exhaust it.
|
|
|
|
*/
|
|
|
|
#define MAXPIPEKVA (8*1024*1024)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Limit for direct transfers, we cannot, of course limit
|
|
|
|
* the amount of kva for pipes in general though.
|
|
|
|
*/
|
|
|
|
#define LIMITPIPEKVA (16*1024*1024)
|
1996-07-13 22:52:50 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Limit the number of "big" pipes
|
|
|
|
*/
|
|
|
|
#define LIMITBIGPIPES 32
|
1998-02-09 06:11:36 +00:00
|
|
|
static int nbigpipe;
|
1996-07-13 22:52:50 +00:00
|
|
|
|
1996-07-12 08:14:58 +00:00
|
|
|
static int amountpipekva;
|
1996-01-28 23:38:26 +00:00
|
|
|
|
|
|
|
static void pipeclose __P((struct pipe *cpipe));
|
|
|
|
static void pipeinit __P((struct pipe *cpipe));
|
1996-02-04 19:56:35 +00:00
|
|
|
static __inline int pipelock __P((struct pipe *cpipe, int catch));
|
1996-01-28 23:38:26 +00:00
|
|
|
static __inline void pipeunlock __P((struct pipe *cpipe));
|
1996-02-17 14:47:16 +00:00
|
|
|
static __inline void pipeselwakeup __P((struct pipe *cpipe));
|
1996-02-11 22:09:50 +00:00
|
|
|
#ifndef PIPE_NODIRECT
|
1996-02-04 19:56:35 +00:00
|
|
|
static int pipe_build_write_buffer __P((struct pipe *wpipe, struct uio *uio));
|
|
|
|
static void pipe_destroy_write_buffer __P((struct pipe *wpipe));
|
|
|
|
static int pipe_direct_write __P((struct pipe *wpipe, struct uio *uio));
|
|
|
|
static void pipe_clone_write_buffer __P((struct pipe *wpipe));
|
1996-02-11 22:09:50 +00:00
|
|
|
#endif
|
1996-02-04 19:56:35 +00:00
|
|
|
static void pipespace __P((struct pipe *cpipe));
|
1996-01-28 23:38:26 +00:00
|
|
|
|
1998-02-09 06:11:36 +00:00
|
|
|
static vm_zone_t pipe_zone;
|
1997-08-05 00:02:08 +00:00
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
/*
|
|
|
|
* The pipe system call for the DTYPE_PIPE type of pipes
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* ARGSUSED */
|
|
|
|
/*
 * pipe() - the pipe system call for the DTYPE_PIPE type of pipes.
 *
 * Allocates a connected read/write pipe pair, installs a file
 * descriptor for each side, and returns the descriptors to the caller
 * in p->p_retval[0] (read side) and p->p_retval[1] (write side).
 * Returns 0 on success or an errno value on failure.
 */
int
pipe(p, uap)
	struct proc *p;
	struct pipe_args /* {
		int	dummy;
	} */ *uap;
{
	register struct filedesc *fdp = p->p_fd;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd, error;

	/* First caller creates the zone that all struct pipes come from. */
	if (pipe_zone == NULL)
		pipe_zone = zinit("PIPE", sizeof (struct pipe), 0, 0, 4);

	rpipe = zalloc( pipe_zone);
	pipeinit(rpipe);
	rpipe->pipe_state |= PIPE_DIRECTOK;
	wpipe = zalloc( pipe_zone);
	pipeinit(wpipe);
	wpipe->pipe_state |= PIPE_DIRECTOK;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	error = falloc(p, &rf, &fd);
	if (error)
		goto free2;
	/* Hold an extra reference on rf until setup is complete. */
	fhold(rf);
	p->p_retval[0] = fd;
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = (caddr_t)rpipe;
	rf->f_ops = &pipeops;
	error = falloc(p, &wf, &fd);
	if (error)
		goto free3;
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = (caddr_t)wpipe;
	wf->f_ops = &pipeops;
	p->p_retval[1] = fd;

	/* Cross-link the two halves of the pipe. */
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	/* Release the setup reference taken by fhold() above. */
	fdrop(rf, p);

	return (0);
free3:
	/*
	 * Undo the read-side fd: if our descriptor-table slot still
	 * points at rf, clear it and drop the table's reference; then
	 * drop the fhold() reference as well.
	 */
	if (fdp->fd_ofiles[p->p_retval[0]] == rf) {
		fdp->fd_ofiles[p->p_retval[0]] = NULL;
		fdrop(rf, p);
	}
	fdrop(rf, p);
	/* rpipe has been closed by fdrop() */
	rpipe = NULL;
free2:
	(void)pipeclose(wpipe);
	(void)pipeclose(rpipe);
	return (error);
}
|
|
|
|
|
1996-02-04 22:09:12 +00:00
|
|
|
/*
|
|
|
|
* Allocate kva for pipe circular buffer, the space is pageable
|
|
|
|
*/
|
1996-01-28 23:38:26 +00:00
|
|
|
/*
 * Allocate kva for the pipe's circular buffer; the space is pageable.
 * Panics if kernel virtual memory is exhausted.
 */
static void
pipespace(cpipe)
	struct pipe *cpipe;
{
	int npages, error;

	npages = round_page(cpipe->pipe_buffer.size)/PAGE_SIZE;
	/*
	 * Create an object, I don't like the idea of paging to/from
	 * kernel_object.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	cpipe->pipe_buffer.object = vm_object_allocate(OBJT_DEFAULT, npages);
	cpipe->pipe_buffer.buffer = (caddr_t) vm_map_min(kernel_map);

	/*
	 * Insert the object into the kernel map, and allocate kva for it.
	 * The map entry is, by default, pageable.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	error = vm_map_find(kernel_map, cpipe->pipe_buffer.object, 0,
		(vm_offset_t *) &cpipe->pipe_buffer.buffer,
		cpipe->pipe_buffer.size, 1,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	if (error != KERN_SUCCESS)
		panic("pipeinit: cannot allocate pipe -- out of kvm -- code = %d", error);
	/* Account this buffer against the global pipe-kva total. */
	amountpipekva += cpipe->pipe_buffer.size;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* initialize and allocate VM and memory for pipe
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
pipeinit(cpipe)
|
|
|
|
struct pipe *cpipe;
|
|
|
|
{
|
1996-01-28 23:38:26 +00:00
|
|
|
|
|
|
|
cpipe->pipe_buffer.in = 0;
|
|
|
|
cpipe->pipe_buffer.out = 0;
|
|
|
|
cpipe->pipe_buffer.cnt = 0;
|
1996-02-04 19:56:35 +00:00
|
|
|
cpipe->pipe_buffer.size = PIPE_SIZE;
|
1996-07-13 22:52:50 +00:00
|
|
|
|
1996-02-04 19:56:35 +00:00
|
|
|
/* Buffer kva gets dynamically allocated */
|
|
|
|
cpipe->pipe_buffer.buffer = NULL;
|
1996-07-12 08:14:58 +00:00
|
|
|
/* cpipe->pipe_buffer.object = invalid */
|
1996-01-28 23:38:26 +00:00
|
|
|
|
|
|
|
cpipe->pipe_state = 0;
|
|
|
|
cpipe->pipe_peer = NULL;
|
|
|
|
cpipe->pipe_busy = 0;
|
1999-12-26 13:04:52 +00:00
|
|
|
vfs_timestamp(&cpipe->pipe_ctime);
|
1997-03-22 06:53:45 +00:00
|
|
|
cpipe->pipe_atime = cpipe->pipe_ctime;
|
|
|
|
cpipe->pipe_mtime = cpipe->pipe_ctime;
|
1996-01-28 23:38:26 +00:00
|
|
|
bzero(&cpipe->pipe_sel, sizeof cpipe->pipe_sel);
|
1996-02-04 19:56:35 +00:00
|
|
|
|
1996-02-11 22:09:50 +00:00
|
|
|
#ifndef PIPE_NODIRECT
|
1996-02-04 19:56:35 +00:00
|
|
|
/*
|
|
|
|
* pipe data structure initializations to support direct pipe I/O
|
|
|
|
*/
|
|
|
|
cpipe->pipe_map.cnt = 0;
|
|
|
|
cpipe->pipe_map.kva = 0;
|
|
|
|
cpipe->pipe_map.pos = 0;
|
|
|
|
cpipe->pipe_map.npages = 0;
|
1996-07-12 08:14:58 +00:00
|
|
|
/* cpipe->pipe_map.ms[] = invalid */
|
1996-02-11 22:09:50 +00:00
|
|
|
#endif
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* lock a pipe for I/O, blocking other access
|
|
|
|
*/
|
|
|
|
static __inline int
|
1996-02-04 19:56:35 +00:00
|
|
|
pipelock(cpipe, catch)
|
1996-01-28 23:38:26 +00:00
|
|
|
struct pipe *cpipe;
|
1996-02-04 19:56:35 +00:00
|
|
|
int catch;
|
1996-01-28 23:38:26 +00:00
|
|
|
{
|
1996-01-31 06:00:45 +00:00
|
|
|
int error;
|
1996-01-28 23:38:26 +00:00
|
|
|
while (cpipe->pipe_state & PIPE_LOCK) {
|
|
|
|
cpipe->pipe_state |= PIPE_LWANT;
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = tsleep( cpipe,
|
|
|
|
catch?(PRIBIO|PCATCH):PRIBIO, "pipelk", 0)) != 0) {
|
1996-01-31 06:00:45 +00:00
|
|
|
return error;
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
cpipe->pipe_state |= PIPE_LOCK;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* unlock a pipe I/O lock
|
|
|
|
*/
|
|
|
|
static __inline void
|
|
|
|
pipeunlock(cpipe)
|
|
|
|
struct pipe *cpipe;
|
|
|
|
{
|
|
|
|
cpipe->pipe_state &= ~PIPE_LOCK;
|
|
|
|
if (cpipe->pipe_state & PIPE_LWANT) {
|
|
|
|
cpipe->pipe_state &= ~PIPE_LWANT;
|
1996-02-22 03:33:52 +00:00
|
|
|
wakeup(cpipe);
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1996-02-11 22:09:50 +00:00
|
|
|
static __inline void
|
|
|
|
pipeselwakeup(cpipe)
|
|
|
|
struct pipe *cpipe;
|
|
|
|
{
|
|
|
|
if (cpipe->pipe_state & PIPE_SEL) {
|
|
|
|
cpipe->pipe_state &= ~PIPE_SEL;
|
|
|
|
selwakeup(&cpipe->pipe_sel);
|
|
|
|
}
|
Installed the second patch attached to kern/7899 with some changes suggested
by bde, a few other tweaks to get the patch to apply cleanly again and
some improvements to the comments.
This change closes some fairly minor security holes associated with
F_SETOWN, fixes a few bugs, and removes some limitations that F_SETOWN
had on tty devices. For more details, see the description on the PR.
Because this patch increases the size of the proc and pgrp structures,
it is necessary to re-install the includes and recompile libkvm,
the vinum lkm, fstat, gcore, gdb, ipfilter, ps, top, and w.
PR: kern/7899
Reviewed by: bde, elvind
1998-11-11 10:04:13 +00:00
|
|
|
if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
|
|
|
|
pgsigio(cpipe->pipe_sigio, SIGIO, 0);
|
2000-04-16 18:53:38 +00:00
|
|
|
KNOTE(&cpipe->pipe_sel.si_note, 0);
|
1996-02-11 22:09:50 +00:00
|
|
|
}
|
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
/* ARGSUSED */
|
|
|
|
/*
 * pipe_read - fileops read entry point for pipes.
 *
 * Drains data into "uio" from either the pipe's circular buffer or,
 * when a direct (page-mapped) write is pending, straight from the
 * sender's mapped pages.  Blocks for more data unless FNONBLOCK is
 * set.  Returns 0 on success (including EOF with 0 bytes) or errno.
 */
/* ARGSUSED */
static int
pipe_read(fp, uio, cred, flags, p)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	struct proc *p;
	int flags;
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	int error;
	int nread = 0;
	u_int size;

	/* Busy count keeps pipeclose() from tearing us down mid-read. */
	++rpipe->pipe_busy;
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			/* Copy at most up to the buffer wrap point. */
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
				size, uio);
			if (error) {
				break;
			}
			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		/*
		 * Direct copy, bypassing a kernel buffer.
		 */
		} else if ((size = rpipe->pipe_map.cnt) &&
			   (rpipe->pipe_state & PIPE_DIRECTW)) {
			caddr_t	va;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			va = (caddr_t) rpipe->pipe_map.kva + rpipe->pipe_map.pos;
			error = uiomove(va, size, uio);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				/* Direct transfer complete: wake the writer. */
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 */
			if (rpipe->pipe_state & PIPE_EOF) {
				/* XXX error = ? */
				break;
			}

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * Unlock the pipe buffer for our remaining processing. We
			 * will either break out with an error or we will sleep and
			 * relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * Handle non-blocking mode operation or
			 * wait for more data.
			 */
			if (fp->f_flag & FNONBLOCK)
				error = EAGAIN;
			else {
				rpipe->pipe_state |= PIPE_WANTR;
				if ((error = tsleep(rpipe, PRIBIO|PCATCH, "piperd", 0)) == 0)
					error = pipelock(rpipe, 1);
			}
			if (error)
				goto unlocked_error;
		}
	}
	pipeunlock(rpipe);

	if (error == 0)
		vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANT processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	/* Tell select/poll/SIGIO consumers there is room to write. */
	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);

	return error;
}
|
|
|
|
|
1996-02-11 22:09:50 +00:00
|
|
|
#ifndef PIPE_NODIRECT
|
1996-02-04 19:56:35 +00:00
|
|
|
/*
|
|
|
|
* Map the sending processes' buffer into kernel space and wire it.
|
|
|
|
* This is similar to a physical write operation.
|
|
|
|
*/
|
1996-01-28 23:38:26 +00:00
|
|
|
/*
 * Map the sending processes' buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 *
 * On success the pipe_map control block describes the wired pages and
 * the uio is advanced past the transferred bytes; returns 0.  Returns
 * EFAULT (with any already-wired pages released) if the user address
 * range cannot be faulted in.
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	u_int size;
	int i;
	vm_offset_t addr, endaddr, paddr;

	/* Clamp the transfer to the first iovec and the pipe size. */
	size = (u_int) uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	for(i = 0, addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
		addr < endaddr;
		addr += PAGE_SIZE, i+=1) {

		vm_page_t m;

		/* Fault the page in and look up its physical address. */
		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
		    (paddr = pmap_kextract(addr)) == 0) {
			/* Back out: unwire everything wired so far. */
			int j;
			for(j=0;j<i;j++)
				vm_page_unwire(wpipe->pipe_map.ms[j], 1);
			return EFAULT;
		}

		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_wire(m);
		wpipe->pipe_map.ms[i] = m;
	}

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos = ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and map the buffer
	 */
	if (wpipe->pipe_map.kva == 0) {
		/*
		 * We need to allocate space for an extra page because the
		 * address range might (will) span pages at times.
		 */
		wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
			wpipe->pipe_buffer.size + PAGE_SIZE);
		amountpipekva += wpipe->pipe_buffer.size + PAGE_SIZE;
	}
	pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
		wpipe->pipe_map.npages);

	/*
	 * and update the uio data
	 */

	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base += size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* unmap and unwire the process buffer
|
|
|
|
*/
|
|
|
|
/*
 * unmap and unwire the process buffer
 *
 * The kva mapping is kept cached for reuse unless the global pipe-kva
 * budget (MAXPIPEKVA) has been exceeded, in which case it is freed.
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;
	if (wpipe->pipe_map.kva) {
		/* Remove the page mappings before unwiring the pages. */
		pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);

		if (amountpipekva > MAXPIPEKVA) {
			/* Over budget: give the cached kva back. */
			vm_offset_t kva = wpipe->pipe_map.kva;
			wpipe->pipe_map.kva = 0;
			kmem_free(kernel_map, kva,
				wpipe->pipe_buffer.size + PAGE_SIZE);
			amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
		}
	}
	for (i=0;i<wpipe->pipe_map.npages;i++)
		vm_page_unwire(wpipe->pipe_map.ms[i], 1);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* In the case of a signal, the writing process might go away. This
|
|
|
|
* code copies the data into the circular buffer so that the source
|
|
|
|
* pages can be freed without loss of data.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
pipe_clone_write_buffer(wpipe)
|
|
|
|
struct pipe *wpipe;
|
|
|
|
{
|
|
|
|
int size;
|
|
|
|
int pos;
|
|
|
|
|
|
|
|
size = wpipe->pipe_map.cnt;
|
|
|
|
pos = wpipe->pipe_map.pos;
|
|
|
|
bcopy((caddr_t) wpipe->pipe_map.kva+pos,
|
|
|
|
(caddr_t) wpipe->pipe_buffer.buffer,
|
|
|
|
size);
|
|
|
|
|
|
|
|
wpipe->pipe_buffer.in = size;
|
|
|
|
wpipe->pipe_buffer.out = 0;
|
|
|
|
wpipe->pipe_buffer.cnt = size;
|
|
|
|
wpipe->pipe_state &= ~PIPE_DIRECTW;
|
|
|
|
|
|
|
|
pipe_destroy_write_buffer(wpipe);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This implements the pipe buffer write mechanism. Note that only
|
|
|
|
* a direct write OR a normal pipe write can be pending at any given time.
|
|
|
|
* If there are any characters in the pipe buffer, the direct write will
|
|
|
|
* be deferred until the receiving process grabs all of the bytes from
|
|
|
|
* the pipe buffer. Then the direct mapping write is set-up.
|
|
|
|
*/
|
|
|
|
/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 *
 * Returns 0 on success, EPIPE if the read side goes away, or the
 * error from an interrupted sleep / failed buffer build.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;
retry:
	/* Wait for any previous direct write to be fully consumed. */
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if ( wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe,
				PRIBIO|PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	/* Defer until the reader has drained the buffered data. */
	if (wpipe->pipe_buffer.cnt > 0) {
		if ( wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = tsleep(wpipe,
				PRIBIO|PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
		/* State may have changed while sleeping; recheck all. */
		goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	error = pipe_build_write_buffer(wpipe, uio);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		goto error1;
	}

	/* Wait for the reader to consume the mapped pages. */
	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			/* Reader is gone: tear down the mapping. */
			pipelock(wpipe, 0);
			pipe_destroy_write_buffer(wpipe);
			pipeunlock(wpipe);
			pipeselwakeup(wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		error = tsleep(wpipe, PRIBIO|PCATCH, "pipdwt", 0);
	}

	pipelock(wpipe,0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		pipe_destroy_write_buffer(wpipe);
	}
	pipeunlock(wpipe);
	return error;

error1:
	/* Let any peer sleeping on this pipe re-evaluate its state. */
	wakeup(wpipe);
	return error;
}
|
1996-02-11 22:09:50 +00:00
|
|
|
#endif
|
1996-02-04 19:56:35 +00:00
|
|
|
|
1996-07-04 04:36:56 +00:00
|
|
|
static int
|
This is what was "fdfix2.patch," a fix for fd sharing. It's pretty
far-reaching in fd-land, so you'll want to consult the code for
changes. The biggest change is that now, you don't use
fp->f_ops->fo_foo(fp, bar)
but instead
fo_foo(fp, bar),
which increments and decrements the fp refcount upon entry and exit.
Two new calls, fhold() and fdrop(), are provided. Each does what it
seems like it should, and if fdrop() brings the refcount to zero, the
fd is freed as well.
Thanks to peter ("to hell with it, it looks ok to me.") for his review.
Thanks to msmith for keeping me from putting locks everywhere :)
Reviewed by: peter
1999-09-19 17:00:25 +00:00
|
|
|
pipe_write(fp, uio, cred, flags, p)
|
1996-07-04 04:36:56 +00:00
|
|
|
struct file *fp;
|
1996-02-04 19:56:35 +00:00
|
|
|
struct uio *uio;
|
1996-07-04 04:36:56 +00:00
|
|
|
struct ucred *cred;
|
This is what was "fdfix2.patch," a fix for fd sharing. It's pretty
far-reaching in fd-land, so you'll want to consult the code for
changes. The biggest change is that now, you don't use
fp->f_ops->fo_foo(fp, bar)
but instead
fo_foo(fp, bar),
which increments and decrements the fp refcount upon entry and exit.
Two new calls, fhold() and fdrop(), are provided. Each does what it
seems like it should, and if fdrop() brings the refcount to zero, the
fd is freed as well.
Thanks to peter ("to hell with it, it looks ok to me.") for his review.
Thanks to msmith for keeping me from putting locks everywhere :)
Reviewed by: peter
1999-09-19 17:00:25 +00:00
|
|
|
struct proc *p;
|
1999-04-04 21:41:28 +00:00
|
|
|
int flags;
|
1996-02-04 19:56:35 +00:00
|
|
|
{
|
1996-01-28 23:38:26 +00:00
|
|
|
int error = 0;
|
1996-02-05 05:50:34 +00:00
|
|
|
int orig_resid;
|
1996-01-28 23:38:26 +00:00
|
|
|
|
1996-07-04 04:36:56 +00:00
|
|
|
struct pipe *wpipe, *rpipe;
|
|
|
|
|
|
|
|
rpipe = (struct pipe *) fp->f_data;
|
|
|
|
wpipe = rpipe->pipe_peer;
|
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
/*
|
|
|
|
* detect loss of pipe read side, issue SIGPIPE if lost.
|
|
|
|
*/
|
1996-07-04 04:36:56 +00:00
|
|
|
if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
|
1996-01-31 02:05:12 +00:00
|
|
|
return EPIPE;
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
|
|
|
|
1996-07-13 22:52:50 +00:00
|
|
|
/*
|
|
|
|
* If it is advantageous to resize the pipe buffer, do
|
|
|
|
* so.
|
|
|
|
*/
|
|
|
|
if ((uio->uio_resid > PIPE_SIZE) &&
|
|
|
|
(nbigpipe < LIMITBIGPIPES) &&
|
|
|
|
(wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
|
|
|
|
(wpipe->pipe_buffer.size <= PIPE_SIZE) &&
|
|
|
|
(wpipe->pipe_buffer.cnt == 0)) {
|
|
|
|
|
|
|
|
if (wpipe->pipe_buffer.buffer) {
|
|
|
|
amountpipekva -= wpipe->pipe_buffer.size;
|
|
|
|
kmem_free(kernel_map,
|
|
|
|
(vm_offset_t)wpipe->pipe_buffer.buffer,
|
|
|
|
wpipe->pipe_buffer.size);
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifndef PIPE_NODIRECT
|
|
|
|
if (wpipe->pipe_map.kva) {
|
|
|
|
amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
|
|
|
|
kmem_free(kernel_map,
|
|
|
|
wpipe->pipe_map.kva,
|
|
|
|
wpipe->pipe_buffer.size + PAGE_SIZE);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
wpipe->pipe_buffer.in = 0;
|
|
|
|
wpipe->pipe_buffer.out = 0;
|
|
|
|
wpipe->pipe_buffer.cnt = 0;
|
|
|
|
wpipe->pipe_buffer.size = BIG_PIPE_SIZE;
|
|
|
|
wpipe->pipe_buffer.buffer = NULL;
|
|
|
|
++nbigpipe;
|
|
|
|
|
|
|
|
#ifndef PIPE_NODIRECT
|
|
|
|
wpipe->pipe_map.cnt = 0;
|
|
|
|
wpipe->pipe_map.kva = 0;
|
|
|
|
wpipe->pipe_map.pos = 0;
|
|
|
|
wpipe->pipe_map.npages = 0;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
1996-02-04 19:56:35 +00:00
|
|
|
if( wpipe->pipe_buffer.buffer == NULL) {
|
|
|
|
if ((error = pipelock(wpipe,1)) == 0) {
|
|
|
|
pipespace(wpipe);
|
|
|
|
pipeunlock(wpipe);
|
|
|
|
} else {
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
++wpipe->pipe_busy;
|
1996-02-05 05:50:34 +00:00
|
|
|
orig_resid = uio->uio_resid;
|
1996-01-28 23:38:26 +00:00
|
|
|
while (uio->uio_resid) {
|
1996-02-04 19:56:35 +00:00
|
|
|
int space;
|
1996-02-11 22:09:50 +00:00
|
|
|
#ifndef PIPE_NODIRECT
|
1996-02-04 19:56:35 +00:00
|
|
|
/*
|
|
|
|
* If the transfer is large, we can gain performance if
|
|
|
|
* we do process-to-process copies directly.
|
1996-06-17 05:15:01 +00:00
|
|
|
* If the write is non-blocking, we don't use the
|
|
|
|
* direct write mechanism.
|
2000-03-24 00:47:37 +00:00
|
|
|
*
|
|
|
|
* The direct write mechanism will detect the reader going
|
|
|
|
* away on us.
|
1996-02-04 19:56:35 +00:00
|
|
|
*/
|
1996-07-13 22:52:50 +00:00
|
|
|
if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
|
|
|
|
(fp->f_flag & FNONBLOCK) == 0 &&
|
|
|
|
(wpipe->pipe_map.kva || (amountpipekva < LIMITPIPEKVA)) &&
|
1996-02-04 19:56:35 +00:00
|
|
|
(uio->uio_iov->iov_len >= PIPE_MINDIRECT)) {
|
|
|
|
error = pipe_direct_write( wpipe, uio);
|
|
|
|
if (error) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
1996-02-11 22:09:50 +00:00
|
|
|
#endif
|
1996-02-04 19:56:35 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Pipe buffered writes cannot be coincidental with
|
|
|
|
* direct writes. We wait until the currently executing
|
|
|
|
* direct write is completed before we start filling the
|
2000-03-24 00:47:37 +00:00
|
|
|
* pipe buffer. We break out if a signal occurs or the
|
|
|
|
* reader goes away.
|
1996-02-04 19:56:35 +00:00
|
|
|
*/
|
|
|
|
retrywrite:
|
|
|
|
while (wpipe->pipe_state & PIPE_DIRECTW) {
|
1996-02-09 04:36:36 +00:00
|
|
|
if (wpipe->pipe_state & PIPE_WANTR) {
|
|
|
|
wpipe->pipe_state &= ~PIPE_WANTR;
|
|
|
|
wakeup(wpipe);
|
|
|
|
}
|
2000-03-24 00:47:37 +00:00
|
|
|
error = tsleep(wpipe, PRIBIO|PCATCH, "pipbww", 0);
|
|
|
|
if (wpipe->pipe_state & PIPE_EOF)
|
|
|
|
break;
|
1996-02-04 19:56:35 +00:00
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
}
|
2000-03-24 00:47:37 +00:00
|
|
|
if (wpipe->pipe_state & PIPE_EOF) {
|
|
|
|
error = EPIPE;
|
|
|
|
break;
|
|
|
|
}
|
1996-02-04 19:56:35 +00:00
|
|
|
|
|
|
|
space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
|
1996-03-17 04:52:10 +00:00
|
|
|
|
|
|
|
/* Writes of size <= PIPE_BUF must be atomic. */
|
1996-02-05 05:50:34 +00:00
|
|
|
if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
|
|
|
|
space = 0;
|
1996-02-04 19:56:35 +00:00
|
|
|
|
1996-07-13 22:52:50 +00:00
|
|
|
if (space > 0 && (wpipe->pipe_buffer.cnt < PIPE_SIZE)) {
|
1996-02-04 19:56:35 +00:00
|
|
|
if ((error = pipelock(wpipe,1)) == 0) {
|
1999-12-13 02:55:47 +00:00
|
|
|
int size; /* Transfer size */
|
|
|
|
int segsize; /* first segment to transfer */
|
1996-02-04 19:56:35 +00:00
|
|
|
/*
|
|
|
|
* It is possible for a direct write to
|
|
|
|
* slip in on us... handle it here...
|
|
|
|
*/
|
|
|
|
if (wpipe->pipe_state & PIPE_DIRECTW) {
|
|
|
|
pipeunlock(wpipe);
|
|
|
|
goto retrywrite;
|
|
|
|
}
|
1999-12-13 02:55:47 +00:00
|
|
|
/*
|
|
|
|
* If a process blocked in uiomove, our
|
|
|
|
* value for space might be bad.
|
2000-03-24 00:47:37 +00:00
|
|
|
*
|
|
|
|
* XXX will we be ok if the reader has gone
|
|
|
|
* away here?
|
1999-12-13 02:55:47 +00:00
|
|
|
*/
|
|
|
|
if (space > wpipe->pipe_buffer.size -
|
|
|
|
wpipe->pipe_buffer.cnt) {
|
|
|
|
pipeunlock(wpipe);
|
|
|
|
goto retrywrite;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Transfer size is minimum of uio transfer
|
|
|
|
* and free space in pipe buffer.
|
|
|
|
*/
|
|
|
|
if (space > uio->uio_resid)
|
|
|
|
size = uio->uio_resid;
|
|
|
|
else
|
|
|
|
size = space;
|
|
|
|
/*
|
|
|
|
* First segment to transfer is minimum of
|
|
|
|
* transfer size and contiguous space in
|
|
|
|
* pipe buffer. If first segment to transfer
|
|
|
|
* is less than the transfer size, we've got
|
|
|
|
* a wraparound in the buffer.
|
|
|
|
*/
|
|
|
|
segsize = wpipe->pipe_buffer.size -
|
|
|
|
wpipe->pipe_buffer.in;
|
|
|
|
if (segsize > size)
|
|
|
|
segsize = size;
|
|
|
|
|
|
|
|
/* Transfer first segment */
|
|
|
|
|
|
|
|
error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
|
|
|
|
segsize, uio);
|
|
|
|
|
|
|
|
if (error == 0 && segsize < size) {
|
|
|
|
/*
|
|
|
|
* Transfer remaining part now, to
|
|
|
|
* support atomic writes. Wraparound
|
|
|
|
* happened.
|
|
|
|
*/
|
|
|
|
if (wpipe->pipe_buffer.in + segsize !=
|
|
|
|
wpipe->pipe_buffer.size)
|
|
|
|
panic("Expected pipe buffer wraparound disappeared");
|
|
|
|
|
|
|
|
error = uiomove(&wpipe->pipe_buffer.buffer[0],
|
|
|
|
size - segsize, uio);
|
|
|
|
}
|
|
|
|
if (error == 0) {
|
|
|
|
wpipe->pipe_buffer.in += size;
|
|
|
|
if (wpipe->pipe_buffer.in >=
|
|
|
|
wpipe->pipe_buffer.size) {
|
|
|
|
if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size)
|
|
|
|
panic("Expected wraparound bad");
|
|
|
|
wpipe->pipe_buffer.in = size - segsize;
|
|
|
|
}
|
|
|
|
|
|
|
|
wpipe->pipe_buffer.cnt += size;
|
|
|
|
if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
|
|
|
|
panic("Pipe buffer overflow");
|
|
|
|
|
|
|
|
}
|
1996-01-28 23:38:26 +00:00
|
|
|
pipeunlock(wpipe);
|
|
|
|
}
|
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* If the "read-side" has been blocked, wake it up now.
|
|
|
|
*/
|
|
|
|
if (wpipe->pipe_state & PIPE_WANTR) {
|
|
|
|
wpipe->pipe_state &= ~PIPE_WANTR;
|
|
|
|
wakeup(wpipe);
|
|
|
|
}
|
1996-02-11 22:09:50 +00:00
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
/*
|
|
|
|
* don't block on non-blocking I/O
|
|
|
|
*/
|
1996-07-04 04:36:56 +00:00
|
|
|
if (fp->f_flag & FNONBLOCK) {
|
1996-02-04 19:56:35 +00:00
|
|
|
error = EAGAIN;
|
1996-01-28 23:38:26 +00:00
|
|
|
break;
|
|
|
|
}
|
1996-02-04 19:56:35 +00:00
|
|
|
|
1996-02-11 22:09:50 +00:00
|
|
|
/*
|
|
|
|
* We have no more space and have something to offer,
|
1997-09-14 02:43:25 +00:00
|
|
|
* wake up select/poll.
|
1996-02-11 22:09:50 +00:00
|
|
|
*/
|
|
|
|
pipeselwakeup(wpipe);
|
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
wpipe->pipe_state |= PIPE_WANTW;
|
1999-01-27 21:50:00 +00:00
|
|
|
if ((error = tsleep(wpipe, (PRIBIO+1)|PCATCH, "pipewr", 0)) != 0) {
|
1996-01-28 23:38:26 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* If read side wants to go away, we just issue a signal
|
|
|
|
* to ourselves.
|
|
|
|
*/
|
|
|
|
if (wpipe->pipe_state & PIPE_EOF) {
|
1996-01-31 02:05:12 +00:00
|
|
|
error = EPIPE;
|
1996-02-04 19:56:35 +00:00
|
|
|
break;
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1996-03-17 04:52:10 +00:00
|
|
|
--wpipe->pipe_busy;
|
1996-01-28 23:38:26 +00:00
|
|
|
if ((wpipe->pipe_busy == 0) &&
|
|
|
|
(wpipe->pipe_state & PIPE_WANT)) {
|
|
|
|
wpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTR);
|
|
|
|
wakeup(wpipe);
|
|
|
|
} else if (wpipe->pipe_buffer.cnt > 0) {
|
|
|
|
/*
|
|
|
|
* If we have put any characters in the buffer, we wake up
|
|
|
|
* the reader.
|
|
|
|
*/
|
|
|
|
if (wpipe->pipe_state & PIPE_WANTR) {
|
|
|
|
wpipe->pipe_state &= ~PIPE_WANTR;
|
|
|
|
wakeup(wpipe);
|
|
|
|
}
|
|
|
|
}
|
1996-02-04 22:09:12 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Don't return EPIPE if I/O was successful
|
|
|
|
*/
|
1996-02-04 19:56:35 +00:00
|
|
|
if ((wpipe->pipe_buffer.cnt == 0) &&
|
|
|
|
(uio->uio_resid == 0) &&
|
|
|
|
(error == EPIPE))
|
|
|
|
error = 0;
|
1996-02-05 05:50:34 +00:00
|
|
|
|
1997-03-22 06:53:45 +00:00
|
|
|
if (error == 0)
|
1999-12-26 13:04:52 +00:00
|
|
|
vfs_timestamp(&wpipe->pipe_mtime);
|
1997-03-22 06:53:45 +00:00
|
|
|
|
1996-02-11 22:09:50 +00:00
|
|
|
/*
|
|
|
|
* We have something to offer,
|
1997-09-14 02:43:25 +00:00
|
|
|
* wake up select/poll.
|
1996-02-11 22:09:50 +00:00
|
|
|
*/
|
1996-02-22 03:33:52 +00:00
|
|
|
if (wpipe->pipe_buffer.cnt)
|
1996-02-11 22:09:50 +00:00
|
|
|
pipeselwakeup(wpipe);
|
1996-02-04 19:56:35 +00:00
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * we implement a very minimal set of ioctls for compatibility with sockets.
 *
 * Returns 0 on success, an fsetown() error for the ownership ioctls, or
 * ENOTTY for unrecognized commands.
 */
int
pipe_ioctl(fp, cmd, data, p)
	struct file *fp;
	u_long cmd;
	register caddr_t data;
	struct proc *p;
{
	register struct pipe *mpipe = (struct pipe *)fp->f_data;

	switch (cmd) {

	case FIONBIO:
		/* Accepted but nothing to record here on the pipe itself. */
		return (0);

	case FIOASYNC:
		/* Toggle async-I/O (SIGIO) notification state. */
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		return (0);

	case FIONREAD:
		/*
		 * Bytes available to read: pending direct-write pages take
		 * precedence over the ring buffer count.
		 */
		if (mpipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = mpipe->pipe_map.cnt;
		else
			*(int *)data = mpipe->pipe_buffer.cnt;
		return (0);

	case FIOSETOWN:
		return (fsetown(*(int *)data, &mpipe->pipe_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		/* Negated: TIOCSPGRP passes a pgrp, fsetown wants -pgid. */
		return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		return (0);

	}
	return (ENOTTY);
}
|
|
|
|
|
|
|
|
/*
 * poll/select support: report readiness on a pipe endpoint.
 * Readable when data (buffered or direct-write) is pending or EOF is set;
 * writable when no direct write is active and at least PIPE_BUF bytes of
 * buffer space remain.  If nothing is ready, register with selrecord()
 * so pipeselwakeup() can notify us later.
 */
int
pipe_poll(fp, events, cred, p)
	struct file *fp;
	int events;
	struct ucred *cred;
	struct proc *p;
{
	register struct pipe *rpipe = (struct pipe *)fp->f_data;
	struct pipe *wpipe;
	int revents = 0;

	wpipe = rpipe->pipe_peer;
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_state & PIPE_DIRECTW) ||
		    (rpipe->pipe_buffer.cnt > 0) ||
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	/* Writable: peer gone (reported below as HUP too) or room >= PIPE_BUF. */
	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) ||
		    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
		     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	/* POLLHUP is reported regardless of the requested events. */
	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	if (revents == 0) {
		/*
		 * Nothing ready; record interest so a later state change
		 * wakes the selecting process.  Note: wpipe cannot be NULL
		 * here, since a NULL peer set POLLHUP above.
		 */
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(p, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(p, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}

	return (revents);
}
|
|
|
|
|
1999-11-08 03:28:49 +00:00
|
|
|
static int
|
|
|
|
pipe_stat(fp, ub, p)
|
|
|
|
struct file *fp;
|
|
|
|
struct stat *ub;
|
|
|
|
struct proc *p;
|
1996-01-28 23:38:26 +00:00
|
|
|
{
|
1999-11-08 03:28:49 +00:00
|
|
|
struct pipe *pipe = (struct pipe *)fp->f_data;
|
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
bzero((caddr_t)ub, sizeof (*ub));
|
1996-07-12 08:14:58 +00:00
|
|
|
ub->st_mode = S_IFIFO;
|
1996-02-04 19:56:35 +00:00
|
|
|
ub->st_blksize = pipe->pipe_buffer.size;
|
1996-01-28 23:38:26 +00:00
|
|
|
ub->st_size = pipe->pipe_buffer.cnt;
|
|
|
|
ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
|
1998-03-26 20:54:05 +00:00
|
|
|
ub->st_atimespec = pipe->pipe_atime;
|
|
|
|
ub->st_mtimespec = pipe->pipe_mtime;
|
|
|
|
ub->st_ctimespec = pipe->pipe_ctime;
|
2000-05-11 22:08:20 +00:00
|
|
|
ub->st_uid = fp->f_cred->cr_uid;
|
|
|
|
ub->st_gid = fp->f_cred->cr_gid;
|
1996-07-12 08:14:58 +00:00
|
|
|
/*
|
2000-05-11 22:08:20 +00:00
|
|
|
* Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
|
1996-07-12 08:14:58 +00:00
|
|
|
* XXX (st_dev, st_ino) should be unique.
|
|
|
|
*/
|
1996-01-28 23:38:26 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ARGSUSED */
|
|
|
|
static int
|
|
|
|
pipe_close(fp, p)
|
|
|
|
struct file *fp;
|
|
|
|
struct proc *p;
|
|
|
|
{
|
|
|
|
struct pipe *cpipe = (struct pipe *)fp->f_data;
|
1996-06-12 05:11:41 +00:00
|
|
|
|
1999-08-04 18:53:50 +00:00
|
|
|
fp->f_ops = &badfileops;
|
|
|
|
fp->f_data = NULL;
|
Installed the second patch attached to kern/7899 with some changes suggested
by bde, a few other tweaks to get the patch to apply cleanly again and
some improvements to the comments.
This change closes some fairly minor security holes associated with
F_SETOWN, fixes a few bugs, and removes some limitations that F_SETOWN
had on tty devices. For more details, see the description on the PR.
Because this patch increases the size of the proc and pgrp structures,
it is necessary to re-install the includes and recompile libkvm,
the vinum lkm, fstat, gcore, gdb, ipfilter, ps, top, and w.
PR: kern/7899
Reviewed by: bde, elvind
1998-11-11 10:04:13 +00:00
|
|
|
funsetown(cpipe->pipe_sigio);
|
1996-01-28 23:38:26 +00:00
|
|
|
pipeclose(cpipe);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * shutdown the pipe
 *
 * Wakes select/poll waiters, waits for in-flight readers/writers to
 * drain (pipe_busy), marks the peer EOF and detaches it, then frees the
 * buffer, any direct-write KVA, and finally the pipe structure itself.
 */
static void
pipeclose(cpipe)
	struct pipe *cpipe;
{
	struct pipe *ppipe;

	if (cpipe) {

		pipeselwakeup(cpipe);

		/*
		 * If the other side is blocked, wake it up saying that
		 * we want to close it down.  PIPE_WANT makes the last
		 * busy reader/writer wake us when it finishes.
		 */
		while (cpipe->pipe_busy) {
			wakeup(cpipe);
			cpipe->pipe_state |= PIPE_WANT|PIPE_EOF;
			tsleep(cpipe, PRIBIO, "pipecl", 0);
		}

		/*
		 * Disconnect from peer
		 */
		if ((ppipe = cpipe->pipe_peer) != NULL) {
			pipeselwakeup(ppipe);

			/* Peer now sees EOF; wake anyone sleeping on it. */
			ppipe->pipe_state |= PIPE_EOF;
			wakeup(ppipe);
			ppipe->pipe_peer = NULL;
		}

		/*
		 * free resources
		 */
		if (cpipe->pipe_buffer.buffer) {
			/* Big pipes count against the global big-pipe limit. */
			if (cpipe->pipe_buffer.size > PIPE_SIZE)
				--nbigpipe;
			amountpipekva -= cpipe->pipe_buffer.size;
			kmem_free(kernel_map,
				(vm_offset_t)cpipe->pipe_buffer.buffer,
				cpipe->pipe_buffer.size);
		}
#ifndef PIPE_NODIRECT
		/* Release direct-write mapping KVA, if any was set up. */
		if (cpipe->pipe_map.kva) {
			amountpipekva -= cpipe->pipe_buffer.size + PAGE_SIZE;
			kmem_free(kernel_map,
				cpipe->pipe_map.kva,
				cpipe->pipe_buffer.size + PAGE_SIZE);
		}
#endif
		zfree(pipe_zone, cpipe);
	}
}
|
2000-04-16 18:53:38 +00:00
|
|
|
|
|
|
|
static int
|
|
|
|
filt_pipeattach(struct knote *kn)
|
|
|
|
{
|
|
|
|
struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
|
|
|
|
|
|
|
|
SLIST_INSERT_HEAD(&rpipe->pipe_sel.si_note, kn, kn_selnext);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
filt_pipedetach(struct knote *kn)
|
|
|
|
{
|
|
|
|
struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
|
|
|
|
|
2000-05-26 02:09:24 +00:00
|
|
|
SLIST_REMOVE(&rpipe->pipe_sel.si_note, kn, knote, kn_selnext);
|
2000-04-16 18:53:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*ARGSUSED*/
|
|
|
|
static int
|
|
|
|
filt_piperead(struct knote *kn, long hint)
|
|
|
|
{
|
|
|
|
struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
|
|
|
|
struct pipe *wpipe = rpipe->pipe_peer;
|
|
|
|
|
|
|
|
kn->kn_data = rpipe->pipe_buffer.cnt;
|
|
|
|
if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
|
|
|
|
kn->kn_data = rpipe->pipe_map.cnt;
|
|
|
|
|
|
|
|
if ((rpipe->pipe_state & PIPE_EOF) ||
|
|
|
|
(wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
|
|
|
|
kn->kn_flags |= EV_EOF;
|
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
return (kn->kn_data > 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*ARGSUSED*/
|
|
|
|
static int
|
|
|
|
filt_pipewrite(struct knote *kn, long hint)
|
|
|
|
{
|
|
|
|
struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
|
|
|
|
struct pipe *wpipe = rpipe->pipe_peer;
|
|
|
|
|
|
|
|
if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
|
|
|
|
kn->kn_data = 0;
|
|
|
|
kn->kn_flags |= EV_EOF;
|
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
|
2000-09-14 20:10:19 +00:00
|
|
|
if (wpipe->pipe_state & PIPE_DIRECTW)
|
2000-04-16 18:53:38 +00:00
|
|
|
kn->kn_data = 0;
|
|
|
|
|
|
|
|
return (kn->kn_data >= PIPE_BUF);
|
|
|
|
}
|