merge pipe and fifo implementations

Also reviewed by: jhb, jilles (initial revision)
Tested by: pho, jilles

Submitted by:	gianni
Reviewed by:	bde
This commit is contained in:
Kip Macy 2012-02-23 18:37:30 +00:00
parent a93cda789a
commit 11ac7ec076
4 changed files with 225 additions and 450 deletions

View File

@ -33,6 +33,7 @@
/*
* Prototypes for fifo operations on vnodes.
*/
int fifo_iseof(struct file *);
int fifo_vnoperate(struct vop_generic_args *);
int fifo_printinfo(struct vnode *);

View File

@ -2,6 +2,7 @@
* Copyright (c) 1990, 1993, 1995
* The Regents of the University of California.
* Copyright (c) 2005 Robert N. M. Watson
* Copyright (c) 2012 Giovanni Trematerra
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -42,11 +43,10 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/pipe.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/systm.h>
#include <sys/un.h>
@ -54,40 +54,16 @@
#include <sys/vnode.h>
#include <fs/fifofs/fifo.h>
static fo_rdwr_t fifo_read_f;
static fo_rdwr_t fifo_write_f;
static fo_ioctl_t fifo_ioctl_f;
static fo_poll_t fifo_poll_f;
static fo_kqfilter_t fifo_kqfilter_f;
static fo_stat_t fifo_stat_f;
static fo_close_t fifo_close_f;
static fo_truncate_t fifo_truncate_f;
/*
 * File operations vector installed on fifo file descriptors by
 * fifo_open() (via finit()).  fo_chmod/fo_chown fall through to the
 * generic vnode handlers; the remaining operations are fifo-specific
 * wrappers defined in this file.
 */
struct fileops fifo_ops_f = {
	.fo_read = fifo_read_f,
	.fo_write = fifo_write_f,
	.fo_truncate = fifo_truncate_f,
	.fo_ioctl = fifo_ioctl_f,
	.fo_poll = fifo_poll_f,
	.fo_kqfilter = fifo_kqfilter_f,
	.fo_stat = fifo_stat_f,
	.fo_close = fifo_close_f,
	.fo_chmod = vn_chmod,
	.fo_chown = vn_chown,
	.fo_flags = DFLAG_PASSABLE
};
/*
* This structure is associated with the FIFO vnode and stores
* the state associated with the FIFO.
* Notes about locking:
* - fi_readsock and fi_writesock are invariant since init time.
* - fi_readers and fi_writers are vnode lock protected.
* - fi_wgen is fif_mtx lock protected.
* - fi_pipe is invariant since init time.
* - fi_readers and fi_writers are protected by the vnode lock.
* - fi_wgen is protected by the pipe mutex.
*/
struct fifoinfo {
struct socket *fi_readsock;
struct socket *fi_writesock;
struct pipe *fi_pipe;
long fi_readers;
long fi_writers;
int fi_wgen;
@ -99,29 +75,6 @@ static vop_close_t fifo_close;
static vop_pathconf_t fifo_pathconf;
static vop_advlock_t fifo_advlock;
static void filt_fifordetach(struct knote *kn);
static int filt_fiforead(struct knote *kn, long hint);
static void filt_fifowdetach(struct knote *kn);
static int filt_fifowrite(struct knote *kn, long hint);
static void filt_fifodetach_notsup(struct knote *kn);
static int filt_fifo_notsup(struct knote *kn, long hint);
/* kqueue filterops for the readable side of a fifo. */
static struct filterops fiforead_filtops = {
	.f_isfd = 1,
	.f_detach = filt_fifordetach,
	.f_event = filt_fiforead,
};

/* kqueue filterops for the writable side of a fifo. */
static struct filterops fifowrite_filtops = {
	.f_isfd = 1,
	.f_detach = filt_fifowdetach,
	.f_event = filt_fifowrite,
};

/*
 * Filterops used when the requested filter does not match the
 * descriptor's access mode: the attach succeeds but no event is ever
 * generated (see fifo_kqfilter_f()).
 */
static struct filterops fifo_notsup_filtops = {
	.f_isfd = 1,
	.f_detach = filt_fifodetach_notsup,
	.f_event = filt_fifo_notsup,
};
struct vop_vector fifo_specops = {
.vop_default = &default_vnodeops,
@ -150,22 +103,19 @@ struct vop_vector fifo_specops = {
.vop_write = VOP_PANIC,
};
struct mtx fifo_mtx;
MTX_SYSINIT(fifo, &fifo_mtx, "fifo mutex", MTX_DEF);
/*
* Dispose of fifo resources.
*/
static void
fifo_cleanup(struct vnode *vp)
{
struct fifoinfo *fip = vp->v_fifoinfo;
struct fifoinfo *fip;
ASSERT_VOP_ELOCKED(vp, "fifo_cleanup");
fip = vp->v_fifoinfo;
if (fip->fi_readers == 0 && fip->fi_writers == 0) {
vp->v_fifoinfo = NULL;
(void)soclose(fip->fi_readsock);
(void)soclose(fip->fi_writesock);
pipe_dtor(fip->fi_pipe);
free(fip, M_VNODE);
}
}
@ -185,101 +135,80 @@ fifo_open(ap)
struct file *a_fp;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
struct vnode *vp;
struct file *fp;
struct thread *td;
struct fifoinfo *fip;
struct thread *td = ap->a_td;
struct ucred *cred = ap->a_cred;
struct file *fp = ap->a_fp;
struct socket *rso, *wso;
struct pipe *fpipe;
int error;
vp = ap->a_vp;
fp = ap->a_fp;
td = ap->a_td;
ASSERT_VOP_ELOCKED(vp, "fifo_open");
if (fp == NULL)
return (EINVAL);
if ((fip = vp->v_fifoinfo) == NULL) {
fip = malloc(sizeof(*fip), M_VNODE, M_WAITOK);
error = socreate(AF_LOCAL, &rso, SOCK_STREAM, 0, cred, td);
if (error)
goto fail1;
fip->fi_readsock = rso;
error = socreate(AF_LOCAL, &wso, SOCK_STREAM, 0, cred, td);
if (error)
goto fail2;
fip->fi_writesock = wso;
error = soconnect2(wso, rso);
/* Close the direction we do not use, so we can get POLLHUP. */
if (error == 0)
error = soshutdown(rso, SHUT_WR);
if (error) {
(void)soclose(wso);
fail2:
(void)soclose(rso);
fail1:
free(fip, M_VNODE);
error = pipe_named_ctor(&fpipe, td);
if (error != 0)
return (error);
}
fip = malloc(sizeof(*fip), M_VNODE, M_WAITOK);
fip->fi_pipe = fpipe;
fip->fi_wgen = fip->fi_readers = fip->fi_writers = 0;
wso->so_snd.sb_lowat = PIPE_BUF;
SOCKBUF_LOCK(&rso->so_rcv);
rso->so_rcv.sb_state |= SBS_CANTRCVMORE;
SOCKBUF_UNLOCK(&rso->so_rcv);
KASSERT(vp->v_fifoinfo == NULL,
("fifo_open: v_fifoinfo race"));
KASSERT(vp->v_fifoinfo == NULL, ("fifo_open: v_fifoinfo race"));
vp->v_fifoinfo = fip;
}
fpipe = fip->fi_pipe;
KASSERT(fpipe != NULL, ("fifo_open: pipe is NULL"));
/*
* Use the fifo_mtx lock here, in addition to the vnode lock,
* Use the pipe mutex here, in addition to the vnode lock,
* in order to allow vnode lock dropping before msleep() calls
* and still avoiding missed wakeups.
*/
mtx_lock(&fifo_mtx);
PIPE_LOCK(fpipe);
if (ap->a_mode & FREAD) {
fip->fi_readers++;
if (fip->fi_readers == 1) {
SOCKBUF_LOCK(&fip->fi_writesock->so_snd);
fip->fi_writesock->so_snd.sb_state &= ~SBS_CANTSENDMORE;
SOCKBUF_UNLOCK(&fip->fi_writesock->so_snd);
if (fip->fi_writers > 0) {
fpipe->pipe_state &= ~PIPE_EOF;
if (fip->fi_writers > 0)
wakeup(&fip->fi_writers);
sowwakeup(fip->fi_writesock);
}
}
fp->f_seqcount = fip->fi_wgen - fip->fi_writers;
}
if (ap->a_mode & FWRITE) {
if ((ap->a_mode & O_NONBLOCK) && fip->fi_readers == 0) {
mtx_unlock(&fifo_mtx);
PIPE_UNLOCK(fpipe);
if (fip->fi_writers == 0)
fifo_cleanup(vp);
return (ENXIO);
}
fip->fi_writers++;
if (fip->fi_writers == 1) {
SOCKBUF_LOCK(&fip->fi_readsock->so_rcv);
fip->fi_readsock->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
SOCKBUF_UNLOCK(&fip->fi_readsock->so_rcv);
if (fip->fi_readers > 0) {
fpipe->pipe_state &= ~PIPE_EOF;
if (fip->fi_readers > 0)
wakeup(&fip->fi_readers);
sorwakeup(fip->fi_readsock);
}
}
}
if ((ap->a_mode & O_NONBLOCK) == 0) {
if ((ap->a_mode & FREAD) && fip->fi_writers == 0) {
VOP_UNLOCK(vp, 0);
error = msleep(&fip->fi_readers, &fifo_mtx,
error = msleep(&fip->fi_readers, PIPE_MTX(fpipe),
PDROP | PCATCH | PSOCK, "fifoor", 0);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (error) {
fip->fi_readers--;
if (fip->fi_readers == 0) {
socantsendmore(fip->fi_writesock);
PIPE_LOCK(fpipe);
fpipe->pipe_state |= PIPE_EOF;
if (fpipe->pipe_state & PIPE_WANTW)
wakeup(fpipe);
PIPE_UNLOCK(fpipe);
fifo_cleanup(vp);
}
return (error);
}
mtx_lock(&fifo_mtx);
PIPE_LOCK(fpipe);
/*
* We must have got woken up because we had a writer.
* That (and not still having one) is the condition
@ -288,16 +217,18 @@ fifo_open(ap)
}
if ((ap->a_mode & FWRITE) && fip->fi_readers == 0) {
VOP_UNLOCK(vp, 0);
error = msleep(&fip->fi_writers, &fifo_mtx,
error = msleep(&fip->fi_writers, PIPE_MTX(fpipe),
PDROP | PCATCH | PSOCK, "fifoow", 0);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
if (error) {
fip->fi_writers--;
if (fip->fi_writers == 0) {
socantrcvmore(fip->fi_readsock);
mtx_lock(&fifo_mtx);
PIPE_LOCK(fpipe);
fpipe->pipe_state |= PIPE_EOF;
if (fpipe->pipe_state & PIPE_WANTR)
wakeup(fpipe);
fip->fi_wgen++;
mtx_unlock(&fifo_mtx);
PIPE_UNLOCK(fpipe);
fifo_cleanup(vp);
}
return (error);
@ -307,82 +238,13 @@ fifo_open(ap)
* a reader. That (and not still having one)
* is the condition that we must wait for.
*/
mtx_lock(&fifo_mtx);
PIPE_LOCK(fpipe);
}
}
mtx_unlock(&fifo_mtx);
PIPE_UNLOCK(fpipe);
KASSERT(fp != NULL, ("can't fifo/vnode bypass"));
KASSERT(fp->f_ops == &badfileops, ("not badfileops in fifo_open"));
finit(fp, fp->f_flag, DTYPE_FIFO, fip, &fifo_ops_f);
return (0);
}
/*
 * Knote detach for the fifo read filter: unhook the knote from the
 * receive sockbuf's note list and drop SB_KNOTE once that list is
 * empty.
 */
static void
filt_fifordetach(struct knote *kn)
{
	struct socket *sock;

	sock = (struct socket *)kn->kn_hook;
	SOCKBUF_LOCK(&sock->so_rcv);
	knlist_remove(&sock->so_rcv.sb_sel.si_note, kn, 1);
	if (knlist_empty(&sock->so_rcv.sb_sel.si_note))
		sock->so_rcv.sb_flags &= ~SB_KNOTE;
	SOCKBUF_UNLOCK(&sock->so_rcv);
}
/*
 * Event predicate for the fifo read filter: publish the byte count in
 * kn_data and report EV_EOF once the receive side can accept no more
 * data; otherwise fire only when data is pending.
 */
static int
filt_fiforead(struct knote *kn, long hint)
{
	struct socket *sock;

	sock = (struct socket *)kn->kn_hook;
	SOCKBUF_LOCK_ASSERT(&sock->so_rcv);
	kn->kn_data = sock->so_rcv.sb_cc;
	if (sock->so_rcv.sb_state & SBS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	kn->kn_flags &= ~EV_EOF;
	return (kn->kn_data > 0);
}
/*
 * Knote detach for the fifo write filter: unhook the knote from the
 * send sockbuf's note list and drop SB_KNOTE once that list is empty.
 */
static void
filt_fifowdetach(struct knote *kn)
{
	struct socket *sock;

	sock = (struct socket *)kn->kn_hook;
	SOCKBUF_LOCK(&sock->so_snd);
	knlist_remove(&sock->so_snd.sb_sel.si_note, kn, 1);
	if (knlist_empty(&sock->so_snd.sb_sel.si_note))
		sock->so_snd.sb_flags &= ~SB_KNOTE;
	SOCKBUF_UNLOCK(&sock->so_snd);
}
/*
 * Event predicate for the fifo write filter: publish the free space in
 * kn_data and report EV_EOF once the send side is shut down; otherwise
 * fire only when at least the low-water mark of space is available.
 */
static int
filt_fifowrite(struct knote *kn, long hint)
{
	struct socket *sock;

	sock = (struct socket *)kn->kn_hook;
	SOCKBUF_LOCK_ASSERT(&sock->so_snd);
	kn->kn_data = sbspace(&sock->so_snd);
	if (sock->so_snd.sb_state & SBS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	kn->kn_flags &= ~EV_EOF;
	return (kn->kn_data >= sock->so_snd.sb_lowat);
}
/*
 * Detach routine for fifo_notsup_filtops: the attach recorded no state,
 * so there is nothing to undo.
 */
static void
filt_fifodetach_notsup(struct knote *kn)
{
}
static int
filt_fifo_notsup(struct knote *kn, long hint)
{
finit(fp, fp->f_flag, DTYPE_FIFO, fpipe, &pipeops);
return (0);
}
@ -399,26 +261,33 @@ fifo_close(ap)
struct thread *a_td;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
struct fifoinfo *fip = vp->v_fifoinfo;
struct vnode *vp;
struct fifoinfo *fip;
struct pipe *cpipe;
vp = ap->a_vp;
fip = vp->v_fifoinfo;
cpipe = fip->fi_pipe;
ASSERT_VOP_ELOCKED(vp, "fifo_close");
if (fip == NULL) {
printf("fifo_close: no v_fifoinfo %p\n", vp);
return (0);
}
if (ap->a_fflag & FREAD) {
fip->fi_readers--;
if (fip->fi_readers == 0)
socantsendmore(fip->fi_writesock);
if (fip->fi_readers == 0) {
PIPE_LOCK(cpipe);
cpipe->pipe_state |= PIPE_EOF;
if (cpipe->pipe_state & PIPE_WANTW)
wakeup(cpipe);
PIPE_UNLOCK(cpipe);
}
}
if (ap->a_fflag & FWRITE) {
fip->fi_writers--;
if (fip->fi_writers == 0) {
socantrcvmore(fip->fi_readsock);
mtx_lock(&fifo_mtx);
PIPE_LOCK(cpipe);
cpipe->pipe_state |= PIPE_EOF;
if (cpipe->pipe_state & PIPE_WANTR)
wakeup(cpipe);
fip->fi_wgen++;
mtx_unlock(&fifo_mtx);
PIPE_UNLOCK(cpipe);
}
}
fifo_cleanup(vp);
@ -504,212 +373,15 @@ fifo_advlock(ap)
return (ap->a_flags & F_FLOCK ? EOPNOTSUPP : EINVAL);
}
/*
 * fo_close for fifo file descriptors: defer to the generic vnode
 * fileops close routine.
 */
static int
fifo_close_f(struct file *fp, struct thread *td)
{
	return (vnops.fo_close(fp, td));
}
/*
* The implementation of ioctl() for named fifos is complicated by the fact
* that we permit O_RDWR fifo file descriptors, meaning that the actions of
ioctls may have to be applied to both the underlying sockets rather than
just one. The original implementation simply forwarded the ioctl to one
* or both sockets based on fp->f_flag. We now consider each ioctl
* separately, as the composition effect requires careful ordering.
*
* We do not blindly pass all ioctls through to the socket in order to avoid
* providing unnecessary ioctls that might be improperly depended on by
* applications (such as socket-specific, routing, and interface ioctls).
*
* Unlike sys_pipe.c, fifos do not implement the deprecated TIOCSPGRP and
* TIOCGPGRP ioctls. Earlier implementations of fifos did forward SIOCSPGRP
* and SIOCGPGRP ioctls, so we might need to re-add those here.
*/
/*
 * fo_ioctl for fifo file descriptors: dispatch a small, explicit set of
 * ioctls to the underlying read/write sockets, based on the descriptor's
 * access mode (see the block comment above for the rationale).  Any
 * ioctl not handled below returns ENOTTY.
 */
static int
fifo_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred,
struct thread *td)
{
	struct fifoinfo *fi;
	struct file filetmp;	/* Local, so need not be locked. */
	int error;

	/* Default result if no socket is consulted below. */
	error = ENOTTY;
	fi = fp->f_data;

	switch (com) {
	case FIONBIO:
		/*
		 * Non-blocking I/O is implemented at the fifo layer using
		 * MSG_NBIO, so does not need to be forwarded down the stack.
		 */
		return (0);

	case FIOASYNC:
	case FIOSETOWN:
	case FIOGETOWN:
		/*
		 * These socket ioctls don't have any ordering requirements,
		 * so are called in an arbitrary order, and only on the
		 * sockets indicated by the file descriptor rights.
		 *
		 * XXXRW: If O_RDWR and the read socket accepts an ioctl but
		 * the write socket doesn't, the socketpair is left in an
		 * inconsistent state.
		 */
		if (fp->f_flag & FREAD) {
			filetmp.f_data = fi->fi_readsock;
			filetmp.f_cred = cred;
			error = soo_ioctl(&filetmp, com, data, cred, td);
			if (error)
				return (error);
		}
		if (fp->f_flag & FWRITE) {
			filetmp.f_data = fi->fi_writesock;
			filetmp.f_cred = cred;
			error = soo_ioctl(&filetmp, com, data, cred, td);
		}
		return (error);

	case FIONREAD:
		/*
		 * FIONREAD will return 0 for non-readable descriptors, and
		 * the results of FIONREAD on the read socket for readable
		 * descriptors.
		 */
		if (!(fp->f_flag & FREAD)) {
			*(int *)data = 0;
			return (0);
		}
		filetmp.f_data = fi->fi_readsock;
		filetmp.f_cred = cred;
		return (soo_ioctl(&filetmp, com, data, cred, td));

	default:
		return (ENOTTY);
	}
}
/*
* Because fifos are now a file descriptor layer object, EVFILT_VNODE is not
* implemented. Likely, fifo_kqfilter() should be removed, and
fifo_kqfilter_f() should know how to forward the request to the underlying
* vnode using f_vnode in the file descriptor here.
*/
/*
 * fo_kqfilter for fifo file descriptors: attach a knote to the sockbuf
 * matching the requested filter.  A filter the descriptor cannot
 * satisfy (reading on a write-only fd or vice versa) is accepted with
 * no-op filterops instead of being rejected, so it simply never fires.
 */
static int
fifo_kqfilter_f(struct file *fp, struct knote *kn)
{
	struct fifoinfo *fi;
	struct socket *so;
	struct sockbuf *sb;

	fi = fp->f_data;

	/* Unsupported direction: attach quietly with no-op filterops. */
	if ((kn->kn_filter == EVFILT_READ && (fp->f_flag & FREAD) == 0) ||
	    (kn->kn_filter == EVFILT_WRITE && (fp->f_flag & FWRITE) == 0)) {
		kn->kn_fop = &fifo_notsup_filtops;
		return (0);
	}

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &fiforead_filtops;
		so = fi->fi_readsock;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &fifowrite_filtops;
		so = fi->fi_writesock;
		sb = &so->so_snd;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = (caddr_t)so;

	SOCKBUF_LOCK(sb);
	knlist_add(&sb->sb_sel.si_note, kn, 1);
	sb->sb_flags |= SB_KNOTE;
	SOCKBUF_UNLOCK(sb);

	return (0);
}
static int
fifo_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td)
int
fifo_iseof(struct file *fp)
{
struct fifoinfo *fip;
struct file filetmp;
int levents, revents = 0;
fip = fp->f_data;
levents = events &
(POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM | POLLRDBAND);
if ((fp->f_flag & FREAD) && levents) {
filetmp.f_data = fip->fi_readsock;
filetmp.f_cred = cred;
mtx_lock(&fifo_mtx);
if (fp->f_seqcount == fip->fi_wgen)
levents |= POLLINIGNEOF;
mtx_unlock(&fifo_mtx);
revents |= soo_poll(&filetmp, levents, cred, td);
}
levents = events & (POLLOUT | POLLWRNORM | POLLWRBAND);
if ((fp->f_flag & FWRITE) && levents) {
filetmp.f_data = fip->fi_writesock;
filetmp.f_cred = cred;
revents |= soo_poll(&filetmp, levents, cred, td);
}
return (revents);
KASSERT(fp->f_vnode != NULL, ("fifo_iseof: no vnode info"));
KASSERT(fp->f_vnode->v_fifoinfo != NULL, ("fifo_iseof: no fifoinfo"));
fip = fp->f_vnode->v_fifoinfo;
PIPE_LOCK_ASSERT(fip->fi_pipe, MA_OWNED);
return (fp->f_seqcount == fip->fi_wgen);
}
/*
 * fo_read for fifo file descriptors: pull data from the read-side
 * socket, honouring FNONBLOCK via MSG_NBIO.
 */
static int
fifo_read_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td)
{
	struct fifoinfo *fi;
	int rcvflags;

	KASSERT(uio->uio_rw == UIO_READ, ("fifo_read mode"));

	/* A zero-length read completes trivially. */
	if (uio->uio_resid == 0)
		return (0);

	fi = fp->f_data;
	rcvflags = (fp->f_flag & FNONBLOCK) ? MSG_NBIO : 0;
	return (soreceive(fi->fi_readsock, NULL, uio, NULL, NULL, &rcvflags));
}
/*
 * fo_stat for fifo file descriptors: stat information comes from the
 * underlying vnode, so defer to the generic vnode fileops.
 */
static int
fifo_stat_f(struct file *fp, struct stat *sb, struct ucred *cred, struct thread *td)
{
	return (vnops.fo_stat(fp, sb, cred, td));
}
/*
 * fo_truncate for fifo file descriptors: defer to the generic vnode
 * fileops truncate routine.
 */
static int
fifo_truncate_f(struct file *fp, off_t length, struct ucred *cred, struct thread *td)
{
	return (vnops.fo_truncate(fp, length, cred, td));
}
/*
 * fo_write for fifo file descriptors: push data to the write-side
 * socket, honouring FNONBLOCK via MSG_NBIO.
 */
static int
fifo_write_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td)
{
	struct fifoinfo *fi;
	int sndflags;

	KASSERT(uio->uio_rw == UIO_WRITE, ("fifo_write mode"));

	fi = fp->f_data;
	sndflags = (fp->f_flag & FNONBLOCK) ? MSG_NBIO : 0;
	return (sosend(fi->fi_writesock, NULL, uio, 0, NULL, sndflags, td));
}

View File

@ -1,5 +1,6 @@
/*-
* Copyright (c) 1996 John S. Dyson
* Copyright (c) 2012 Giovanni Trematerra
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -128,6 +129,8 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <fs/fifofs/fifo.h>
/*
* Use this define if you want to disable *fancy* VM things. Expect an
* approx 30% decrease in transfer rate. This could be useful for
@ -135,6 +138,9 @@ __FBSDID("$FreeBSD$");
*/
/* #define PIPE_NODIRECT */
#define PIPE_PEER(pipe) \
(((pipe)->pipe_state & PIPE_NAMED) ? (pipe) : ((pipe)->pipe_peer))
/*
* interfaces to the outside world
*/
@ -147,7 +153,7 @@ static fo_kqfilter_t pipe_kqfilter;
static fo_stat_t pipe_stat;
static fo_close_t pipe_close;
static struct fileops pipeops = {
struct fileops pipeops = {
.fo_read = pipe_read,
.fo_write = pipe_write,
.fo_truncate = pipe_truncate,
@ -162,9 +168,16 @@ static struct fileops pipeops = {
};
static void filt_pipedetach(struct knote *kn);
static void filt_pipedetach_notsup(struct knote *kn);
static int filt_pipenotsup(struct knote *kn, long hint);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);
static struct filterops pipe_nfiltops = {
.f_isfd = 1,
.f_detach = filt_pipedetach_notsup,
.f_event = filt_pipenotsup
};
static struct filterops pipe_rfiltops = {
.f_isfd = 1,
.f_detach = filt_pipedetach,
@ -208,6 +221,7 @@ static void pipeinit(void *dummy __unused);
static void pipeclose(struct pipe *cpipe);
static void pipe_free_kmem(struct pipe *cpipe);
static int pipe_create(struct pipe *pipe, int backing);
static int pipe_paircreate(struct thread *td, struct pipepair **p_pp);
static __inline int pipelock(struct pipe *cpipe, int catch);
static __inline void pipeunlock(struct pipe *cpipe);
static __inline void pipeselwakeup(struct pipe *cpipe);
@ -317,20 +331,14 @@ pipe_zone_fini(void *mem, int size)
mtx_destroy(&pp->pp_mtx);
}
/*
* The pipe system call for the DTYPE_PIPE type of pipes. If we fail, let
* the zone pick up the pieces via pipeclose().
*/
int
kern_pipe(struct thread *td, int fildes[2])
static int
pipe_paircreate(struct thread *td, struct pipepair **p_pp)
{
struct filedesc *fdp = td->td_proc->p_fd;
struct file *rf, *wf;
struct pipepair *pp;
struct pipe *rpipe, *wpipe;
int fd, error;
int error;
pp = uma_zalloc(pipe_zone, M_WAITOK);
*p_pp = pp = uma_zalloc(pipe_zone, M_WAITOK);
#ifdef MAC
/*
* The MAC label is shared between the connected endpoints. As a
@ -356,7 +364,59 @@ kern_pipe(struct thread *td, int fildes[2])
rpipe->pipe_state |= PIPE_DIRECTOK;
wpipe->pipe_state |= PIPE_DIRECTOK;
return (0);
}
int
pipe_named_ctor(struct pipe **ppipe, struct thread *td)
{
struct pipepair *pp;
int error;
error = pipe_paircreate(td, &pp);
if (error != 0)
return (error);
pp->pp_rpipe.pipe_state |= PIPE_NAMED;
*ppipe = &pp->pp_rpipe;
return (0);
}
void
pipe_dtor(struct pipe *dpipe)
{
ino_t ino;
ino = dpipe->pipe_ino;
funsetown(&dpipe->pipe_sigio);
pipeclose(dpipe);
if (dpipe->pipe_state & PIPE_NAMED) {
dpipe = dpipe->pipe_peer;
funsetown(&dpipe->pipe_sigio);
pipeclose(dpipe);
}
if (ino != 0 && ino != (ino_t)-1)
free_unr(pipeino_unr, ino);
}
/*
* The pipe system call for the DTYPE_PIPE type of pipes. If we fail, let
* the zone pick up the pieces via pipeclose().
*/
int
kern_pipe(struct thread *td, int fildes[2])
{
struct filedesc *fdp;
struct file *rf, *wf;
struct pipe *rpipe, *wpipe;
struct pipepair *pp;
int fd, error;
fdp = td->td_proc->p_fd;
error = pipe_paircreate(td, &pp);
if (error != 0)
return (error);
rpipe = &pp->pp_rpipe;
wpipe = &pp->pp_wpipe;
error = falloc(td, &rf, &fd, 0);
if (error) {
pipeclose(rpipe);
@ -582,11 +642,12 @@ pipe_read(fp, uio, active_cred, flags, td)
struct thread *td;
int flags;
{
struct pipe *rpipe = fp->f_data;
struct pipe *rpipe;
int error;
int nread = 0;
u_int size;
rpipe = fp->f_data;
PIPE_LOCK(rpipe);
++rpipe->pipe_busy;
error = pipelock(rpipe, 1);
@ -960,13 +1021,12 @@ pipe_write(fp, uio, active_cred, flags, td)
struct thread *td;
int flags;
{
int error = 0;
int error;
size_t desiredsize, orig_resid;
struct pipe *wpipe, *rpipe;
rpipe = fp->f_data;
wpipe = rpipe->pipe_peer;
wpipe = PIPE_PEER(rpipe);
PIPE_LOCK(rpipe);
error = pipelock(wpipe, 1);
if (error) {
@ -1243,6 +1303,9 @@ pipe_truncate(fp, length, active_cred, td)
struct thread *td;
{
/* For named pipes call the vnode operation. */
if (fp->f_vnode != NULL)
return (vnops.fo_truncate(fp, length, active_cred, td));
return (EINVAL);
}
@ -1285,6 +1348,11 @@ pipe_ioctl(fp, cmd, data, active_cred, td)
break;
case FIONREAD:
if (!(fp->f_flag & FREAD)) {
*(int *)data = 0;
PIPE_UNLOCK(mpipe);
return (0);
}
if (mpipe->pipe_state & PIPE_DIRECTW)
*(int *)data = mpipe->pipe_map.cnt;
else
@ -1327,26 +1395,28 @@ pipe_poll(fp, events, active_cred, td)
struct ucred *active_cred;
struct thread *td;
{
struct pipe *rpipe = fp->f_data;
struct pipe *rpipe;
struct pipe *wpipe;
int revents = 0;
int levents, revents;
#ifdef MAC
int error;
#endif
wpipe = rpipe->pipe_peer;
revents = 0;
rpipe = fp->f_data;
wpipe = PIPE_PEER(rpipe);
PIPE_LOCK(rpipe);
#ifdef MAC
error = mac_pipe_check_poll(active_cred, rpipe->pipe_pair);
if (error)
goto locked_error;
#endif
if (events & (POLLIN | POLLRDNORM))
if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM))
if ((rpipe->pipe_state & PIPE_DIRECTW) ||
(rpipe->pipe_buffer.cnt > 0))
revents |= events & (POLLIN | POLLRDNORM);
if (events & (POLLOUT | POLLWRNORM))
if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM))
if (wpipe->pipe_present != PIPE_ACTIVE ||
(wpipe->pipe_state & PIPE_EOF) ||
(((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
@ -1354,6 +1424,12 @@ pipe_poll(fp, events, active_cred, td)
wpipe->pipe_buffer.size == 0)))
revents |= events & (POLLOUT | POLLWRNORM);
levents = events &
(POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM | POLLRDBAND);
if (rpipe->pipe_state & PIPE_NAMED && fp->f_flag & FREAD && levents &&
fifo_iseof(fp))
events |= POLLINIGNEOF;
if ((events & POLLINIGNEOF) == 0) {
if (rpipe->pipe_state & PIPE_EOF) {
revents |= (events & (POLLIN | POLLRDNORM));
@ -1364,13 +1440,13 @@ pipe_poll(fp, events, active_cred, td)
}
if (revents == 0) {
if (events & (POLLIN | POLLRDNORM)) {
if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM)) {
selrecord(td, &rpipe->pipe_sel);
if (SEL_WAITING(&rpipe->pipe_sel))
rpipe->pipe_state |= PIPE_SEL;
}
if (events & (POLLOUT | POLLWRNORM)) {
if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM)) {
selrecord(td, &wpipe->pipe_sel);
if (SEL_WAITING(&wpipe->pipe_sel))
wpipe->pipe_state |= PIPE_SEL;
@ -1410,6 +1486,13 @@ pipe_stat(fp, ub, active_cred, td)
return (error);
}
#endif
/* For named pipes ask the underlying filesystem. */
if (pipe->pipe_state & PIPE_NAMED) {
PIPE_UNLOCK(pipe);
return (vnops.fo_stat(fp, ub, active_cred, td));
}
/*
* Lazily allocate an inode number for the pipe. Most pipe
* users do not call fstat(2) on the pipe, which means that
@ -1456,12 +1539,12 @@ pipe_close(fp, td)
struct file *fp;
struct thread *td;
{
struct pipe *cpipe = fp->f_data;
if (fp->f_vnode != NULL)
return vnops.fo_close(fp, td);
fp->f_ops = &badfileops;
pipe_dtor(fp->f_data);
fp->f_data = NULL;
funsetown(&cpipe->pipe_sigio);
pipeclose(cpipe);
return (0);
}
@ -1498,7 +1581,6 @@ pipeclose(cpipe)
{
struct pipepair *pp;
struct pipe *ppipe;
ino_t ino;
KASSERT(cpipe != NULL, ("pipeclose: cpipe == NULL"));
@ -1556,12 +1638,6 @@ pipeclose(cpipe)
seldrain(&cpipe->pipe_sel);
knlist_destroy(&cpipe->pipe_sel.si_note);
/*
* Postpone the destroy of the fake inode number allocated for
* our end, until pipe mtx is unlocked.
*/
ino = cpipe->pipe_ino;
/*
* If both endpoints are now closed, release the memory for the
* pipe pair. If not, unlock.
@ -1574,9 +1650,6 @@ pipeclose(cpipe)
uma_zfree(pipe_zone, cpipe->pipe_pair);
} else
PIPE_UNLOCK(cpipe);
if (ino != 0 && ino != (ino_t)-1)
free_unr(pipeino_unr, ino);
}
/*ARGSUSED*/
@ -1585,7 +1658,20 @@ pipe_kqfilter(struct file *fp, struct knote *kn)
{
struct pipe *cpipe;
cpipe = kn->kn_fp->f_data;
/*
* If a filter is requested that is not supported by this file
* descriptor, don't return an error, but also don't ever generate an
* event.
*/
if ((kn->kn_filter == EVFILT_READ) && !(fp->f_flag & FREAD)) {
kn->kn_fop = &pipe_nfiltops;
return (0);
}
if ((kn->kn_filter == EVFILT_WRITE) && !(fp->f_flag & FWRITE)) {
kn->kn_fop = &pipe_nfiltops;
return (0);
}
cpipe = fp->f_data;
PIPE_LOCK(cpipe);
switch (kn->kn_filter) {
case EVFILT_READ:
@ -1598,13 +1684,14 @@ pipe_kqfilter(struct file *fp, struct knote *kn)
PIPE_UNLOCK(cpipe);
return (EPIPE);
}
cpipe = cpipe->pipe_peer;
cpipe = PIPE_PEER(cpipe);
break;
default:
PIPE_UNLOCK(cpipe);
return (EINVAL);
}
kn->kn_hook = cpipe;
knlist_add(&cpipe->pipe_sel.si_note, kn, 1);
PIPE_UNLOCK(cpipe);
return (0);
@ -1613,11 +1700,9 @@ pipe_kqfilter(struct file *fp, struct knote *kn)
static void
filt_pipedetach(struct knote *kn)
{
struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;
struct pipe *cpipe = kn->kn_hook;
PIPE_LOCK(cpipe);
if (kn->kn_filter == EVFILT_WRITE)
cpipe = cpipe->pipe_peer;
knlist_remove(&cpipe->pipe_sel.si_note, kn, 1);
PIPE_UNLOCK(cpipe);
}
@ -1626,7 +1711,7 @@ filt_pipedetach(struct knote *kn)
static int
filt_piperead(struct knote *kn, long hint)
{
struct pipe *rpipe = kn->kn_fp->f_data;
struct pipe *rpipe = kn->kn_hook;
struct pipe *wpipe = rpipe->pipe_peer;
int ret;
@ -1651,15 +1736,15 @@ filt_piperead(struct knote *kn, long hint)
static int
filt_pipewrite(struct knote *kn, long hint)
{
struct pipe *rpipe = kn->kn_fp->f_data;
struct pipe *wpipe = rpipe->pipe_peer;
struct pipe *wpipe;
PIPE_LOCK(rpipe);
wpipe = kn->kn_hook;
PIPE_LOCK(wpipe);
if (wpipe->pipe_present != PIPE_ACTIVE ||
(wpipe->pipe_state & PIPE_EOF)) {
kn->kn_data = 0;
kn->kn_flags |= EV_EOF;
PIPE_UNLOCK(rpipe);
PIPE_UNLOCK(wpipe);
return (1);
}
kn->kn_data = (wpipe->pipe_buffer.size > 0) ?
@ -1667,6 +1752,19 @@ filt_pipewrite(struct knote *kn, long hint)
if (wpipe->pipe_state & PIPE_DIRECTW)
kn->kn_data = 0;
PIPE_UNLOCK(rpipe);
PIPE_UNLOCK(wpipe);
return (kn->kn_data >= PIPE_BUF);
}
/*
 * Detach routine for pipe_nfiltops: the attach recorded no state, so
 * there is nothing to undo.
 */
static void
filt_pipedetach_notsup(struct knote *kn)
{
}
/*
 * Event routine for pipe_nfiltops, used when the requested filter does
 * not match the descriptor's access mode: never report an event.
 */
static int
filt_pipenotsup(struct knote *kn, long hint)
{

	return (0);
}

View File

@ -57,6 +57,7 @@
* See sys_pipe.c for info on what these limits mean.
*/
extern long maxpipekva;
extern struct fileops pipeops;
/*
* Pipe buffer information.
@ -94,6 +95,7 @@ struct pipemapping {
#define PIPE_LWANT 0x200 /* Process wants exclusive access to pointers/data. */
#define PIPE_DIRECTW 0x400 /* Pipe direct write active. */
#define PIPE_DIRECTOK 0x800 /* Direct mode ok. */
#define PIPE_NAMED 0x1000 /* Is a named pipe. */
/*
* Per-pipe data structure.
@ -138,5 +140,7 @@ struct pipepair {
#define PIPE_UNLOCK(pipe) mtx_unlock(PIPE_MTX(pipe))
#define PIPE_LOCK_ASSERT(pipe, type) mtx_assert(PIPE_MTX(pipe), (type))
void pipe_dtor(struct pipe *dpipe);
int pipe_named_ctor(struct pipe **ppipe, struct thread *td);
#endif /* !_SYS_PIPE_H_ */