1996-01-28 23:38:26 +00:00
|
|
|
/*
|
|
|
|
* Copyright (c) 1996 John S. Dyson
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice immediately at the beginning of the file, without modification,
|
|
|
|
* this list of conditions, and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. Absolutely no warranty of function or purpose is made by the author
|
|
|
|
* John S. Dyson.
|
1996-02-11 22:09:50 +00:00
|
|
|
* 4. Modifications may be freely made to this file if the above conditions
|
1996-01-28 23:38:26 +00:00
|
|
|
* are met.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This file contains a high-performance replacement for the socket-based
|
|
|
|
* pipes scheme originally used in FreeBSD/4.4Lite. It does not support
|
|
|
|
* all features of sockets, but does do everything that pipes normally
|
|
|
|
* do.
|
|
|
|
*/
|
|
|
|
|
1996-02-04 19:56:35 +00:00
|
|
|
/*
|
|
|
|
* This code has two modes of operation, a small write mode and a large
|
|
|
|
* write mode. The small write mode acts like conventional pipes with
|
|
|
|
* a kernel buffer. If the buffer is less than PIPE_MINDIRECT, then the
|
|
|
|
* "normal" pipe buffering is done. If the buffer is between PIPE_MINDIRECT
|
|
|
|
* and PIPE_SIZE in size, it is fully mapped and wired into the kernel, and
|
|
|
|
* the receiving process can copy it directly from the pages in the sending
|
|
|
|
* process.
|
|
|
|
*
|
|
|
|
* If the sending process receives a signal, it is possible that it will
|
1996-02-05 05:50:34 +00:00
|
|
|
* go away, and certainly its address space can change, because control
|
1996-02-04 19:56:35 +00:00
|
|
|
* is returned back to the user-mode side. In that case, the pipe code
|
|
|
|
* arranges to copy the buffer supplied by the user process, to a pageable
|
|
|
|
* kernel buffer, and the receiving process will grab the data from the
|
|
|
|
* pageable kernel buffer. Since signals don't happen all that often,
|
|
|
|
* the copy operation is normally eliminated.
|
|
|
|
*
|
|
|
|
* The constant PIPE_MINDIRECT is chosen to make sure that buffering will
|
|
|
|
* happen for small transfers so that the system will not spend all of
|
2003-08-11 05:51:51 +00:00
|
|
|
* its time context switching.
|
2003-07-08 04:02:31 +00:00
|
|
|
*
|
2003-08-11 05:51:51 +00:00
|
|
|
* In order to limit the resource use of pipes, two sysctls exist:
|
2003-07-08 04:02:31 +00:00
|
|
|
*
|
2003-08-11 05:51:51 +00:00
|
|
|
* kern.ipc.maxpipekva - This is a hard limit on the amount of pageable
|
|
|
|
* address space available to us in pipe_map. Whenever the amount in use
|
|
|
|
* exceeds half of this value, all new pipes will be created with size
|
|
|
|
* SMALL_PIPE_SIZE, rather than PIPE_SIZE. Big pipe creation will be limited
|
|
|
|
* as well. This value is loader tunable only.
|
2003-07-08 04:02:31 +00:00
|
|
|
*
|
|
|
|
* kern.ipc.maxpipekvawired - This value limits the amount of memory that may
|
|
|
|
* be wired in order to facilitate direct copies using page flipping.
|
|
|
|
* Whenever this value is exceeded, pipes will fall back to using regular
|
2003-08-11 05:51:51 +00:00
|
|
|
* copies. This value is sysctl controllable at all times.
|
2003-07-08 04:02:31 +00:00
|
|
|
*
|
|
|
|
* These values are autotuned in subr_param.c.
|
|
|
|
*
|
|
|
|
* Memory usage may be monitored through the sysctls
|
|
|
|
* kern.ipc.pipes, kern.ipc.pipekva and kern.ipc.pipekvawired.
|
|
|
|
*
|
1996-02-04 19:56:35 +00:00
|
|
|
*/
|
|
|
|
|
2003-06-11 00:56:59 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
2002-08-13 02:47:13 +00:00
|
|
|
#include "opt_mac.h"
|
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
1997-03-23 03:37:54 +00:00
|
|
|
#include <sys/fcntl.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
#include <sys/file.h>
|
|
|
|
#include <sys/filedesc.h>
|
1997-03-24 11:52:29 +00:00
|
|
|
#include <sys/filio.h>
|
2002-02-27 11:27:48 +00:00
|
|
|
#include <sys/kernel.h>
|
2001-05-01 08:13:21 +00:00
|
|
|
#include <sys/lock.h>
|
2002-08-13 02:47:13 +00:00
|
|
|
#include <sys/mac.h>
|
2001-05-19 01:28:09 +00:00
|
|
|
#include <sys/mutex.h>
|
1997-03-24 11:52:29 +00:00
|
|
|
#include <sys/ttycom.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
#include <sys/stat.h>
|
2002-03-09 22:06:31 +00:00
|
|
|
#include <sys/malloc.h>
|
1997-09-14 02:43:25 +00:00
|
|
|
#include <sys/poll.h>
|
2001-01-09 04:33:49 +00:00
|
|
|
#include <sys/selinfo.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
#include <sys/signalvar.h>
|
2003-07-08 04:02:31 +00:00
|
|
|
#include <sys/sysctl.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
#include <sys/sysproto.h>
|
|
|
|
#include <sys/pipe.h>
|
2001-05-01 08:13:21 +00:00
|
|
|
#include <sys/proc.h>
|
1999-12-26 13:04:52 +00:00
|
|
|
#include <sys/vnode.h>
|
1998-03-28 10:33:27 +00:00
|
|
|
#include <sys/uio.h>
|
2000-04-16 18:53:38 +00:00
|
|
|
#include <sys/event.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
|
|
|
|
#include <vm/vm.h>
|
|
|
|
#include <vm/vm_param.h>
|
|
|
|
#include <vm/vm_object.h>
|
|
|
|
#include <vm/vm_kern.h>
|
|
|
|
#include <vm/vm_extern.h>
|
|
|
|
#include <vm/pmap.h>
|
|
|
|
#include <vm/vm_map.h>
|
1996-02-04 19:56:35 +00:00
|
|
|
#include <vm/vm_page.h>
|
2002-03-20 04:09:59 +00:00
|
|
|
#include <vm/uma.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
|
1996-02-11 22:09:50 +00:00
|
|
|
/*
|
|
|
|
* Use this define if you want to disable *fancy* VM things. Expect an
|
|
|
|
* approx 30% decrease in transfer rate. This could be useful for
|
|
|
|
* NetBSD or OpenBSD.
|
|
|
|
*/
|
|
|
|
/* #define PIPE_NODIRECT */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* interfaces to the outside world
|
|
|
|
*/
|
2002-12-24 09:44:51 +00:00
|
|
|
/*
 * Forward declarations of the pipe implementations of the fileops
 * methods; they are installed in the pipeops vector below.
 */
static fo_rdwr_t pipe_read;
static fo_rdwr_t pipe_write;
static fo_ioctl_t pipe_ioctl;
static fo_poll_t pipe_poll;
static fo_kqfilter_t pipe_kqfilter;
static fo_stat_t pipe_stat;
static fo_close_t pipe_close;
|
1996-01-28 23:38:26 +00:00
|
|
|
|
2001-02-15 16:34:11 +00:00
|
|
|
/*
 * File operations vector for DTYPE_PIPE descriptors; installed on both
 * the read and write struct file in pipe().
 */
static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_poll = pipe_poll,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	/* NOTE(review): DFLAG_PASSABLE presumably marks these descriptors
	 * as transferable between processes — confirm against fileops docs. */
	.fo_flags = DFLAG_PASSABLE
};
|
1996-01-28 23:38:26 +00:00
|
|
|
|
2000-04-16 18:53:38 +00:00
|
|
|
static void filt_pipedetach(struct knote *kn);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);

/*
 * kqueue filter ops for pipe endpoints, handed out by pipe_kqfilter.
 * The leading 1 is the f_isfd field (filter is attached via a file
 * descriptor); no attach routine is needed.
 */
static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };
|
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
/*
|
|
|
|
* Default pipe buffer size(s), this can be kind-of large now because pipe
|
|
|
|
* space is pageable. The pipe code will try to maintain locality of
|
|
|
|
* reference for performance reasons, so small amounts of outstanding I/O
|
|
|
|
* will not wipe the cache.
|
|
|
|
*/
|
1996-02-04 19:56:35 +00:00
|
|
|
#define MINPIPESIZE (PIPE_SIZE/3)
|
|
|
|
#define MAXPIPESIZE (2*PIPE_SIZE/3)
|
|
|
|
|
1996-07-13 22:52:50 +00:00
|
|
|
/*
|
|
|
|
* Limit the number of "big" pipes
|
|
|
|
*/
|
|
|
|
#define LIMITBIGPIPES 32
|
1998-02-09 06:11:36 +00:00
|
|
|
/* Current number of "big" (PIPE_SIZE-buffered) pipes. */
static int nbigpipe;

/* Resource accounting, exported read-only through kern.ipc below. */
static int amountpipes;
static int amountpipekva;
static int amountpipekvawired;

SYSCTL_DECL(_kern_ipc);

/*
 * maxpipekva is a loader tunable only (CTLFLAG_RDTUN); maxpipekvawired
 * may be changed at runtime.  The remaining nodes are read-only
 * monitoring counters.  See the limits discussion in the comment at
 * the top of this file.
 */
SYSCTL_INT(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RDTUN,
    &maxpipekva, 0, "Pipe KVA limit");
SYSCTL_INT(_kern_ipc, OID_AUTO, maxpipekvawired, CTLFLAG_RW,
    &maxpipekvawired, 0, "Pipe KVA wired limit");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipes, CTLFLAG_RD,
    &amountpipes, 0, "Current # of pipes");
SYSCTL_INT(_kern_ipc, OID_AUTO, bigpipes, CTLFLAG_RD,
    &nbigpipe, 0, "Current # of big pipes");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD,
    &amountpipekva, 0, "Pipe KVA usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipekvawired, CTLFLAG_RD,
    &amountpipekvawired, 0, "Pipe wired KVA usage");
|
1996-01-28 23:38:26 +00:00
|
|
|
|
2002-02-27 18:51:53 +00:00
|
|
|
/* Forward declarations of local routines. */
static void pipeinit(void *dummy __unused);
static void pipeclose(struct pipe *cpipe);
static void pipe_free_kmem(struct pipe *cpipe);
static int pipe_create(struct pipe *pipe);
static __inline int pipelock(struct pipe *cpipe, int catch);
static __inline void pipeunlock(struct pipe *cpipe);
static __inline void pipeselwakeup(struct pipe *cpipe);
#ifndef PIPE_NODIRECT
/* Direct-copy ("page flipping") write path; compiled out by PIPE_NODIRECT. */
static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
static void pipe_destroy_write_buffer(struct pipe *wpipe);
static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
static void pipe_clone_write_buffer(struct pipe *wpipe);
#endif
static int pipespace(struct pipe *cpipe, int size);
|
1996-01-28 23:38:26 +00:00
|
|
|
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
/* UMA callbacks for the pipepair zone created in pipeinit(). */
static void pipe_zone_ctor(void *mem, int size, void *arg);
static void pipe_zone_dtor(void *mem, int size, void *arg);
static void pipe_zone_init(void *mem, int size);
static void pipe_zone_fini(void *mem, int size);

/* Zone from which all struct pipepair allocations are made. */
static uma_zone_t pipe_zone;
|
1997-08-05 00:02:08 +00:00
|
|
|
|
2002-02-27 11:27:48 +00:00
|
|
|
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);
|
|
|
|
|
|
|
|
/*
 * System initialization: create the UMA zone backing struct pipepair
 * allocations.  Invoked via the SYSINIT above at SI_SUB_VFS time.
 */
static void
pipeinit(void *dummy __unused)
{

	pipe_zone = uma_zcreate("PIPE", sizeof(struct pipepair),
	    pipe_zone_ctor, pipe_zone_dtor, pipe_zone_init, pipe_zone_fini,
	    UMA_ALIGN_PTR, 0);
	KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
}
|
|
|
|
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
static void
|
|
|
|
pipe_zone_ctor(void *mem, int size, void *arg)
|
|
|
|
{
|
|
|
|
struct pipepair *pp;
|
|
|
|
struct pipe *rpipe, *wpipe;
|
|
|
|
|
|
|
|
KASSERT(size == sizeof(*pp), ("pipe_zone_ctor: wrong size"));
|
|
|
|
|
|
|
|
pp = (struct pipepair *)mem;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We zero both pipe endpoints to make sure all the kmem pointers
|
|
|
|
* are NULL, flag fields are zero'd, etc. We timestamp both
|
|
|
|
* endpoints with the same time.
|
|
|
|
*/
|
|
|
|
rpipe = &pp->pp_rpipe;
|
|
|
|
bzero(rpipe, sizeof(*rpipe));
|
|
|
|
vfs_timestamp(&rpipe->pipe_ctime);
|
|
|
|
rpipe->pipe_atime = rpipe->pipe_mtime = rpipe->pipe_ctime;
|
|
|
|
|
|
|
|
wpipe = &pp->pp_wpipe;
|
|
|
|
bzero(wpipe, sizeof(*wpipe));
|
|
|
|
wpipe->pipe_ctime = rpipe->pipe_ctime;
|
|
|
|
wpipe->pipe_atime = wpipe->pipe_mtime = rpipe->pipe_ctime;
|
|
|
|
|
|
|
|
rpipe->pipe_peer = wpipe;
|
|
|
|
rpipe->pipe_pair = pp;
|
|
|
|
wpipe->pipe_peer = rpipe;
|
|
|
|
wpipe->pipe_pair = pp;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Mark both endpoints as present; they will later get free'd
|
|
|
|
* one at a time. When both are free'd, then the whole pair
|
|
|
|
* is released.
|
|
|
|
*/
|
|
|
|
rpipe->pipe_present = 1;
|
|
|
|
wpipe->pipe_present = 1;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Eventually, the MAC Framework may initialize the label
|
|
|
|
* in ctor or init, but for now we do it elswhere to avoid
|
|
|
|
* blocking in ctor or init.
|
|
|
|
*/
|
|
|
|
pp->pp_label = NULL;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * UMA destructor: runs when a pipepair is returned to the zone.  No
 * per-allocation teardown is needed here — the pair mutex is destroyed
 * in pipe_zone_fini() when the backing memory itself is released.
 */
static void
pipe_zone_dtor(void *mem, int size, void *arg)
{
	struct pipepair *pp;

	KASSERT(size == sizeof(*pp), ("pipe_zone_dtor: wrong size"));

	pp = (struct pipepair *)mem;
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
pipe_zone_init(void *mem, int size)
|
|
|
|
{
|
|
|
|
struct pipepair *pp;
|
|
|
|
|
|
|
|
KASSERT(size == sizeof(*pp), ("pipe_zone_init: wrong size"));
|
|
|
|
|
|
|
|
pp = (struct pipepair *)mem;
|
|
|
|
|
|
|
|
mtx_init(&pp->pp_mtx, "pipe mutex", NULL, MTX_DEF | MTX_RECURSE);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
pipe_zone_fini(void *mem, int size)
|
|
|
|
{
|
|
|
|
struct pipepair *pp;
|
|
|
|
|
|
|
|
KASSERT(size == sizeof(*pp), ("pipe_zone_fini: wrong size"));
|
|
|
|
|
|
|
|
pp = (struct pipepair *)mem;
|
|
|
|
|
|
|
|
mtx_destroy(&pp->pp_mtx);
|
|
|
|
}
|
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
/*
 * The pipe system call for the DTYPE_PIPE type of pipes.  Allocates a
 * pipepair from the zone, wires up both endpoints, and installs a file
 * descriptor for each in the calling thread's descriptor table
 * (td_retval[0] = read side, td_retval[1] = write side).  If we fail,
 * let the zone pick up the pieces via pipeclose().
 */

/* ARGSUSED */
int
pipe(td, uap)
	struct thread *td;
	struct pipe_args /* {
		int	dummy;
	} */ *uap;
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *rf, *wf;
	struct pipepair *pp;
	struct pipe *rpipe, *wpipe;
	int fd, error;

	pp = uma_zalloc(pipe_zone, M_WAITOK);
#ifdef MAC
	/*
	 * struct pipe represents a pipe endpoint.  The MAC label is shared
	 * between the connected endpoints.  As a result mac_init_pipe() and
	 * mac_create_pipe() should only be called on one of the endpoints
	 * after they have been connected.
	 */
	mac_init_pipe(pp);
	mac_create_pipe(td->td_ucred, pp);
#endif
	rpipe = &pp->pp_rpipe;
	wpipe = &pp->pp_wpipe;

	/* Give each endpoint its buffer; on failure, release both halves. */
	if (pipe_create(rpipe) || pipe_create(wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	rpipe->pipe_state |= PIPE_DIRECTOK;
	wpipe->pipe_state |= PIPE_DIRECTOK;

	/* Allocate the read-side descriptor. */
	error = falloc(td, &rf, &fd);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	/* An extra reference on `rf' has been held for us by falloc(). */
	td->td_retval[0] = fd;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	FILE_LOCK(rf);
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = rpipe;
	rf->f_ops = &pipeops;
	FILE_UNLOCK(rf);
	/* Allocate the write-side descriptor. */
	error = falloc(td, &wf, &fd);
	if (error) {
		/*
		 * If the read-side descriptor is still in the table,
		 * remove it and drop the table's reference; another
		 * thread may already have dup()ed or closed it, in
		 * which case only our extra falloc() reference remains.
		 * Hence the second, unconditional fdrop() below.
		 */
		FILEDESC_LOCK(fdp);
		if (fdp->fd_ofiles[td->td_retval[0]] == rf) {
			fdp->fd_ofiles[td->td_retval[0]] = NULL;
			fdunused(fdp, td->td_retval[0]);
			FILEDESC_UNLOCK(fdp);
			fdrop(rf, td);
		} else {
			FILEDESC_UNLOCK(fdp);
		}
		fdrop(rf, td);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	/* An extra reference on `wf' has been held for us by falloc(). */
	FILE_LOCK(wf);
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = wpipe;
	wf->f_ops = &pipeops;
	FILE_UNLOCK(wf);
	fdrop(wf, td);
	td->td_retval[1] = fd;
	fdrop(rf, td);

	return (0);
}
|
|
|
|
|
/*
 * Allocate kva for pipe circular buffer, the space is pageable
 * This routine will 'realloc' the size of a pipe safely, if it fails
 * it will retain the old buffer.
 * If it fails it will return ENOMEM.
 */
static int
pipespace(cpipe, size)
	struct pipe *cpipe;
	int size;
{
	caddr_t buffer;
	int error;
	/* Rate-limit state for the "out of pipe kva" console warning. */
	static int curfail = 0;
	static struct timeval lastfail;

	/*
	 * The pipe mutex must NOT be held here: vm_map_find() below may
	 * sleep while allocating map entries.
	 */
	KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked"));

	size = round_page(size);
	/*
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	buffer = (caddr_t) vm_map_min(pipe_map);

	/*
	 * The map entry is, by default, pageable.
	 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
	 */
	error = vm_map_find(pipe_map, NULL, 0,
		(vm_offset_t *) &buffer, size, 1,
		VM_PROT_ALL, VM_PROT_ALL, 0);
	if (error != KERN_SUCCESS) {
		/* At most one warning per second; see ppsratecheck(9). */
		if (ppsratecheck(&lastfail, &curfail, 1))
			printf("kern.ipc.maxpipekva exceeded; see tuning(7)\n");
		return (ENOMEM);
	}

	/* free old resources if we're resizing */
	pipe_free_kmem(cpipe);
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	/* Global accounting of pipe count and buffer kva in use. */
	atomic_add_int(&amountpipes, 1);
	atomic_add_int(&amountpipekva, cpipe->pipe_buffer.size);
	return (0);
}
|
|
|
|
|
|
|
|
/*
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
* Initialize and allocate VM and memory for pipe. The structure
|
|
|
|
* will start out zero'd from the ctor, so we just manage the kmem.
|
1996-02-04 19:56:35 +00:00
|
|
|
*/
|
2001-05-08 09:09:18 +00:00
|
|
|
static int
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
pipe_create(pipe)
|
|
|
|
struct pipe *pipe;
|
1996-02-04 19:56:35 +00:00
|
|
|
{
|
2001-05-08 09:09:18 +00:00
|
|
|
int error;
|
1996-01-28 23:38:26 +00:00
|
|
|
|
2003-07-08 04:02:31 +00:00
|
|
|
/*
|
|
|
|
* Reduce to 1/4th pipe size if we're over our global max.
|
|
|
|
*/
|
2003-08-11 05:51:51 +00:00
|
|
|
if (amountpipekva > maxpipekva / 2)
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
error = pipespace(pipe, SMALL_PIPE_SIZE);
|
2003-07-08 04:02:31 +00:00
|
|
|
else
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
error = pipespace(pipe, PIPE_SIZE);
|
2001-05-17 19:47:09 +00:00
|
|
|
if (error)
|
2001-05-08 09:09:18 +00:00
|
|
|
return (error);
|
|
|
|
|
|
|
|
return (0);
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* lock a pipe for I/O, blocking other access
|
|
|
|
*/
|
|
|
|
static __inline int
|
1996-02-04 19:56:35 +00:00
|
|
|
pipelock(cpipe, catch)
|
1996-01-28 23:38:26 +00:00
|
|
|
struct pipe *cpipe;
|
1996-02-04 19:56:35 +00:00
|
|
|
int catch;
|
1996-01-28 23:38:26 +00:00
|
|
|
{
|
1996-01-31 06:00:45 +00:00
|
|
|
int error;
|
2001-05-08 09:09:18 +00:00
|
|
|
|
2002-02-27 07:35:59 +00:00
|
|
|
PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
|
|
|
|
while (cpipe->pipe_state & PIPE_LOCKFL) {
|
1996-01-28 23:38:26 +00:00
|
|
|
cpipe->pipe_state |= PIPE_LWANT;
|
2002-02-27 07:35:59 +00:00
|
|
|
error = msleep(cpipe, PIPE_MTX(cpipe),
|
|
|
|
catch ? (PRIBIO | PCATCH) : PRIBIO,
|
2001-05-17 19:47:09 +00:00
|
|
|
"pipelk", 0);
|
2004-01-11 19:54:45 +00:00
|
|
|
if (error != 0)
|
2001-05-17 19:47:09 +00:00
|
|
|
return (error);
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
2002-02-27 07:35:59 +00:00
|
|
|
cpipe->pipe_state |= PIPE_LOCKFL;
|
2001-05-17 19:47:09 +00:00
|
|
|
return (0);
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* unlock a pipe I/O lock
|
|
|
|
*/
|
|
|
|
static __inline void
|
|
|
|
pipeunlock(cpipe)
|
|
|
|
struct pipe *cpipe;
|
|
|
|
{
|
2001-05-08 09:09:18 +00:00
|
|
|
|
2002-02-27 07:35:59 +00:00
|
|
|
PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
|
|
|
|
cpipe->pipe_state &= ~PIPE_LOCKFL;
|
1996-01-28 23:38:26 +00:00
|
|
|
if (cpipe->pipe_state & PIPE_LWANT) {
|
|
|
|
cpipe->pipe_state &= ~PIPE_LWANT;
|
1996-02-22 03:33:52 +00:00
|
|
|
wakeup(cpipe);
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1996-02-11 22:09:50 +00:00
|
|
|
static __inline void
|
|
|
|
pipeselwakeup(cpipe)
|
|
|
|
struct pipe *cpipe;
|
|
|
|
{
|
2001-05-08 09:09:18 +00:00
|
|
|
|
1996-02-11 22:09:50 +00:00
|
|
|
if (cpipe->pipe_state & PIPE_SEL) {
|
|
|
|
cpipe->pipe_state &= ~PIPE_SEL;
|
2003-11-09 09:17:26 +00:00
|
|
|
selwakeuppri(&cpipe->pipe_sel, PSOCK);
|
1996-02-11 22:09:50 +00:00
|
|
|
}
|
Installed the second patch attached to kern/7899 with some changes suggested
by bde, a few other tweaks to get the patch to apply cleanly again and
some improvements to the comments.
This change closes some fairly minor security holes associated with
F_SETOWN, fixes a few bugs, and removes some limitations that F_SETOWN
had on tty devices. For more details, see the description on the PR.
Because this patch increases the size of the proc and pgrp structures,
it is necessary to re-install the includes and recompile libkvm,
the vinum lkm, fstat, gcore, gdb, ipfilter, ps, top, and w.
PR: kern/7899
Reviewed by: bde, elvind
1998-11-11 10:04:13 +00:00
|
|
|
if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
|
2002-05-01 20:44:46 +00:00
|
|
|
pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
|
2000-04-16 18:53:38 +00:00
|
|
|
KNOTE(&cpipe->pipe_sel.si_note, 0);
|
1996-02-11 22:09:50 +00:00
|
|
|
}
|
|
|
|
|
/* ARGSUSED */
static int
pipe_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct pipe *rpipe = fp->f_data;
	int error;
	int nread = 0;
	u_int size;

	PIPE_LOCK(rpipe);
	/* pipe_busy holds off PIPE_WANT teardown processing while we work. */
	++rpipe->pipe_busy;
	error = pipelock(rpipe, 1);
	if (error)
		goto unlocked_error;

#ifdef MAC
	/* MAC check uses the caller's active credential, not fp->f_cred. */
	error = mac_check_pipe_read(active_cred, rpipe->pipe_pair);
	if (error)
		goto locked_error;
#endif

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			/*
			 * Copy at most: up to the buffer wrap point,
			 * the bytes available, and the bytes requested.
			 */
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			/*
			 * Drop the pipe mutex across uiomove(): it may
			 * fault on the user buffer and sleep.  The I/O
			 * lock (pipelock) still excludes other readers.
			 */
			PIPE_UNLOCK(rpipe);
			error = uiomove(
			    &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
			    size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;

			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
#ifndef PIPE_NODIRECT
		/*
		 * Direct copy, bypassing a kernel buffer.
		 */
		} else if ((size = rpipe->pipe_map.cnt) &&
			   (rpipe->pipe_state & PIPE_DIRECTW)) {
			caddr_t	va;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			/* Read straight out of the writer's wired pages. */
			va = (caddr_t) rpipe->pipe_map.kva +
			    rpipe->pipe_map.pos;
			PIPE_UNLOCK(rpipe);
			error = uiomove(va, size, uio);
			PIPE_LOCK(rpipe);
			if (error)
				break;
			nread += size;
			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				/* Direct write consumed; let writer resume. */
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * Unlock the pipe buffer for our remaining processing.
			 * We will either break out with an error or we will
			 * sleep and relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * Handle non-blocking mode operation or
			 * wait for more data.
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
			} else {
				rpipe->pipe_state |= PIPE_WANTR;
				if ((error = msleep(rpipe, PIPE_MTX(rpipe),
				    PRIBIO | PCATCH,
				    "piperd", 0)) == 0)
					error = pipelock(rpipe, 1);
			}
			/* On failure the I/O lock is not held here. */
			if (error)
				goto unlocked_error;
		}
	}
#ifdef MAC
locked_error:
#endif
	pipeunlock(rpipe);

	/* XXX: should probably do this before getting any locks. */
	if (error == 0)
		vfs_timestamp(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANT processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	/* Only notify pollers once a PIPE_BUF-sized write could succeed. */
	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);

	PIPE_UNLOCK(rpipe);
	return (error);
}
|
|
|
|
|
1996-02-11 22:09:50 +00:00
|
|
|
#ifndef PIPE_NODIRECT
|
/*
 * Map the sending processes' buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	pmap_t pmap;
	u_int size;
	int i, j;
	vm_offset_t addr, endaddr;

	/* Must be unlocked: faulting and kva allocation below can sleep. */
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);

	/* Wire at most one pipe buffer's worth of the first iovec. */
	size = (u_int) uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	pmap = vmspace_pmap(curproc->p_vmspace);
	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
	for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
		/*
		 * vm_fault_quick() can sleep.  Consequently,
		 * vm_page_lock_queue() and vm_page_unlock_queue()
		 * should not be performed outside of this loop.
		 */
	race:
		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0) {
			/* Fault failed: unhold everything wired so far. */
			vm_page_lock_queues();
			for (j = 0; j < i; j++)
				vm_page_unhold(wpipe->pipe_map.ms[j]);
			vm_page_unlock_queues();
			return (EFAULT);
		}
		/*
		 * The page may have been evicted between the fault and
		 * the lookup; if so, retry the fault for this address.
		 */
		wpipe->pipe_map.ms[i] = pmap_extract_and_hold(pmap, addr,
		    VM_PROT_READ);
		if (wpipe->pipe_map.ms[i] == NULL)
			goto race;
	}

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and map the buffer
	 */
	if (wpipe->pipe_map.kva == 0) {
		/*
		 * We need to allocate space for an extra page because the
		 * address range might (will) span pages at times.
		 */
		wpipe->pipe_map.kva = kmem_alloc_nofault(kernel_map,
			wpipe->pipe_buffer.size + PAGE_SIZE);
		atomic_add_int(&amountpipekvawired,
		    wpipe->pipe_buffer.size + PAGE_SIZE);
	}
	pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
		wpipe->pipe_map.npages);

	/*
	 * and update the uio data
	 */

	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}
|
|
|
|
|
|
|
|
/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	/* Unlocked: kmem_free() below can sleep. */
	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
	if (wpipe->pipe_map.kva) {
		pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);

		/*
		 * The kva window is normally cached for reuse; give it
		 * back only when wired pipe kva is over half its budget.
		 */
		if (amountpipekvawired > maxpipekvawired / 2) {
			/* Conserve address space */
			vm_offset_t kva = wpipe->pipe_map.kva;
			wpipe->pipe_map.kva = 0;
			kmem_free(kernel_map, kva,
			    wpipe->pipe_buffer.size + PAGE_SIZE);
			atomic_subtract_int(&amountpipekvawired,
			    wpipe->pipe_buffer.size + PAGE_SIZE);
		}
	}
	/* Release the hold taken on each source page when it was wired. */
	vm_page_lock_queues();
	for (i = 0; i < wpipe->pipe_map.npages; i++) {
		vm_page_unhold(wpipe->pipe_map.ms[i]);
	}
	vm_page_unlock_queues();
	wpipe->pipe_map.npages = 0;
}
|
|
|
|
|
|
|
|
/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 *
 * On entry the direct-write mapping holds pipe_map.cnt bytes starting
 * at offset pipe_map.pos.  The data is republished as an ordinary
 * buffered-pipe payload (in/out/cnt reset accordingly), PIPE_DIRECTW is
 * cleared, and the direct mapping is destroyed.
 *
 * Called with the pipe mutex held (asserted); the mutex is dropped
 * around the copy and teardown because pipe_destroy_write_buffer()
 * must run without it.
 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int size;
	int pos;

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;

	/* Present the direct-write data as a full circular-buffer fill. */
	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	PIPE_UNLOCK(wpipe);
	/* Copy out of the wired kernel mapping before it is torn down. */
	bcopy((caddr_t) wpipe->pipe_map.kva + pos,
	    wpipe->pipe_buffer.buffer, size);
	pipe_destroy_write_buffer(wpipe);
	PIPE_LOCK(wpipe);
}
|
|
|
|
|
|
|
|
/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 *
 * Returns 0 on success or an errno (EPIPE on reader disappearance,
 * or the msleep() error on signal).  Called and returns with the pipe
 * mutex held.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

retry:
	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	/* Wait for any previously pending direct write to complete. */
	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	/*
	 * Any buffered data must be drained by the reader before the
	 * direct transfer may begin; wait and then re-check everything.
	 */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			goto error1;
		}
		goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	/*
	 * Wire and map the sender's pages; the pipe mutex is dropped
	 * because pipe_build_write_buffer() touches user memory.
	 * pipelock() keeps other writers out meanwhile.
	 */
	pipelock(wpipe, 0);
	PIPE_UNLOCK(wpipe);
	error = pipe_build_write_buffer(wpipe, uio);
	PIPE_LOCK(wpipe);
	pipeunlock(wpipe);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		goto error1;
	}

	/*
	 * Sleep until the reader consumes the mapped data (it clears
	 * PIPE_DIRECTW) or the read side goes away.
	 */
	error = 0;
	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipelock(wpipe, 0);
			PIPE_UNLOCK(wpipe);
			pipe_destroy_write_buffer(wpipe);
			PIPE_LOCK(wpipe);
			pipeselwakeup(wpipe);
			pipeunlock(wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
		    "pipdwt", 0);
	}

	pipelock(wpipe,0);
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		/* Transfer completed normally; release the mapping. */
		PIPE_UNLOCK(wpipe);
		pipe_destroy_write_buffer(wpipe);
		PIPE_LOCK(wpipe);
	}
	pipeunlock(wpipe);
	return (error);

error1:
	/* Wake any reader blocked on this pipe before bailing out. */
	wakeup(wpipe);
	return (error);
}
#endif
|
2004-01-11 19:54:45 +00:00
|
|
|
|
1996-07-04 04:36:56 +00:00
|
|
|
/*
 * pipe_write: fo_write() entry point for pipes.
 *
 * Writes uio into the pipe backing fp.  Large writes from blocking
 * descriptors may use the direct (page-mapping) path via
 * pipe_direct_write(); everything else is copied into the circular
 * kernel buffer, honoring POSIX atomicity for writes <= PIPE_BUF.
 *
 * Returns 0 on success, EPIPE if the read side is gone (unless some
 * data was already transferred), EAGAIN for a full non-blocking pipe,
 * or an msleep()/uiomove() error.  Acquires and releases the pipe
 * mutex internally; note locking is done via rpipe, which shares its
 * mutex with wpipe.
 */
static int
pipe_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	int error = 0;
	int orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = fp->f_data;
	wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		PIPE_UNLOCK(rpipe);
		return (EPIPE);
	}
#ifdef MAC
	error = mac_check_pipe_write(active_cred, wpipe->pipe_pair);
	if (error) {
		PIPE_UNLOCK(rpipe);
		return (error);
	}
#endif
	/* Keep the write side alive while we are working on it. */
	++wpipe->pipe_busy;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.  Only grow an idle, still-default-sized buffer, and only
	 * while pipe KVA usage and the big-pipe count are under limits.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
		(amountpipekva < maxpipekva / 2) &&
		(nbigpipe < LIMITBIGPIPES) &&
		(wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
		(wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		(wpipe->pipe_buffer.cnt == 0)) {

		if ((error = pipelock(wpipe, 1)) == 0) {
			/* pipespace() allocates, so drop the mutex. */
			PIPE_UNLOCK(wpipe);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				atomic_add_int(&nbigpipe, 1);
			PIPE_LOCK(wpipe);
			pipeunlock(wpipe);
		}
	}

	/*
	 * If an early error occured unbusy and return, waking up any pending
	 * readers.
	 */
	if (error) {
		--wpipe->pipe_busy;
		if ((wpipe->pipe_busy == 0) &&
		    (wpipe->pipe_state & PIPE_WANT)) {
			wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
			wakeup(wpipe);
		}
		PIPE_UNLOCK(rpipe);
		return(error);
	}

	/* Remember the request size for PIPE_BUF atomicity decisions. */
	orig_resid = uio->uio_resid;

	while (uio->uio_resid) {
		int space;

#ifndef PIPE_NODIRECT
		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
		    (fp->f_flag & FNONBLOCK) == 0 &&
		    amountpipekvawired + uio->uio_resid < maxpipekvawired) {
			error = pipe_direct_write(wpipe, uio);
			if (error)
				break;
			continue;
		}
#endif

		/*
		 * Pipe buffered writes cannot be coincidental with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.  We break out if a signal occurs or the
		 * reader goes away.
		 */
	retrywrite:
		while (wpipe->pipe_state & PIPE_DIRECTW) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
			    "pipbww", 0);
			if (wpipe->pipe_state & PIPE_EOF)
				break;
			if (error)
				break;
		}
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		if (space > 0) {
			if ((error = pipelock(wpipe,1)) == 0) {
				int size;	/* Transfer size */
				int segsize;	/* first segment to transfer */

				/*
				 * It is possible for a direct write to
				 * slip in on us... handle it here...
				 */
				if (wpipe->pipe_state & PIPE_DIRECTW) {
					pipeunlock(wpipe);
					goto retrywrite;
				}
				/*
				 * If a process blocked in uiomove, our
				 * value for space might be bad.
				 *
				 * XXX will we be ok if the reader has gone
				 * away here?
				 */
				if (space > wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.cnt) {
					pipeunlock(wpipe);
					goto retrywrite;
				}

				/*
				 * Transfer size is minimum of uio transfer
				 * and free space in pipe buffer.
				 */
				if (space > uio->uio_resid)
					size = uio->uio_resid;
				else
					size = space;
				/*
				 * First segment to transfer is minimum of
				 * transfer size and contiguous space in
				 * pipe buffer.  If first segment to transfer
				 * is less than the transfer size, we've got
				 * a wraparound in the buffer.
				 */
				segsize = wpipe->pipe_buffer.size -
					wpipe->pipe_buffer.in;
				if (segsize > size)
					segsize = size;

				/* Transfer first segment */

				/* uiomove() may fault, so drop the mutex. */
				PIPE_UNLOCK(rpipe);
				error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
						segsize, uio);
				PIPE_LOCK(rpipe);

				if (error == 0 && segsize < size) {
					/*
					 * Transfer remaining part now, to
					 * support atomic writes.  Wraparound
					 * happened.
					 */
					if (wpipe->pipe_buffer.in + segsize !=
					    wpipe->pipe_buffer.size)
						panic("Expected pipe buffer "
						    "wraparound disappeared");

					PIPE_UNLOCK(rpipe);
					error = uiomove(
					    &wpipe->pipe_buffer.buffer[0],
					    size - segsize, uio);
					PIPE_LOCK(rpipe);
				}
				if (error == 0) {
					/* Advance 'in', wrapping if needed. */
					wpipe->pipe_buffer.in += size;
					if (wpipe->pipe_buffer.in >=
					    wpipe->pipe_buffer.size) {
						if (wpipe->pipe_buffer.in !=
						    size - segsize +
						    wpipe->pipe_buffer.size)
							panic("Expected "
							    "wraparound bad");
						wpipe->pipe_buffer.in = size -
						    segsize;
					}

					wpipe->pipe_buffer.cnt += size;
					if (wpipe->pipe_buffer.cnt >
					    wpipe->pipe_buffer.size)
						panic("Pipe buffer overflow");

				}
				pipeunlock(wpipe);
			}
			if (error)
				break;

		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			error = msleep(wpipe, PIPE_MTX(rpipe),
			    PRIBIO | PCATCH, "pipewr", 0);
			if (error != 0)
				break;
			/*
			 * If read side wants to go away, we just issue a signal
			 * to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;

	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.cnt == 0) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	PIPE_UNLOCK(rpipe);
	return (error);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* we implement a very minimal set of ioctls for compatibility with sockets.
|
|
|
|
*/
|
2002-09-28 17:15:38 +00:00
|
|
|
static int
|
2002-08-17 02:36:16 +00:00
|
|
|
pipe_ioctl(fp, cmd, data, active_cred, td)
|
1996-01-28 23:38:26 +00:00
|
|
|
struct file *fp;
|
1998-06-07 17:13:14 +00:00
|
|
|
u_long cmd;
|
2002-06-29 01:50:25 +00:00
|
|
|
void *data;
|
2002-08-17 02:36:16 +00:00
|
|
|
struct ucred *active_cred;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td;
|
1996-01-28 23:38:26 +00:00
|
|
|
{
|
2003-01-13 00:33:17 +00:00
|
|
|
struct pipe *mpipe = fp->f_data;
|
2002-08-13 02:47:13 +00:00
|
|
|
#ifdef MAC
|
|
|
|
int error;
|
2002-10-01 04:30:19 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
PIPE_LOCK(mpipe);
|
2002-08-13 02:47:13 +00:00
|
|
|
|
2002-10-01 04:30:19 +00:00
|
|
|
#ifdef MAC
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
error = mac_check_pipe_ioctl(active_cred, mpipe->pipe_pair, cmd, data);
|
2003-11-03 17:58:23 +00:00
|
|
|
if (error) {
|
|
|
|
PIPE_UNLOCK(mpipe);
|
2002-08-13 02:47:13 +00:00
|
|
|
return (error);
|
2003-11-03 17:58:23 +00:00
|
|
|
}
|
2002-08-13 02:47:13 +00:00
|
|
|
#endif
|
1996-01-28 23:38:26 +00:00
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
|
|
|
|
case FIONBIO:
|
2002-10-01 04:30:19 +00:00
|
|
|
PIPE_UNLOCK(mpipe);
|
1996-01-28 23:38:26 +00:00
|
|
|
return (0);
|
|
|
|
|
|
|
|
case FIOASYNC:
|
|
|
|
if (*(int *)data) {
|
|
|
|
mpipe->pipe_state |= PIPE_ASYNC;
|
|
|
|
} else {
|
|
|
|
mpipe->pipe_state &= ~PIPE_ASYNC;
|
|
|
|
}
|
2002-02-27 07:35:59 +00:00
|
|
|
PIPE_UNLOCK(mpipe);
|
1996-01-28 23:38:26 +00:00
|
|
|
return (0);
|
|
|
|
|
|
|
|
case FIONREAD:
|
1996-02-11 22:09:50 +00:00
|
|
|
if (mpipe->pipe_state & PIPE_DIRECTW)
|
|
|
|
*(int *)data = mpipe->pipe_map.cnt;
|
|
|
|
else
|
|
|
|
*(int *)data = mpipe->pipe_buffer.cnt;
|
2002-02-27 07:35:59 +00:00
|
|
|
PIPE_UNLOCK(mpipe);
|
1996-01-28 23:38:26 +00:00
|
|
|
return (0);
|
|
|
|
|
Installed the second patch attached to kern/7899 with some changes suggested
by bde, a few other tweaks to get the patch to apply cleanly again and
some improvements to the comments.
This change closes some fairly minor security holes associated with
F_SETOWN, fixes a few bugs, and removes some limitations that F_SETOWN
had on tty devices. For more details, see the description on the PR.
Because this patch increases the size of the proc and pgrp structures,
it is necessary to re-install the includes and recompile libkvm,
the vinum lkm, fstat, gcore, gdb, ipfilter, ps, top, and w.
PR: kern/7899
Reviewed by: bde, elvind
1998-11-11 10:04:13 +00:00
|
|
|
case FIOSETOWN:
|
2002-10-01 04:30:19 +00:00
|
|
|
PIPE_UNLOCK(mpipe);
|
Installed the second patch attached to kern/7899 with some changes suggested
by bde, a few other tweaks to get the patch to apply cleanly again and
some improvements to the comments.
This change closes some fairly minor security holes associated with
F_SETOWN, fixes a few bugs, and removes some limitations that F_SETOWN
had on tty devices. For more details, see the description on the PR.
Because this patch increases the size of the proc and pgrp structures,
it is necessary to re-install the includes and recompile libkvm,
the vinum lkm, fstat, gcore, gdb, ipfilter, ps, top, and w.
PR: kern/7899
Reviewed by: bde, elvind
1998-11-11 10:04:13 +00:00
|
|
|
return (fsetown(*(int *)data, &mpipe->pipe_sigio));
|
|
|
|
|
|
|
|
case FIOGETOWN:
|
2002-10-01 04:30:19 +00:00
|
|
|
PIPE_UNLOCK(mpipe);
|
2002-10-03 02:13:00 +00:00
|
|
|
*(int *)data = fgetown(&mpipe->pipe_sigio);
|
1996-01-28 23:38:26 +00:00
|
|
|
return (0);
|
|
|
|
|
Installed the second patch attached to kern/7899 with some changes suggested
by bde, a few other tweaks to get the patch to apply cleanly again and
some improvements to the comments.
This change closes some fairly minor security holes associated with
F_SETOWN, fixes a few bugs, and removes some limitations that F_SETOWN
had on tty devices. For more details, see the description on the PR.
Because this patch increases the size of the proc and pgrp structures,
it is necessary to re-install the includes and recompile libkvm,
the vinum lkm, fstat, gcore, gdb, ipfilter, ps, top, and w.
PR: kern/7899
Reviewed by: bde, elvind
1998-11-11 10:04:13 +00:00
|
|
|
/* This is deprecated, FIOSETOWN should be used instead. */
|
|
|
|
case TIOCSPGRP:
|
2002-10-01 04:30:19 +00:00
|
|
|
PIPE_UNLOCK(mpipe);
|
Installed the second patch attached to kern/7899 with some changes suggested
by bde, a few other tweaks to get the patch to apply cleanly again and
some improvements to the comments.
This change closes some fairly minor security holes associated with
F_SETOWN, fixes a few bugs, and removes some limitations that F_SETOWN
had on tty devices. For more details, see the description on the PR.
Because this patch increases the size of the proc and pgrp structures,
it is necessary to re-install the includes and recompile libkvm,
the vinum lkm, fstat, gcore, gdb, ipfilter, ps, top, and w.
PR: kern/7899
Reviewed by: bde, elvind
1998-11-11 10:04:13 +00:00
|
|
|
return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));
|
|
|
|
|
|
|
|
/* This is deprecated, FIOGETOWN should be used instead. */
|
1996-10-11 02:27:30 +00:00
|
|
|
case TIOCGPGRP:
|
2002-10-01 04:30:19 +00:00
|
|
|
PIPE_UNLOCK(mpipe);
|
2002-10-03 02:13:00 +00:00
|
|
|
*(int *)data = -fgetown(&mpipe->pipe_sigio);
|
1996-01-28 23:38:26 +00:00
|
|
|
return (0);
|
|
|
|
|
|
|
|
}
|
2002-10-01 04:30:19 +00:00
|
|
|
PIPE_UNLOCK(mpipe);
|
1996-07-12 08:14:58 +00:00
|
|
|
return (ENOTTY);
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
|
|
|
|
2002-09-28 17:15:38 +00:00
|
|
|
/*
 * pipe_poll: poll/select support for pipes.
 *
 * Reports which of the requested 'events' (POLLIN/POLLRDNORM,
 * POLLOUT/POLLWRNORM) are currently ready on the pipe backing 'fp',
 * plus POLLHUP when either half has seen EOF or the peer is gone.
 * When nothing is ready, records 'td' via selrecord() so it can be
 * woken later.  Returns the set of ready events (possibly 0).
 */
static int
pipe_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *rpipe = fp->f_data;	/* this descriptor's half */
	struct pipe *wpipe;			/* peer half */
	int revents = 0;
#ifdef MAC
	int error;
#endif

	wpipe = rpipe->pipe_peer;
	PIPE_LOCK(rpipe);
#ifdef MAC
	/*
	 * MAC check uses the active credential, not fp->f_cred.  On
	 * denial we must still drop the pipe mutex, hence the goto to
	 * the unlock path rather than an early return.
	 */
	error = mac_check_pipe_poll(active_cred, rpipe->pipe_pair);
	if (error)
		goto locked_error;
#endif
	/* Readable: direct-write data pending, buffered bytes, or EOF. */
	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_state & PIPE_DIRECTW) ||
		    (rpipe->pipe_buffer.cnt > 0) ||
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	/*
	 * Writable: peer absent or at EOF (a write would fail
	 * immediately), or no direct write in progress and at least
	 * PIPE_BUF bytes of buffer space free.
	 */
	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) ||
		    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
		     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	/*
	 * POLLHUP is reported regardless of what was asked for, per
	 * poll() semantics.
	 */
	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	if (revents == 0) {
		/*
		 * Nothing ready yet: register for wakeup.  wpipe cannot
		 * be NULL in the POLLOUT branch here, because a NULL
		 * peer set POLLHUP above and we would not have
		 * revents == 0.
		 */
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(td, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}
#ifdef MAC
locked_error:
#endif
	PIPE_UNLOCK(rpipe);

	return (revents);
}
|
|
|
|
|
2002-06-28 22:35:12 +00:00
|
|
|
/*
|
|
|
|
* We shouldn't need locks here as we're doing a read and this should
|
|
|
|
* be a natural race.
|
|
|
|
*/
|
1999-11-08 03:28:49 +00:00
|
|
|
static int
|
Make similar changes to fo_stat() and fo_poll() as made earlier to
fo_read() and fo_write(): explicitly use the cred argument to fo_poll()
as "active_cred" using the passed file descriptor's f_cred reference
to provide access to the file credential. Add an active_cred
argument to fo_stat() so that implementers have access to the active
credential as well as the file credential. Generally modify callers
of fo_stat() to pass in td->td_ucred rather than fp->f_cred, which
was redundantly provided via the fp argument. This set of modifications
also permits threads to perform these operations on behalf of another
thread without modifying their credential.
Trickle this change down into fo_stat/poll() implementations:
- badfo_poll(), badfo_stat(): modify/add arguments.
- kqueue_poll(), kqueue_stat(): modify arguments.
- pipe_poll(), pipe_stat(): modify/add arguments, pass active_cred to
MAC checks rather than td->td_ucred.
- soo_poll(), soo_stat(): modify/add arguments, pass fp->f_cred rather
than cred to pru_sopoll() to maintain current semantics.
- sopoll(): moidfy arguments.
- vn_poll(), vn_statfile(): modify/add arguments, pass new arguments
to vn_stat(). Pass active_cred to MAC and fp->f_cred to VOP_POLL()
to maintian current semantics.
- vn_close(): rename cred to file_cred to reflect reality while I'm here.
- vn_stat(): Add active_cred and file_cred arguments to vn_stat()
and consumers so that this distinction is maintained at the VFS
as well as 'struct file' layer. Pass active_cred instead of
td->td_ucred to MAC and to VOP_GETATTR() to maintain current semantics.
- fifofs: modify the creation of a "filetemp" so that the file
credential is properly initialized and can be used in the socket
code if desired. Pass ap->a_td->td_ucred as the active
credential to soo_poll(). If we teach the vnop interface about
the distinction between file and active credentials, we would use
the active credential here.
Note that current inconsistent passing of active_cred vs. file_cred to
VOP's is maintained. It's not clear why GETATTR would be authorized
using active_cred while POLL would be authorized using file_cred at
the file system level.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-08-16 12:52:03 +00:00
|
|
|
pipe_stat(fp, ub, active_cred, td)
|
1999-11-08 03:28:49 +00:00
|
|
|
struct file *fp;
|
|
|
|
struct stat *ub;
|
Make similar changes to fo_stat() and fo_poll() as made earlier to
fo_read() and fo_write(): explicitly use the cred argument to fo_poll()
as "active_cred" using the passed file descriptor's f_cred reference
to provide access to the file credential. Add an active_cred
argument to fo_stat() so that implementers have access to the active
credential as well as the file credential. Generally modify callers
of fo_stat() to pass in td->td_ucred rather than fp->f_cred, which
was redundantly provided via the fp argument. This set of modifications
also permits threads to perform these operations on behalf of another
thread without modifying their credential.
Trickle this change down into fo_stat/poll() implementations:
- badfo_poll(), badfo_stat(): modify/add arguments.
- kqueue_poll(), kqueue_stat(): modify arguments.
- pipe_poll(), pipe_stat(): modify/add arguments, pass active_cred to
MAC checks rather than td->td_ucred.
- soo_poll(), soo_stat(): modify/add arguments, pass fp->f_cred rather
than cred to pru_sopoll() to maintain current semantics.
- sopoll(): moidfy arguments.
- vn_poll(), vn_statfile(): modify/add arguments, pass new arguments
to vn_stat(). Pass active_cred to MAC and fp->f_cred to VOP_POLL()
to maintian current semantics.
- vn_close(): rename cred to file_cred to reflect reality while I'm here.
- vn_stat(): Add active_cred and file_cred arguments to vn_stat()
and consumers so that this distinction is maintained at the VFS
as well as 'struct file' layer. Pass active_cred instead of
td->td_ucred to MAC and to VOP_GETATTR() to maintain current semantics.
- fifofs: modify the creation of a "filetemp" so that the file
credential is properly initialized and can be used in the socket
code if desired. Pass ap->a_td->td_ucred as the active
credential to soo_poll(). If we teach the vnop interface about
the distinction between file and active credentials, we would use
the active credential here.
Note that current inconsistent passing of active_cred vs. file_cred to
VOP's is maintained. It's not clear why GETATTR would be authorized
using active_cred while POLL would be authorized using file_cred at
the file system level.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-08-16 12:52:03 +00:00
|
|
|
struct ucred *active_cred;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td;
|
1996-01-28 23:38:26 +00:00
|
|
|
{
|
2003-01-13 00:33:17 +00:00
|
|
|
struct pipe *pipe = fp->f_data;
|
2002-08-13 02:47:13 +00:00
|
|
|
#ifdef MAC
|
|
|
|
int error;
|
1999-11-08 03:28:49 +00:00
|
|
|
|
2002-10-01 04:30:19 +00:00
|
|
|
PIPE_LOCK(pipe);
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
error = mac_check_pipe_stat(active_cred, pipe->pipe_pair);
|
2002-10-01 04:30:19 +00:00
|
|
|
PIPE_UNLOCK(pipe);
|
2002-08-13 02:47:13 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
#endif
|
2002-07-22 19:05:44 +00:00
|
|
|
bzero(ub, sizeof(*ub));
|
1996-07-12 08:14:58 +00:00
|
|
|
ub->st_mode = S_IFIFO;
|
1996-02-04 19:56:35 +00:00
|
|
|
ub->st_blksize = pipe->pipe_buffer.size;
|
1996-01-28 23:38:26 +00:00
|
|
|
ub->st_size = pipe->pipe_buffer.cnt;
|
|
|
|
ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
|
1998-03-26 20:54:05 +00:00
|
|
|
ub->st_atimespec = pipe->pipe_atime;
|
|
|
|
ub->st_mtimespec = pipe->pipe_mtime;
|
|
|
|
ub->st_ctimespec = pipe->pipe_ctime;
|
2000-05-11 22:08:20 +00:00
|
|
|
ub->st_uid = fp->f_cred->cr_uid;
|
|
|
|
ub->st_gid = fp->f_cred->cr_gid;
|
1996-07-12 08:14:58 +00:00
|
|
|
/*
|
2000-05-11 22:08:20 +00:00
|
|
|
* Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
|
1996-07-12 08:14:58 +00:00
|
|
|
* XXX (st_dev, st_ino) should be unique.
|
|
|
|
*/
|
2001-05-17 19:47:09 +00:00
|
|
|
return (0);
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* ARGSUSED */
|
|
|
|
static int
|
2001-09-12 08:38:13 +00:00
|
|
|
pipe_close(fp, td)
|
1996-01-28 23:38:26 +00:00
|
|
|
struct file *fp;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td;
|
1996-01-28 23:38:26 +00:00
|
|
|
{
|
2003-01-13 00:33:17 +00:00
|
|
|
struct pipe *cpipe = fp->f_data;
|
1996-06-12 05:11:41 +00:00
|
|
|
|
1999-08-04 18:53:50 +00:00
|
|
|
fp->f_ops = &badfileops;
|
2003-01-13 00:33:17 +00:00
|
|
|
fp->f_data = NULL;
|
2002-05-06 19:31:28 +00:00
|
|
|
funsetown(&cpipe->pipe_sigio);
|
1996-01-28 23:38:26 +00:00
|
|
|
pipeclose(cpipe);
|
2001-05-17 19:47:09 +00:00
|
|
|
return (0);
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
|
|
|
|
2001-05-08 09:09:18 +00:00
|
|
|
/*
 * pipe_free_kmem: release the kernel memory backing one pipe half.
 *
 * Frees the pageable buffer (if any) from pipe_map and the wired
 * direct-write KVA (if any) from kernel_map, updating the global
 * accounting counters as it goes.  Must be called WITHOUT the pipe
 * mutex held, since the VM operations may sleep.
 */
static void
pipe_free_kmem(cpipe)
	struct pipe *cpipe;
{

	KASSERT(!mtx_owned(PIPE_MTX(cpipe)),
	       ("pipe_free_kmem: pipe mutex locked"));

	if (cpipe->pipe_buffer.buffer != NULL) {
		/* "Big" pipes (> PIPE_SIZE) are counted separately. */
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			atomic_subtract_int(&nbigpipe, 1);
		atomic_subtract_int(&amountpipekva, cpipe->pipe_buffer.size);
		atomic_subtract_int(&amountpipes, 1);
		vm_map_remove(pipe_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			(vm_offset_t)cpipe->pipe_buffer.buffer + cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	/* Direct-write window: wired KVA sized buffer + one guard page. */
	if (cpipe->pipe_map.kva != 0) {
		atomic_subtract_int(&amountpipekvawired,
			cpipe->pipe_buffer.size + PAGE_SIZE);
		kmem_free(kernel_map,
			cpipe->pipe_map.kva,
			cpipe->pipe_buffer.size + PAGE_SIZE);
		/* Reset the mapping state so a stale kva is never reused. */
		cpipe->pipe_map.cnt = 0;
		cpipe->pipe_map.kva = 0;
		cpipe->pipe_map.pos = 0;
		cpipe->pipe_map.npages = 0;
	}
#endif
}
|
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
/*
|
|
|
|
* shutdown the pipe
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
pipeclose(cpipe)
|
|
|
|
struct pipe *cpipe;
|
|
|
|
{
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
struct pipepair *pp;
|
1996-02-04 19:56:35 +00:00
|
|
|
struct pipe *ppipe;
|
2002-03-09 22:06:31 +00:00
|
|
|
int hadpeer;
|
|
|
|
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
KASSERT(cpipe != NULL, ("pipeclose: cpipe == NULL"));
|
2002-03-09 22:06:31 +00:00
|
|
|
|
|
|
|
hadpeer = 0;
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
PIPE_LOCK(cpipe);
|
|
|
|
pp = cpipe->pipe_pair;
|
2004-01-11 19:54:45 +00:00
|
|
|
|
2002-03-09 22:06:31 +00:00
|
|
|
pipeselwakeup(cpipe);
|
1996-01-28 23:38:26 +00:00
|
|
|
|
2002-03-09 22:06:31 +00:00
|
|
|
/*
|
|
|
|
* If the other side is blocked, wake it up saying that
|
|
|
|
* we want to close it down.
|
|
|
|
*/
|
|
|
|
while (cpipe->pipe_busy) {
|
|
|
|
wakeup(cpipe);
|
|
|
|
cpipe->pipe_state |= PIPE_WANT | PIPE_EOF;
|
|
|
|
msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
|
|
|
|
}
|
1996-02-04 19:56:35 +00:00
|
|
|
|
2002-08-13 02:47:13 +00:00
|
|
|
|
2002-03-09 22:06:31 +00:00
|
|
|
/*
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
* Disconnect from peer, if any.
|
2002-03-09 22:06:31 +00:00
|
|
|
*/
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
ppipe = cpipe->pipe_peer;
|
|
|
|
if (ppipe->pipe_present != 0) {
|
2002-03-09 22:06:31 +00:00
|
|
|
hadpeer++;
|
|
|
|
pipeselwakeup(ppipe);
|
|
|
|
|
|
|
|
ppipe->pipe_state |= PIPE_EOF;
|
|
|
|
wakeup(ppipe);
|
|
|
|
KNOTE(&ppipe->pipe_sel.si_note, 0);
|
|
|
|
}
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
|
2002-03-09 22:06:31 +00:00
|
|
|
/*
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
* Mark this endpoint as free. Release kmem resources. We
|
|
|
|
* don't mark this endpoint as unused until we've finished
|
|
|
|
* doing that, or the pipe might disappear out from under
|
|
|
|
* us.
|
2002-03-09 22:06:31 +00:00
|
|
|
*/
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
PIPE_UNLOCK(cpipe);
|
2002-03-09 22:06:31 +00:00
|
|
|
pipe_free_kmem(cpipe);
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
PIPE_LOCK(cpipe);
|
|
|
|
cpipe->pipe_present = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If both endpoints are now closed, release the memory for the
|
|
|
|
* pipe pair. If not, unlock.
|
|
|
|
*/
|
|
|
|
if (ppipe->pipe_present == 0) {
|
|
|
|
PIPE_UNLOCK(cpipe);
|
|
|
|
#ifdef MAC
|
|
|
|
mac_destroy_pipe(pp);
|
|
|
|
#endif
|
|
|
|
uma_zfree(pipe_zone, cpipe->pipe_pair);
|
|
|
|
} else
|
|
|
|
PIPE_UNLOCK(cpipe);
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
2000-04-16 18:53:38 +00:00
|
|
|
|
2001-02-15 16:34:11 +00:00
|
|
|
/*ARGSUSED*/
|
2000-04-16 18:53:38 +00:00
|
|
|
static int
|
2001-02-15 16:34:11 +00:00
|
|
|
pipe_kqfilter(struct file *fp, struct knote *kn)
|
2000-04-16 18:53:38 +00:00
|
|
|
{
|
2002-01-13 11:58:06 +00:00
|
|
|
struct pipe *cpipe;
|
2000-04-16 18:53:38 +00:00
|
|
|
|
2003-01-13 00:33:17 +00:00
|
|
|
cpipe = kn->kn_fp->f_data;
|
2001-02-15 16:34:11 +00:00
|
|
|
switch (kn->kn_filter) {
|
|
|
|
case EVFILT_READ:
|
|
|
|
kn->kn_fop = &pipe_rfiltops;
|
|
|
|
break;
|
|
|
|
case EVFILT_WRITE:
|
|
|
|
kn->kn_fop = &pipe_wfiltops;
|
2001-06-15 20:45:01 +00:00
|
|
|
cpipe = cpipe->pipe_peer;
|
2002-08-05 15:03:03 +00:00
|
|
|
if (cpipe == NULL)
|
|
|
|
/* other end of pipe has been closed */
|
2003-08-15 04:31:01 +00:00
|
|
|
return (EPIPE);
|
2001-02-15 16:34:11 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return (1);
|
|
|
|
}
|
2001-06-15 20:45:01 +00:00
|
|
|
|
2002-02-27 11:27:48 +00:00
|
|
|
PIPE_LOCK(cpipe);
|
2001-06-15 20:45:01 +00:00
|
|
|
SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
|
2002-02-27 11:27:48 +00:00
|
|
|
PIPE_UNLOCK(cpipe);
|
2000-04-16 18:53:38 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
filt_pipedetach(struct knote *kn)
|
|
|
|
{
|
2003-10-12 07:06:02 +00:00
|
|
|
struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;
|
|
|
|
|
|
|
|
if (kn->kn_filter == EVFILT_WRITE) {
|
|
|
|
if (cpipe->pipe_peer == NULL)
|
|
|
|
return;
|
|
|
|
cpipe = cpipe->pipe_peer;
|
|
|
|
}
|
2000-04-16 18:53:38 +00:00
|
|
|
|
2002-02-27 11:27:48 +00:00
|
|
|
PIPE_LOCK(cpipe);
|
2001-06-15 20:45:01 +00:00
|
|
|
SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
|
2002-02-27 11:27:48 +00:00
|
|
|
PIPE_UNLOCK(cpipe);
|
2000-04-16 18:53:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*ARGSUSED*/
|
|
|
|
static int
|
|
|
|
filt_piperead(struct knote *kn, long hint)
|
|
|
|
{
|
2003-01-13 00:33:17 +00:00
|
|
|
struct pipe *rpipe = kn->kn_fp->f_data;
|
2000-04-16 18:53:38 +00:00
|
|
|
struct pipe *wpipe = rpipe->pipe_peer;
|
|
|
|
|
2002-02-27 11:27:48 +00:00
|
|
|
PIPE_LOCK(rpipe);
|
2000-04-16 18:53:38 +00:00
|
|
|
kn->kn_data = rpipe->pipe_buffer.cnt;
|
|
|
|
if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
|
|
|
|
kn->kn_data = rpipe->pipe_map.cnt;
|
|
|
|
|
|
|
|
if ((rpipe->pipe_state & PIPE_EOF) ||
|
|
|
|
(wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
|
2002-02-27 11:27:48 +00:00
|
|
|
kn->kn_flags |= EV_EOF;
|
|
|
|
PIPE_UNLOCK(rpipe);
|
2000-04-16 18:53:38 +00:00
|
|
|
return (1);
|
|
|
|
}
|
2002-02-27 11:27:48 +00:00
|
|
|
PIPE_UNLOCK(rpipe);
|
2000-04-16 18:53:38 +00:00
|
|
|
return (kn->kn_data > 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*ARGSUSED*/
|
|
|
|
static int
|
|
|
|
filt_pipewrite(struct knote *kn, long hint)
|
|
|
|
{
|
2003-01-13 00:33:17 +00:00
|
|
|
struct pipe *rpipe = kn->kn_fp->f_data;
|
2000-04-16 18:53:38 +00:00
|
|
|
struct pipe *wpipe = rpipe->pipe_peer;
|
|
|
|
|
2002-02-27 11:27:48 +00:00
|
|
|
PIPE_LOCK(rpipe);
|
2000-04-16 18:53:38 +00:00
|
|
|
if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
|
|
|
|
kn->kn_data = 0;
|
2004-01-11 19:54:45 +00:00
|
|
|
kn->kn_flags |= EV_EOF;
|
2002-02-27 11:27:48 +00:00
|
|
|
PIPE_UNLOCK(rpipe);
|
2000-04-16 18:53:38 +00:00
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
|
2000-09-14 20:10:19 +00:00
|
|
|
if (wpipe->pipe_state & PIPE_DIRECTW)
|
2000-04-16 18:53:38 +00:00
|
|
|
kn->kn_data = 0;
|
|
|
|
|
2002-02-27 11:27:48 +00:00
|
|
|
PIPE_UNLOCK(rpipe);
|
2000-04-16 18:53:38 +00:00
|
|
|
return (kn->kn_data >= PIPE_BUF);
|
|
|
|
}
|