2005-01-06 23:35:40 +00:00
|
|
|
/*-
|
2017-11-27 15:20:12 +00:00
|
|
|
* SPDX-License-Identifier: BSD-4-Clause
|
|
|
|
*
|
1996-01-28 23:38:26 +00:00
|
|
|
* Copyright (c) 1996 John S. Dyson
|
2012-02-23 18:37:30 +00:00
|
|
|
* Copyright (c) 2012 Giovanni Trematerra
|
1996-01-28 23:38:26 +00:00
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice immediately at the beginning of the file, without modification,
|
|
|
|
* this list of conditions, and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. Absolutely no warranty of function or purpose is made by the author
|
|
|
|
* John S. Dyson.
|
1996-02-11 22:09:50 +00:00
|
|
|
* 4. Modifications may be freely made to this file if the above conditions
|
1996-01-28 23:38:26 +00:00
|
|
|
* are met.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This file contains a high-performance replacement for the socket-based
|
|
|
|
* pipes scheme originally used in FreeBSD/4.4Lite. It does not support
|
|
|
|
* all features of sockets, but does do everything that pipes normally
|
|
|
|
* do.
|
|
|
|
*/
|
|
|
|
|
1996-02-04 19:56:35 +00:00
|
|
|
/*
|
|
|
|
* This code has two modes of operation, a small write mode and a large
|
|
|
|
* write mode. The small write mode acts like conventional pipes with
|
|
|
|
* a kernel buffer. If the buffer is less than PIPE_MINDIRECT, then the
|
|
|
|
* "normal" pipe buffering is done. If the buffer is between PIPE_MINDIRECT
|
2011-03-20 15:04:43 +00:00
|
|
|
* and PIPE_SIZE in size, the sending process pins the underlying pages in
|
|
|
|
* memory, and the receiving process copies directly from these pinned pages
|
|
|
|
* in the sending process.
|
1996-02-04 19:56:35 +00:00
|
|
|
*
|
|
|
|
* If the sending process receives a signal, it is possible that it will
|
1996-02-05 05:50:34 +00:00
|
|
|
* go away, and certainly its address space can change, because control
|
1996-02-04 19:56:35 +00:00
|
|
|
* is returned back to the user-mode side. In that case, the pipe code
|
|
|
|
* arranges to copy the buffer supplied by the user process, to a pageable
|
|
|
|
* kernel buffer, and the receiving process will grab the data from the
|
|
|
|
* pageable kernel buffer. Since signals don't happen all that often,
|
|
|
|
* the copy operation is normally eliminated.
|
|
|
|
*
|
|
|
|
* The constant PIPE_MINDIRECT is chosen to make sure that buffering will
|
|
|
|
* happen for small transfers so that the system will not spend all of
|
2003-08-11 05:51:51 +00:00
|
|
|
* its time context switching.
|
2003-07-08 04:02:31 +00:00
|
|
|
*
|
2003-08-11 05:51:51 +00:00
|
|
|
* In order to limit the resource use of pipes, two sysctls exist:
|
2003-07-08 04:02:31 +00:00
|
|
|
*
|
2003-08-11 05:51:51 +00:00
|
|
|
* kern.ipc.maxpipekva - This is a hard limit on the amount of pageable
|
2004-08-16 01:27:24 +00:00
|
|
|
* address space available to us in pipe_map. This value is normally
|
|
|
|
* autotuned, but may also be loader tuned.
|
2003-07-08 04:02:31 +00:00
|
|
|
*
|
2004-08-16 01:27:24 +00:00
|
|
|
* kern.ipc.pipekva - This read-only sysctl tracks the current amount of
|
|
|
|
* memory in use by pipes.
|
2003-07-08 04:02:31 +00:00
|
|
|
*
|
2004-08-16 01:27:24 +00:00
|
|
|
* Based on how large pipekva is relative to maxpipekva, the following
|
|
|
|
* will happen:
|
2003-07-08 04:02:31 +00:00
|
|
|
*
|
2004-08-16 01:27:24 +00:00
|
|
|
* 0% - 50%:
|
|
|
|
* New pipes are given 16K of memory backing, pipes may dynamically
|
|
|
|
* grow to as large as 64K where needed.
|
|
|
|
* 50% - 75%:
|
|
|
|
* New pipes are given 4K (or PAGE_SIZE) of memory backing,
|
|
|
|
* existing pipes may NOT grow.
|
|
|
|
* 75% - 100%:
|
|
|
|
* New pipes are given 4K (or PAGE_SIZE) of memory backing,
|
|
|
|
* existing pipes will be shrunk down to 4K whenever possible.
|
|
|
|
*
|
|
|
|
* Resizing may be disabled by setting kern.ipc.piperesizeallowed=0. If
|
|
|
|
* that is set, the only resize that will occur is the 0 -> SMALL_PIPE_SIZE
|
|
|
|
* resize which MUST occur for reverse-direction pipes when they are
|
|
|
|
* first used.
|
|
|
|
*
|
|
|
|
* Additional information about the current state of pipes may be obtained
|
|
|
|
* from kern.ipc.pipes, kern.ipc.pipefragretry, kern.ipc.pipeallocfail,
|
|
|
|
* and kern.ipc.piperesizefail.
|
2004-08-03 02:59:15 +00:00
|
|
|
*
|
|
|
|
* Locking rules: There are two locks present here: A mutex, used via
|
|
|
|
* PIPE_LOCK, and a flag, used via pipelock(). All locking is done via
|
|
|
|
* the flag, as mutexes can not persist over uiomove. The mutex
|
|
|
|
* exists only to guard access to the flag, and is not in itself a
|
2004-08-16 01:27:24 +00:00
|
|
|
* locking mechanism. Also note that there is only a single mutex for
|
|
|
|
* both directions of a pipe.
|
2004-08-03 02:59:15 +00:00
|
|
|
*
|
|
|
|
* As pipelock() may have to sleep before it can acquire the flag, it
|
|
|
|
* is important to reread all data after a call to pipelock(); everything
|
|
|
|
* in the structure may have changed.
|
1996-02-04 19:56:35 +00:00
|
|
|
*/
|
|
|
|
|
2003-06-11 00:56:59 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
2011-10-05 16:56:06 +00:00
|
|
|
#include <sys/conf.h>
|
1997-03-23 03:37:54 +00:00
|
|
|
#include <sys/fcntl.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
#include <sys/file.h>
|
|
|
|
#include <sys/filedesc.h>
|
1997-03-24 11:52:29 +00:00
|
|
|
#include <sys/filio.h>
|
2002-02-27 11:27:48 +00:00
|
|
|
#include <sys/kernel.h>
|
2001-05-01 08:13:21 +00:00
|
|
|
#include <sys/lock.h>
|
2001-05-19 01:28:09 +00:00
|
|
|
#include <sys/mutex.h>
|
1997-03-24 11:52:29 +00:00
|
|
|
#include <sys/ttycom.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
#include <sys/stat.h>
|
2002-03-09 22:06:31 +00:00
|
|
|
#include <sys/malloc.h>
|
1997-09-14 02:43:25 +00:00
|
|
|
#include <sys/poll.h>
|
2001-01-09 04:33:49 +00:00
|
|
|
#include <sys/selinfo.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
#include <sys/signalvar.h>
|
2008-11-11 14:55:59 +00:00
|
|
|
#include <sys/syscallsubr.h>
|
2003-07-08 04:02:31 +00:00
|
|
|
#include <sys/sysctl.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
#include <sys/sysproto.h>
|
|
|
|
#include <sys/pipe.h>
|
2001-05-01 08:13:21 +00:00
|
|
|
#include <sys/proc.h>
|
1999-12-26 13:04:52 +00:00
|
|
|
#include <sys/vnode.h>
|
1998-03-28 10:33:27 +00:00
|
|
|
#include <sys/uio.h>
|
2014-09-22 16:20:47 +00:00
|
|
|
#include <sys/user.h>
|
2000-04-16 18:53:38 +00:00
|
|
|
#include <sys/event.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
|
2006-10-22 11:52:19 +00:00
|
|
|
#include <security/mac/mac_framework.h>
|
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
#include <vm/vm.h>
|
|
|
|
#include <vm/vm_param.h>
|
|
|
|
#include <vm/vm_object.h>
|
|
|
|
#include <vm/vm_kern.h>
|
|
|
|
#include <vm/vm_extern.h>
|
|
|
|
#include <vm/pmap.h>
|
|
|
|
#include <vm/vm_map.h>
|
1996-02-04 19:56:35 +00:00
|
|
|
#include <vm/vm_page.h>
|
2002-03-20 04:09:59 +00:00
|
|
|
#include <vm/uma.h>
|
1996-01-28 23:38:26 +00:00
|
|
|
|
1996-02-11 22:09:50 +00:00
|
|
|
/*
|
|
|
|
* Use this define if you want to disable *fancy* VM things. Expect an
|
|
|
|
* approx 30% decrease in transfer rate. This could be useful for
|
|
|
|
* NetBSD or OpenBSD.
|
|
|
|
*/
|
|
|
|
/* #define PIPE_NODIRECT */
|
|
|
|
|
2012-02-23 18:37:30 +00:00
|
|
|
#define PIPE_PEER(pipe) \
|
|
|
|
(((pipe)->pipe_state & PIPE_NAMED) ? (pipe) : ((pipe)->pipe_peer))
|
|
|
|
|
1996-02-11 22:09:50 +00:00
|
|
|
/*
|
|
|
|
* interfaces to the outside world
|
|
|
|
*/
|
2002-12-24 09:44:51 +00:00
|
|
|
static fo_rdwr_t pipe_read;
|
|
|
|
static fo_rdwr_t pipe_write;
|
2008-01-07 20:05:19 +00:00
|
|
|
static fo_truncate_t pipe_truncate;
|
2002-12-24 09:44:51 +00:00
|
|
|
static fo_ioctl_t pipe_ioctl;
|
|
|
|
static fo_poll_t pipe_poll;
|
|
|
|
static fo_kqfilter_t pipe_kqfilter;
|
|
|
|
static fo_stat_t pipe_stat;
|
|
|
|
static fo_close_t pipe_close;
|
2012-02-26 15:14:29 +00:00
|
|
|
static fo_chmod_t pipe_chmod;
|
|
|
|
static fo_chown_t pipe_chown;
|
2014-09-22 16:20:47 +00:00
|
|
|
static fo_fill_kinfo_t pipe_fill_kinfo;
|
1996-01-28 23:38:26 +00:00
|
|
|
|
2012-02-23 18:37:30 +00:00
|
|
|
struct fileops pipeops = {
|
2003-06-18 18:16:40 +00:00
|
|
|
.fo_read = pipe_read,
|
|
|
|
.fo_write = pipe_write,
|
2008-01-07 20:05:19 +00:00
|
|
|
.fo_truncate = pipe_truncate,
|
2003-06-18 18:16:40 +00:00
|
|
|
.fo_ioctl = pipe_ioctl,
|
|
|
|
.fo_poll = pipe_poll,
|
|
|
|
.fo_kqfilter = pipe_kqfilter,
|
|
|
|
.fo_stat = pipe_stat,
|
|
|
|
.fo_close = pipe_close,
|
2012-02-26 15:14:29 +00:00
|
|
|
.fo_chmod = pipe_chmod,
|
|
|
|
.fo_chown = pipe_chown,
|
2013-08-15 07:54:31 +00:00
|
|
|
.fo_sendfile = invfo_sendfile,
|
2014-09-22 16:20:47 +00:00
|
|
|
.fo_fill_kinfo = pipe_fill_kinfo,
|
2003-06-18 18:16:40 +00:00
|
|
|
.fo_flags = DFLAG_PASSABLE
|
2001-02-15 16:34:11 +00:00
|
|
|
};
|
1996-01-28 23:38:26 +00:00
|
|
|
|
2000-04-16 18:53:38 +00:00
|
|
|
static void filt_pipedetach(struct knote *kn);
|
2012-02-23 18:37:30 +00:00
|
|
|
static void filt_pipedetach_notsup(struct knote *kn);
|
|
|
|
static int filt_pipenotsup(struct knote *kn, long hint);
|
2000-04-16 18:53:38 +00:00
|
|
|
static int filt_piperead(struct knote *kn, long hint);
|
|
|
|
static int filt_pipewrite(struct knote *kn, long hint);
|
|
|
|
|
2012-02-23 18:37:30 +00:00
|
|
|
static struct filterops pipe_nfiltops = {
|
|
|
|
.f_isfd = 1,
|
|
|
|
.f_detach = filt_pipedetach_notsup,
|
|
|
|
.f_event = filt_pipenotsup
|
|
|
|
};
|
2009-09-12 20:03:45 +00:00
|
|
|
static struct filterops pipe_rfiltops = {
|
|
|
|
.f_isfd = 1,
|
|
|
|
.f_detach = filt_pipedetach,
|
|
|
|
.f_event = filt_piperead
|
|
|
|
};
|
|
|
|
static struct filterops pipe_wfiltops = {
|
|
|
|
.f_isfd = 1,
|
|
|
|
.f_detach = filt_pipedetach,
|
|
|
|
.f_event = filt_pipewrite
|
|
|
|
};
|
2001-02-15 16:34:11 +00:00
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
/*
|
|
|
|
* Default pipe buffer size(s), this can be kind-of large now because pipe
|
|
|
|
* space is pageable. The pipe code will try to maintain locality of
|
|
|
|
* reference for performance reasons, so small amounts of outstanding I/O
|
|
|
|
* will not wipe the cache.
|
|
|
|
*/
|
1996-02-04 19:56:35 +00:00
|
|
|
#define MINPIPESIZE (PIPE_SIZE/3)
|
|
|
|
#define MAXPIPESIZE (2*PIPE_SIZE/3)
|
|
|
|
|
2009-03-10 21:28:43 +00:00
|
|
|
static long amountpipekva;
|
2004-08-16 01:27:24 +00:00
|
|
|
static int pipefragretry;
|
|
|
|
static int pipeallocfail;
|
|
|
|
static int piperesizefail;
|
|
|
|
static int piperesizeallowed = 1;
|
2003-07-08 04:02:31 +00:00
|
|
|
|
2014-06-28 03:56:17 +00:00
|
|
|
SYSCTL_LONG(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
|
2003-07-08 04:02:31 +00:00
|
|
|
&maxpipekva, 0, "Pipe KVA limit");
|
2009-03-10 21:28:43 +00:00
|
|
|
SYSCTL_LONG(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD,
|
2003-07-08 04:02:31 +00:00
|
|
|
&amountpipekva, 0, "Pipe KVA usage");
|
2004-08-16 01:27:24 +00:00
|
|
|
SYSCTL_INT(_kern_ipc, OID_AUTO, pipefragretry, CTLFLAG_RD,
|
|
|
|
&pipefragretry, 0, "Pipe allocation retries due to fragmentation");
|
|
|
|
SYSCTL_INT(_kern_ipc, OID_AUTO, pipeallocfail, CTLFLAG_RD,
|
|
|
|
&pipeallocfail, 0, "Pipe allocation failures");
|
|
|
|
SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizefail, CTLFLAG_RD,
|
|
|
|
&piperesizefail, 0, "Pipe resize failures");
|
|
|
|
SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizeallowed, CTLFLAG_RW,
|
|
|
|
&piperesizeallowed, 0, "Pipe resizing allowed");
|
1996-01-28 23:38:26 +00:00
|
|
|
|
2002-02-27 18:51:53 +00:00
|
|
|
static void pipeinit(void *dummy __unused);
|
|
|
|
static void pipeclose(struct pipe *cpipe);
|
|
|
|
static void pipe_free_kmem(struct pipe *cpipe);
|
2014-05-02 00:52:13 +00:00
|
|
|
static void pipe_create(struct pipe *pipe, int backing);
|
|
|
|
static void pipe_paircreate(struct thread *td, struct pipepair **p_pp);
|
2002-02-27 18:51:53 +00:00
|
|
|
static __inline int pipelock(struct pipe *cpipe, int catch);
|
|
|
|
static __inline void pipeunlock(struct pipe *cpipe);
|
1996-02-11 22:09:50 +00:00
|
|
|
#ifndef PIPE_NODIRECT
|
2002-02-27 18:51:53 +00:00
|
|
|
static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
|
|
|
|
static void pipe_destroy_write_buffer(struct pipe *wpipe);
|
|
|
|
static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
|
|
|
|
static void pipe_clone_write_buffer(struct pipe *wpipe);
|
1996-02-11 22:09:50 +00:00
|
|
|
#endif
|
2002-02-27 18:51:53 +00:00
|
|
|
static int pipespace(struct pipe *cpipe, int size);
|
2004-07-23 14:11:04 +00:00
|
|
|
static int pipespace_new(struct pipe *cpipe, int size);
|
1996-01-28 23:38:26 +00:00
|
|
|
|
2004-08-02 00:18:36 +00:00
|
|
|
static int pipe_zone_ctor(void *mem, int size, void *arg, int flags);
|
|
|
|
static int pipe_zone_init(void *mem, int size, int flags);
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
static void pipe_zone_fini(void *mem, int size);
|
|
|
|
|
2002-03-20 04:09:59 +00:00
|
|
|
static uma_zone_t pipe_zone;
|
2018-11-20 14:59:27 +00:00
|
|
|
static struct unrhdr64 pipeino_unr;
|
2011-10-05 16:56:06 +00:00
|
|
|
static dev_t pipedev_ino;
|
1997-08-05 00:02:08 +00:00
|
|
|
|
2002-02-27 11:27:48 +00:00
|
|
|
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);
|
|
|
|
|
|
|
|
static void
|
|
|
|
pipeinit(void *dummy __unused)
|
|
|
|
{
|
2003-08-13 20:01:38 +00:00
|
|
|
|
2007-05-27 17:33:10 +00:00
|
|
|
pipe_zone = uma_zcreate("pipe", sizeof(struct pipepair),
|
|
|
|
pipe_zone_ctor, NULL, pipe_zone_init, pipe_zone_fini,
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
UMA_ALIGN_PTR, 0);
|
2003-08-13 20:01:38 +00:00
|
|
|
KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
|
2018-11-20 14:59:27 +00:00
|
|
|
new_unrhdr64(&pipeino_unr, 1);
|
2011-10-05 16:56:06 +00:00
|
|
|
pipedev_ino = devfs_alloc_cdp_inode();
|
|
|
|
KASSERT(pipedev_ino > 0, ("pipe dev inode not initialized"));
|
2002-02-27 11:27:48 +00:00
|
|
|
}
|
|
|
|
|
2004-08-02 00:18:36 +00:00
|
|
|
static int
|
|
|
|
pipe_zone_ctor(void *mem, int size, void *arg, int flags)
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
{
|
|
|
|
struct pipepair *pp;
|
|
|
|
struct pipe *rpipe, *wpipe;
|
|
|
|
|
|
|
|
KASSERT(size == sizeof(*pp), ("pipe_zone_ctor: wrong size"));
|
|
|
|
|
|
|
|
pp = (struct pipepair *)mem;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We zero both pipe endpoints to make sure all the kmem pointers
|
|
|
|
* are NULL, flag fields are zero'd, etc. We timestamp both
|
|
|
|
* endpoints with the same time.
|
|
|
|
*/
|
|
|
|
rpipe = &pp->pp_rpipe;
|
|
|
|
bzero(rpipe, sizeof(*rpipe));
|
|
|
|
vfs_timestamp(&rpipe->pipe_ctime);
|
|
|
|
rpipe->pipe_atime = rpipe->pipe_mtime = rpipe->pipe_ctime;
|
|
|
|
|
|
|
|
wpipe = &pp->pp_wpipe;
|
|
|
|
bzero(wpipe, sizeof(*wpipe));
|
|
|
|
wpipe->pipe_ctime = rpipe->pipe_ctime;
|
|
|
|
wpipe->pipe_atime = wpipe->pipe_mtime = rpipe->pipe_ctime;
|
|
|
|
|
|
|
|
rpipe->pipe_peer = wpipe;
|
|
|
|
rpipe->pipe_pair = pp;
|
|
|
|
wpipe->pipe_peer = rpipe;
|
|
|
|
wpipe->pipe_pair = pp;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Mark both endpoints as present; they will later get free'd
|
|
|
|
* one at a time. When both are free'd, then the whole pair
|
|
|
|
* is released.
|
|
|
|
*/
|
2008-05-23 11:14:03 +00:00
|
|
|
rpipe->pipe_present = PIPE_ACTIVE;
|
|
|
|
wpipe->pipe_present = PIPE_ACTIVE;
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Eventually, the MAC Framework may initialize the label
|
|
|
|
* in ctor or init, but for now we do it elswhere to avoid
|
|
|
|
* blocking in ctor or init.
|
|
|
|
*/
|
|
|
|
pp->pp_label = NULL;
|
|
|
|
|
2004-08-02 00:18:36 +00:00
|
|
|
return (0);
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
}
|
|
|
|
|
2004-08-02 00:18:36 +00:00
|
|
|
static int
|
|
|
|
pipe_zone_init(void *mem, int size, int flags)
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
{
|
|
|
|
struct pipepair *pp;
|
|
|
|
|
|
|
|
KASSERT(size == sizeof(*pp), ("pipe_zone_init: wrong size"));
|
|
|
|
|
|
|
|
pp = (struct pipepair *)mem;
|
|
|
|
|
2015-01-21 16:32:54 +00:00
|
|
|
mtx_init(&pp->pp_mtx, "pipe mutex", NULL, MTX_DEF | MTX_NEW);
|
2004-08-02 00:18:36 +00:00
|
|
|
return (0);
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
pipe_zone_fini(void *mem, int size)
|
|
|
|
{
|
|
|
|
struct pipepair *pp;
|
|
|
|
|
|
|
|
KASSERT(size == sizeof(*pp), ("pipe_zone_fini: wrong size"));
|
|
|
|
|
|
|
|
pp = (struct pipepair *)mem;
|
|
|
|
|
|
|
|
mtx_destroy(&pp->pp_mtx);
|
|
|
|
}
|
|
|
|
|
2014-05-02 00:52:13 +00:00
|
|
|
static void
|
2012-02-23 18:37:30 +00:00
|
|
|
pipe_paircreate(struct thread *td, struct pipepair **p_pp)
|
1996-01-28 23:38:26 +00:00
|
|
|
{
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
struct pipepair *pp;
|
1996-01-28 23:38:26 +00:00
|
|
|
struct pipe *rpipe, *wpipe;
|
1997-08-05 00:02:08 +00:00
|
|
|
|
2012-02-23 18:37:30 +00:00
|
|
|
*p_pp = pp = uma_zalloc(pipe_zone, M_WAITOK);
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
#ifdef MAC
|
|
|
|
/*
|
2004-02-25 23:30:56 +00:00
|
|
|
* The MAC label is shared between the connected endpoints. As a
|
2007-10-24 19:04:04 +00:00
|
|
|
* result mac_pipe_init() and mac_pipe_create() are called once
|
2004-02-25 23:30:56 +00:00
|
|
|
* for the pair, and not on the endpoints.
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
*/
|
2007-10-24 19:04:04 +00:00
|
|
|
mac_pipe_init(pp);
|
|
|
|
mac_pipe_create(td->td_ucred, pp);
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
#endif
|
|
|
|
rpipe = &pp->pp_rpipe;
|
|
|
|
wpipe = &pp->pp_wpipe;
|
|
|
|
|
2009-06-10 20:59:32 +00:00
|
|
|
knlist_init_mtx(&rpipe->pipe_sel.si_note, PIPE_MTX(rpipe));
|
|
|
|
knlist_init_mtx(&wpipe->pipe_sel.si_note, PIPE_MTX(wpipe));
|
2005-01-17 07:56:28 +00:00
|
|
|
|
2004-08-16 01:27:24 +00:00
|
|
|
/* Only the forward direction pipe is backed by default */
|
2014-05-02 00:52:13 +00:00
|
|
|
pipe_create(rpipe, 1);
|
|
|
|
pipe_create(wpipe, 0);
|
2004-01-11 19:54:45 +00:00
|
|
|
|
1996-02-04 19:56:35 +00:00
|
|
|
rpipe->pipe_state |= PIPE_DIRECTOK;
|
|
|
|
wpipe->pipe_state |= PIPE_DIRECTOK;
|
2012-02-23 18:37:30 +00:00
|
|
|
}
|
|
|
|
|
2014-05-02 00:52:13 +00:00
|
|
|
void
|
2012-02-23 18:37:30 +00:00
|
|
|
pipe_named_ctor(struct pipe **ppipe, struct thread *td)
|
|
|
|
{
|
|
|
|
struct pipepair *pp;
|
|
|
|
|
2014-05-02 00:52:13 +00:00
|
|
|
pipe_paircreate(td, &pp);
|
2012-02-23 18:37:30 +00:00
|
|
|
pp->pp_rpipe.pipe_state |= PIPE_NAMED;
|
|
|
|
*ppipe = &pp->pp_rpipe;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
pipe_dtor(struct pipe *dpipe)
|
|
|
|
{
|
2015-02-03 10:29:40 +00:00
|
|
|
struct pipe *peer;
|
2012-02-23 18:37:30 +00:00
|
|
|
|
2015-02-03 10:29:40 +00:00
|
|
|
peer = (dpipe->pipe_state & PIPE_NAMED) != 0 ? dpipe->pipe_peer : NULL;
|
2012-02-23 18:37:30 +00:00
|
|
|
funsetown(&dpipe->pipe_sigio);
|
|
|
|
pipeclose(dpipe);
|
2015-02-03 10:29:40 +00:00
|
|
|
if (peer != NULL) {
|
|
|
|
funsetown(&peer->pipe_sigio);
|
|
|
|
pipeclose(peer);
|
2012-02-23 18:37:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The pipe system call for the DTYPE_PIPE type of pipes. If we fail, let
|
|
|
|
* the zone pick up the pieces via pipeclose().
|
|
|
|
*/
|
|
|
|
int
|
2015-07-29 17:18:27 +00:00
|
|
|
kern_pipe(struct thread *td, int fildes[2], int flags, struct filecaps *fcaps1,
|
|
|
|
struct filecaps *fcaps2)
|
2012-02-23 18:37:30 +00:00
|
|
|
{
|
|
|
|
struct file *rf, *wf;
|
|
|
|
struct pipe *rpipe, *wpipe;
|
|
|
|
struct pipepair *pp;
|
2012-04-16 21:22:02 +00:00
|
|
|
int fd, fflags, error;
|
1996-01-28 23:38:26 +00:00
|
|
|
|
2014-05-02 00:52:13 +00:00
|
|
|
pipe_paircreate(td, &pp);
|
2012-02-23 18:37:30 +00:00
|
|
|
rpipe = &pp->pp_rpipe;
|
|
|
|
wpipe = &pp->pp_wpipe;
|
2015-07-29 17:18:27 +00:00
|
|
|
error = falloc_caps(td, &rf, &fd, flags, fcaps1);
|
2001-01-11 00:13:54 +00:00
|
|
|
if (error) {
|
|
|
|
pipeclose(rpipe);
|
|
|
|
pipeclose(wpipe);
|
|
|
|
return (error);
|
|
|
|
}
|
2015-07-29 17:18:27 +00:00
|
|
|
/* An extra reference on `rf' has been held for us by falloc_caps(). */
|
2008-11-11 14:55:59 +00:00
|
|
|
fildes[0] = fd;
|
2001-01-11 00:13:54 +00:00
|
|
|
|
2012-04-16 21:22:02 +00:00
|
|
|
fflags = FREAD | FWRITE;
|
|
|
|
if ((flags & O_NONBLOCK) != 0)
|
|
|
|
fflags |= FNONBLOCK;
|
|
|
|
|
2001-01-08 22:14:48 +00:00
|
|
|
/*
|
|
|
|
* Warning: once we've gotten past allocation of the fd for the
|
|
|
|
* read-side, we can only drop the read side via fdrop() in order
|
|
|
|
* to avoid races against processes which manage to dup() the read
|
|
|
|
* side while we are blocked trying to allocate the write side.
|
|
|
|
*/
|
2012-04-16 21:22:02 +00:00
|
|
|
finit(rf, fflags, DTYPE_PIPE, rpipe, &pipeops);
|
2015-07-29 17:18:27 +00:00
|
|
|
error = falloc_caps(td, &wf, &fd, flags, fcaps2);
|
2001-01-11 00:13:54 +00:00
|
|
|
if (error) {
|
2015-04-11 15:40:28 +00:00
|
|
|
fdclose(td, rf, fildes[0]);
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(rf, td);
|
2001-01-11 00:13:54 +00:00
|
|
|
/* rpipe has been closed by fdrop(). */
|
|
|
|
pipeclose(wpipe);
|
|
|
|
return (error);
|
|
|
|
}
|
2015-07-29 17:18:27 +00:00
|
|
|
/* An extra reference on `wf' has been held for us by falloc_caps(). */
|
2012-04-16 21:22:02 +00:00
|
|
|
finit(wf, fflags, DTYPE_PIPE, wpipe, &pipeops);
|
2003-10-19 20:41:07 +00:00
|
|
|
fdrop(wf, td);
|
2008-11-11 14:55:59 +00:00
|
|
|
fildes[1] = fd;
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(rf, td);
|
1996-01-28 23:38:26 +00:00
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2016-06-22 21:18:19 +00:00
|
|
|
#ifdef COMPAT_FREEBSD10
|
2008-11-11 14:55:59 +00:00
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
2016-06-22 21:18:19 +00:00
|
|
|
freebsd10_pipe(struct thread *td, struct freebsd10_pipe_args *uap __unused)
|
2008-11-11 14:55:59 +00:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
int fildes[2];
|
|
|
|
|
2015-07-29 17:18:27 +00:00
|
|
|
error = kern_pipe(td, fildes, 0, NULL, NULL);
|
2008-11-11 14:55:59 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2013-02-17 11:48:16 +00:00
|
|
|
|
2008-11-11 14:55:59 +00:00
|
|
|
td->td_retval[0] = fildes[0];
|
|
|
|
td->td_retval[1] = fildes[1];
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
2016-06-22 21:18:19 +00:00
|
|
|
#endif
|
2008-11-11 14:55:59 +00:00
|
|
|
|
2013-05-01 22:42:42 +00:00
|
|
|
int
|
|
|
|
sys_pipe2(struct thread *td, struct pipe2_args *uap)
|
|
|
|
{
|
|
|
|
int error, fildes[2];
|
|
|
|
|
|
|
|
if (uap->flags & ~(O_CLOEXEC | O_NONBLOCK))
|
|
|
|
return (EINVAL);
|
2015-07-29 17:18:27 +00:00
|
|
|
error = kern_pipe(td, fildes, uap->flags, NULL, NULL);
|
2013-05-01 22:42:42 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
error = copyout(fildes, uap->fildes, 2 * sizeof(int));
|
|
|
|
if (error) {
|
|
|
|
(void)kern_close(td, fildes[0]);
|
|
|
|
(void)kern_close(td, fildes[1]);
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1996-02-04 22:09:12 +00:00
|
|
|
/*
|
|
|
|
* Allocate kva for pipe circular buffer, the space is pageable
|
2001-05-08 09:09:18 +00:00
|
|
|
* This routine will 'realloc' the size of a pipe safely, if it fails
|
|
|
|
* it will retain the old buffer.
|
|
|
|
* If it fails it will return ENOMEM.
|
1996-02-04 22:09:12 +00:00
|
|
|
*/
|
2001-05-08 09:09:18 +00:00
|
|
|
static int
|
2018-06-01 13:26:45 +00:00
|
|
|
pipespace_new(struct pipe *cpipe, int size)
|
1996-01-28 23:38:26 +00:00
|
|
|
{
|
2001-05-08 09:09:18 +00:00
|
|
|
caddr_t buffer;
|
2004-08-16 01:27:24 +00:00
|
|
|
int error, cnt, firstseg;
|
2003-07-08 04:02:31 +00:00
|
|
|
static int curfail = 0;
|
|
|
|
static struct timeval lastfail;
|
1996-01-28 23:38:26 +00:00
|
|
|
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked"));
|
2004-08-16 01:27:24 +00:00
|
|
|
KASSERT(!(cpipe->pipe_state & PIPE_DIRECTW),
|
|
|
|
("pipespace: resize of direct writes not allowed"));
|
|
|
|
retry:
|
|
|
|
cnt = cpipe->pipe_buffer.cnt;
|
|
|
|
if (cnt > size)
|
|
|
|
size = cnt;
|
2001-07-04 16:20:28 +00:00
|
|
|
|
2003-08-11 05:51:51 +00:00
|
|
|
size = round_page(size);
|
|
|
|
buffer = (caddr_t) vm_map_min(pipe_map);
|
1996-01-28 23:38:26 +00:00
|
|
|
|
2018-11-06 21:57:03 +00:00
|
|
|
error = vm_map_find(pipe_map, NULL, 0, (vm_offset_t *)&buffer, size, 0,
|
|
|
|
VMFS_ANY_SPACE, VM_PROT_RW, VM_PROT_RW, 0);
|
2001-05-08 09:09:18 +00:00
|
|
|
if (error != KERN_SUCCESS) {
|
2004-08-16 01:27:24 +00:00
|
|
|
if ((cpipe->pipe_buffer.buffer == NULL) &&
|
|
|
|
(size > SMALL_PIPE_SIZE)) {
|
|
|
|
size = SMALL_PIPE_SIZE;
|
|
|
|
pipefragretry++;
|
|
|
|
goto retry;
|
|
|
|
}
|
|
|
|
if (cpipe->pipe_buffer.buffer == NULL) {
|
|
|
|
pipeallocfail++;
|
|
|
|
if (ppsratecheck(&lastfail, &curfail, 1))
|
|
|
|
printf("kern.ipc.maxpipekva exceeded; see tuning(7)\n");
|
|
|
|
} else {
|
|
|
|
piperesizefail++;
|
|
|
|
}
|
2001-05-08 09:09:18 +00:00
|
|
|
return (ENOMEM);
|
|
|
|
}
|
|
|
|
|
2004-08-16 01:27:24 +00:00
|
|
|
/* copy data, then free old resources if we're resizing */
|
|
|
|
if (cnt > 0) {
|
|
|
|
if (cpipe->pipe_buffer.in <= cpipe->pipe_buffer.out) {
|
|
|
|
firstseg = cpipe->pipe_buffer.size - cpipe->pipe_buffer.out;
|
|
|
|
bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
|
|
|
|
buffer, firstseg);
|
|
|
|
if ((cnt - firstseg) > 0)
|
|
|
|
bcopy(cpipe->pipe_buffer.buffer, &buffer[firstseg],
|
|
|
|
cpipe->pipe_buffer.in);
|
|
|
|
} else {
|
|
|
|
bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
|
|
|
|
buffer, cnt);
|
|
|
|
}
|
|
|
|
}
|
2001-05-08 09:09:18 +00:00
|
|
|
pipe_free_kmem(cpipe);
|
|
|
|
cpipe->pipe_buffer.buffer = buffer;
|
|
|
|
cpipe->pipe_buffer.size = size;
|
2004-08-16 01:27:24 +00:00
|
|
|
cpipe->pipe_buffer.in = cnt;
|
2001-05-08 09:09:18 +00:00
|
|
|
cpipe->pipe_buffer.out = 0;
|
2004-08-16 01:27:24 +00:00
|
|
|
cpipe->pipe_buffer.cnt = cnt;
|
2009-03-10 21:28:43 +00:00
|
|
|
atomic_add_long(&amountpipekva, cpipe->pipe_buffer.size);
|
2001-05-08 09:09:18 +00:00
|
|
|
return (0);
|
1996-02-04 19:56:35 +00:00
|
|
|
}
|
|
|
|
|
2004-07-23 14:11:04 +00:00
|
|
|
/*
|
|
|
|
* Wrapper for pipespace_new() that performs locking assertions.
|
|
|
|
*/
|
|
|
|
static int
|
2018-06-01 13:26:45 +00:00
|
|
|
pipespace(struct pipe *cpipe, int size)
|
2004-07-23 14:11:04 +00:00
|
|
|
{
|
|
|
|
|
2004-08-03 02:59:15 +00:00
|
|
|
KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
|
|
|
|
("Unlocked pipe passed to pipespace"));
|
2004-07-23 14:11:04 +00:00
|
|
|
return (pipespace_new(cpipe, size));
|
|
|
|
}
|
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
/*
|
|
|
|
* lock a pipe for I/O, blocking other access
|
|
|
|
*/
|
|
|
|
static __inline int
|
2018-06-01 13:26:45 +00:00
|
|
|
pipelock(struct pipe *cpipe, int catch)
|
1996-01-28 23:38:26 +00:00
|
|
|
{
|
1996-01-31 06:00:45 +00:00
|
|
|
int error;
|
2001-05-08 09:09:18 +00:00
|
|
|
|
2002-02-27 07:35:59 +00:00
|
|
|
PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
|
|
|
|
while (cpipe->pipe_state & PIPE_LOCKFL) {
|
1996-01-28 23:38:26 +00:00
|
|
|
cpipe->pipe_state |= PIPE_LWANT;
|
2002-02-27 07:35:59 +00:00
|
|
|
error = msleep(cpipe, PIPE_MTX(cpipe),
|
|
|
|
catch ? (PRIBIO | PCATCH) : PRIBIO,
|
2001-05-17 19:47:09 +00:00
|
|
|
"pipelk", 0);
|
2004-01-11 19:54:45 +00:00
|
|
|
if (error != 0)
|
2001-05-17 19:47:09 +00:00
|
|
|
return (error);
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
2002-02-27 07:35:59 +00:00
|
|
|
cpipe->pipe_state |= PIPE_LOCKFL;
|
2001-05-17 19:47:09 +00:00
|
|
|
return (0);
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* unlock a pipe I/O lock
|
|
|
|
*/
|
|
|
|
static __inline void
|
2018-06-01 13:26:45 +00:00
|
|
|
pipeunlock(struct pipe *cpipe)
|
1996-01-28 23:38:26 +00:00
|
|
|
{
|
2001-05-08 09:09:18 +00:00
|
|
|
|
2002-02-27 07:35:59 +00:00
|
|
|
PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
|
2004-08-03 02:59:15 +00:00
|
|
|
KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
|
|
|
|
("Unlocked pipe passed to pipeunlock"));
|
2002-02-27 07:35:59 +00:00
|
|
|
cpipe->pipe_state &= ~PIPE_LOCKFL;
|
1996-01-28 23:38:26 +00:00
|
|
|
if (cpipe->pipe_state & PIPE_LWANT) {
|
|
|
|
cpipe->pipe_state &= ~PIPE_LWANT;
|
1996-02-22 03:33:52 +00:00
|
|
|
wakeup(cpipe);
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
When a thread is blocked in direct write state, it only sets PIPE_DIRECTW
flag but not PIPE_WANTW, but FIFO pipe code does not understand this internal
state, when a FIFO peer reader closes the pipe, it wants to notify the writer,
it checks PIPE_WANTW, if not set, it skips calling wakeup(), so blocked writer
never noticed the case, but in general, the writer should return from the
syscall with EPIPE error code and may get SIGPIPE signal. Setting the
PIPE_WANTW fixed problem, or you can turn off direct write, it should fix the
problem too. This bug is found by PR/170203.
Another bug in FIFO pipe code is when peer closes the pipe, another end which
is being blocked in select() or poll() is not notified, it missed to call
pipeselwakeup().
Third problem is found in poll regression test, the existing code can not
pass 6b,6c,6d tests, but FreeBSD-4 works. This commit does not fix the
problem, I still need to study more to find the cause.
PR: 170203
Tested by: Garrett Copper < yanegomi at gmail dot com >
2012-07-31 02:00:37 +00:00
|
|
|
void
|
2018-06-01 13:26:45 +00:00
|
|
|
pipeselwakeup(struct pipe *cpipe)
|
1996-02-11 22:09:50 +00:00
|
|
|
{
|
2001-05-08 09:09:18 +00:00
|
|
|
|
2004-02-26 00:18:22 +00:00
|
|
|
PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
|
1996-02-11 22:09:50 +00:00
|
|
|
if (cpipe->pipe_state & PIPE_SEL) {
|
2003-11-09 09:17:26 +00:00
|
|
|
selwakeuppri(&cpipe->pipe_sel, PSOCK);
|
2007-12-16 06:21:20 +00:00
|
|
|
if (!SEL_WAITING(&cpipe->pipe_sel))
|
|
|
|
cpipe->pipe_state &= ~PIPE_SEL;
|
1996-02-11 22:09:50 +00:00
|
|
|
}
|
Installed the second patch attached to kern/7899 with some changes suggested
by bde, a few other tweaks to get the patch to apply cleanly again and
some improvements to the comments.
This change closes some fairly minor security holes associated with
F_SETOWN, fixes a few bugs, and removes some limitations that F_SETOWN
had on tty devices. For more details, see the description on the PR.
Because this patch increases the size of the proc and pgrp structures,
it is necessary to re-install the includes and recompile libkvm,
the vinum lkm, fstat, gcore, gdb, ipfilter, ps, top, and w.
PR: kern/7899
Reviewed by: bde, elvind
1998-11-11 10:04:13 +00:00
|
|
|
if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
|
2002-05-01 20:44:46 +00:00
|
|
|
pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
|
2004-08-15 06:24:42 +00:00
|
|
|
KNOTE_LOCKED(&cpipe->pipe_sel.si_note, 0);
|
1996-02-11 22:09:50 +00:00
|
|
|
}
|
|
|
|
|
2004-02-22 23:00:14 +00:00
|
|
|
/*
|
|
|
|
* Initialize and allocate VM and memory for pipe. The structure
|
|
|
|
* will start out zero'd from the ctor, so we just manage the kmem.
|
|
|
|
*/
|
2014-05-02 00:52:13 +00:00
|
|
|
static void
|
2018-06-01 13:26:45 +00:00
|
|
|
pipe_create(struct pipe *pipe, int backing)
|
2004-02-22 23:00:14 +00:00
|
|
|
{
|
|
|
|
|
2004-08-16 01:27:24 +00:00
|
|
|
if (backing) {
|
2014-05-02 00:52:13 +00:00
|
|
|
/*
|
|
|
|
* Note that these functions can fail if pipe map is exhausted
|
|
|
|
* (as a result of too many pipes created), but we ignore the
|
|
|
|
* error as it is not fatal and could be provoked by
|
|
|
|
* unprivileged users. The only consequence is worse performance
|
|
|
|
* with given pipe.
|
|
|
|
*/
|
2004-08-16 01:27:24 +00:00
|
|
|
if (amountpipekva > maxpipekva / 2)
|
2014-05-02 00:52:13 +00:00
|
|
|
(void)pipespace_new(pipe, SMALL_PIPE_SIZE);
|
2004-08-16 01:27:24 +00:00
|
|
|
else
|
2014-05-02 00:52:13 +00:00
|
|
|
(void)pipespace_new(pipe, PIPE_SIZE);
|
2004-08-16 01:27:24 +00:00
|
|
|
}
|
2014-05-02 00:52:13 +00:00
|
|
|
|
2018-11-20 14:59:27 +00:00
|
|
|
pipe->pipe_ino = alloc_unr64(&pipeino_unr);
|
2004-02-22 23:00:14 +00:00
|
|
|
}
|
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
/* ARGSUSED */
|
|
|
|
static int
|
2018-06-01 13:26:45 +00:00
|
|
|
pipe_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
|
|
|
|
int flags, struct thread *td)
|
1996-01-28 23:38:26 +00:00
|
|
|
{
|
2012-02-23 18:37:30 +00:00
|
|
|
struct pipe *rpipe;
|
1999-06-05 03:53:57 +00:00
|
|
|
int error;
|
1996-01-28 23:38:26 +00:00
|
|
|
int nread = 0;
|
2012-03-04 15:09:01 +00:00
|
|
|
int size;
|
1996-01-28 23:38:26 +00:00
|
|
|
|
2012-02-23 18:37:30 +00:00
|
|
|
rpipe = fp->f_data;
|
2002-02-27 07:35:59 +00:00
|
|
|
PIPE_LOCK(rpipe);
|
1996-01-28 23:38:26 +00:00
|
|
|
++rpipe->pipe_busy;
|
1999-06-05 03:53:57 +00:00
|
|
|
error = pipelock(rpipe, 1);
|
|
|
|
if (error)
|
|
|
|
goto unlocked_error;
|
|
|
|
|
2002-08-13 02:47:13 +00:00
|
|
|
#ifdef MAC
|
2007-10-24 19:04:04 +00:00
|
|
|
error = mac_pipe_check_read(active_cred, rpipe->pipe_pair);
|
2002-08-13 02:47:13 +00:00
|
|
|
if (error)
|
|
|
|
goto locked_error;
|
|
|
|
#endif
|
2004-08-16 01:27:24 +00:00
|
|
|
if (amountpipekva > (3 * maxpipekva) / 4) {
|
|
|
|
if (!(rpipe->pipe_state & PIPE_DIRECTW) &&
|
|
|
|
(rpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
|
|
|
|
(rpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
|
|
|
|
(piperesizeallowed == 1)) {
|
|
|
|
PIPE_UNLOCK(rpipe);
|
|
|
|
pipespace(rpipe, SMALL_PIPE_SIZE);
|
|
|
|
PIPE_LOCK(rpipe);
|
|
|
|
}
|
|
|
|
}
|
2002-08-13 02:47:13 +00:00
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
while (uio->uio_resid) {
|
1996-02-04 19:56:35 +00:00
|
|
|
/*
|
|
|
|
* normal pipe buffer receive
|
|
|
|
*/
|
1996-01-28 23:38:26 +00:00
|
|
|
if (rpipe->pipe_buffer.cnt > 0) {
|
1996-10-11 02:27:30 +00:00
|
|
|
size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
|
1996-01-28 23:38:26 +00:00
|
|
|
if (size > rpipe->pipe_buffer.cnt)
|
|
|
|
size = rpipe->pipe_buffer.cnt;
|
2012-02-21 01:05:12 +00:00
|
|
|
if (size > uio->uio_resid)
|
2012-03-04 15:09:01 +00:00
|
|
|
size = uio->uio_resid;
|
1999-06-05 03:53:57 +00:00
|
|
|
|
2002-02-27 07:35:59 +00:00
|
|
|
PIPE_UNLOCK(rpipe);
|
2003-06-09 21:57:48 +00:00
|
|
|
error = uiomove(
|
|
|
|
&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
|
|
|
|
size, uio);
|
2002-02-27 07:35:59 +00:00
|
|
|
PIPE_LOCK(rpipe);
|
2001-05-17 19:47:09 +00:00
|
|
|
if (error)
|
1996-01-28 23:38:26 +00:00
|
|
|
break;
|
2001-05-17 19:47:09 +00:00
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
rpipe->pipe_buffer.out += size;
|
|
|
|
if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
|
|
|
|
rpipe->pipe_buffer.out = 0;
|
|
|
|
|
|
|
|
rpipe->pipe_buffer.cnt -= size;
|
1999-06-05 03:53:57 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If there is no more to read in the pipe, reset
|
|
|
|
* its pointers to the beginning. This improves
|
|
|
|
* cache hit stats.
|
|
|
|
*/
|
|
|
|
if (rpipe->pipe_buffer.cnt == 0) {
|
|
|
|
rpipe->pipe_buffer.in = 0;
|
|
|
|
rpipe->pipe_buffer.out = 0;
|
|
|
|
}
|
1996-01-28 23:38:26 +00:00
|
|
|
nread += size;
|
1996-02-11 22:09:50 +00:00
|
|
|
#ifndef PIPE_NODIRECT
|
1996-02-04 19:56:35 +00:00
|
|
|
/*
|
|
|
|
* Direct copy, bypassing a kernel buffer.
|
|
|
|
*/
|
|
|
|
} else if ((size = rpipe->pipe_map.cnt) &&
|
1999-06-05 03:53:57 +00:00
|
|
|
(rpipe->pipe_state & PIPE_DIRECTW)) {
|
2012-02-21 01:05:12 +00:00
|
|
|
if (size > uio->uio_resid)
|
1996-10-11 02:27:30 +00:00
|
|
|
size = (u_int) uio->uio_resid;
|
1999-06-05 03:53:57 +00:00
|
|
|
|
2002-02-27 07:35:59 +00:00
|
|
|
PIPE_UNLOCK(rpipe);
|
Revise the direct or optimized case to use uiomove_fromphys() by the reader
instead of ephemeral mappings using pmap_qenter() by the writer. The
writer is still, however, responsible for wiring the pages, just not
mapping them. Consequently, the allocation of KVA for the direct case is
unnecessary. Remove it and the sysctls limiting it, i.e.,
kern.ipc.maxpipekvawired and kern.ipc.amountpipekvawired. The number
of temporarily wired pages is still, however, limited by
kern.ipc.maxpipekva.
Note: On platforms lacking a direct virtual-to-physical mapping,
uiomove_fromphys() uses sf_bufs to cache ephemeral mappings. Thus,
the number of available sf_bufs can influence the performance of pipes
on platforms such i386. Surprisingly, I saw the greatest gain from this
change on such a machine: lmbench's pipe bandwidth result increased from
~1050MB/s to ~1850MB/s on my 2.4GHz, 400MHz FSB P4 Xeon.
2004-03-27 19:50:23 +00:00
|
|
|
error = uiomove_fromphys(rpipe->pipe_map.ms,
|
|
|
|
rpipe->pipe_map.pos, size, uio);
|
2002-02-27 07:35:59 +00:00
|
|
|
PIPE_LOCK(rpipe);
|
1996-02-04 19:56:35 +00:00
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
nread += size;
|
|
|
|
rpipe->pipe_map.pos += size;
|
|
|
|
rpipe->pipe_map.cnt -= size;
|
|
|
|
if (rpipe->pipe_map.cnt == 0) {
|
2019-06-29 16:05:52 +00:00
|
|
|
rpipe->pipe_state &= ~PIPE_WANTW;
|
1996-02-04 19:56:35 +00:00
|
|
|
wakeup(rpipe);
|
|
|
|
}
|
1996-02-11 22:09:50 +00:00
|
|
|
#endif
|
1996-01-28 23:38:26 +00:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* detect EOF condition
|
2001-05-17 19:47:09 +00:00
|
|
|
* read returns 0 on EOF, no need to set error
|
1996-01-28 23:38:26 +00:00
|
|
|
*/
|
2001-05-17 19:47:09 +00:00
|
|
|
if (rpipe->pipe_state & PIPE_EOF)
|
1996-01-28 23:38:26 +00:00
|
|
|
break;
|
1999-02-04 23:50:49 +00:00
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
/*
|
|
|
|
* If the "write-side" has been blocked, wake it up now.
|
|
|
|
*/
|
|
|
|
if (rpipe->pipe_state & PIPE_WANTW) {
|
|
|
|
rpipe->pipe_state &= ~PIPE_WANTW;
|
|
|
|
wakeup(rpipe);
|
|
|
|
}
|
1999-02-04 23:50:49 +00:00
|
|
|
|
|
|
|
/*
|
1999-06-05 03:53:57 +00:00
|
|
|
* Break if some data was read.
|
1999-02-04 23:50:49 +00:00
|
|
|
*/
|
1999-06-05 03:53:57 +00:00
|
|
|
if (nread > 0)
|
1996-01-28 23:38:26 +00:00
|
|
|
break;
|
1996-07-04 04:36:56 +00:00
|
|
|
|
1999-02-04 23:50:49 +00:00
|
|
|
/*
|
2004-01-11 19:54:45 +00:00
|
|
|
* Unlock the pipe buffer for our remaining processing.
|
2003-06-09 21:57:48 +00:00
|
|
|
* We will either break out with an error or we will
|
|
|
|
* sleep and relock to loop.
|
1999-02-04 23:50:49 +00:00
|
|
|
*/
|
1999-06-05 03:53:57 +00:00
|
|
|
pipeunlock(rpipe);
|
1996-01-28 23:38:26 +00:00
|
|
|
|
|
|
|
/*
|
1999-06-05 03:53:57 +00:00
|
|
|
* Handle non-blocking mode operation or
|
|
|
|
* wait for more data.
|
1996-01-28 23:38:26 +00:00
|
|
|
*/
|
2001-05-17 19:47:09 +00:00
|
|
|
if (fp->f_flag & FNONBLOCK) {
|
1999-06-05 03:53:57 +00:00
|
|
|
error = EAGAIN;
|
2001-05-17 19:47:09 +00:00
|
|
|
} else {
|
1999-06-05 03:53:57 +00:00
|
|
|
rpipe->pipe_state |= PIPE_WANTR;
|
2002-02-27 07:35:59 +00:00
|
|
|
if ((error = msleep(rpipe, PIPE_MTX(rpipe),
|
|
|
|
PRIBIO | PCATCH,
|
2001-05-24 18:06:22 +00:00
|
|
|
"piperd", 0)) == 0)
|
1999-06-05 03:53:57 +00:00
|
|
|
error = pipelock(rpipe, 1);
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
1999-06-05 03:53:57 +00:00
|
|
|
if (error)
|
|
|
|
goto unlocked_error;
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
|
|
|
}
|
2002-08-13 02:47:13 +00:00
|
|
|
#ifdef MAC
|
|
|
|
locked_error:
|
|
|
|
#endif
|
1999-06-05 03:53:57 +00:00
|
|
|
pipeunlock(rpipe);
|
1996-01-28 23:38:26 +00:00
|
|
|
|
2002-02-27 07:35:59 +00:00
|
|
|
/* XXX: should probably do this before getting any locks. */
|
1997-03-22 06:53:45 +00:00
|
|
|
if (error == 0)
|
1999-12-26 13:04:52 +00:00
|
|
|
vfs_timestamp(&rpipe->pipe_atime);
|
1999-06-05 03:53:57 +00:00
|
|
|
unlocked_error:
|
1996-01-28 23:38:26 +00:00
|
|
|
--rpipe->pipe_busy;
|
1999-06-05 03:53:57 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* PIPE_WANT processing only makes sense if pipe_busy is 0.
|
|
|
|
*/
|
1996-01-28 23:38:26 +00:00
|
|
|
if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
|
|
|
|
rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
|
|
|
|
wakeup(rpipe);
|
|
|
|
} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
|
|
|
|
/*
|
1999-06-05 03:53:57 +00:00
|
|
|
* Handle write blocking hysteresis.
|
1996-01-28 23:38:26 +00:00
|
|
|
*/
|
|
|
|
if (rpipe->pipe_state & PIPE_WANTW) {
|
|
|
|
rpipe->pipe_state &= ~PIPE_WANTW;
|
|
|
|
wakeup(rpipe);
|
|
|
|
}
|
|
|
|
}
|
1996-02-11 22:09:50 +00:00
|
|
|
|
1996-03-25 01:48:28 +00:00
|
|
|
if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
|
1996-02-11 22:09:50 +00:00
|
|
|
pipeselwakeup(rpipe);
|
|
|
|
|
2002-02-27 07:35:59 +00:00
|
|
|
PIPE_UNLOCK(rpipe);
|
2001-05-17 19:47:09 +00:00
|
|
|
return (error);
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
|
|
|
|
1996-02-11 22:09:50 +00:00
|
|
|
#ifndef PIPE_NODIRECT
|
1996-02-04 19:56:35 +00:00
|
|
|
/*
 * Map the sending processes' buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 *
 * Called with the pipe mutex NOT held (asserted below); the pipe must
 * already be marked PIPE_DIRECTW by the caller (pipe_direct_write()).
 * On success the wired pages are recorded in wpipe->pipe_map and the
 * uio is advanced past the consumed bytes; returns 0, or EFAULT if the
 * user pages could not be faulted in and held.
 */
static int
pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio)
{
	u_int size;
	int i;

	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
	KASSERT(wpipe->pipe_state & PIPE_DIRECTW,
	    ("Clone attempt on non-direct write pipe!"));

	/* Clamp the transfer to at most one pipe buffer's worth. */
	if (uio->uio_iov->iov_len > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;
	else
		size = uio->uio_iov->iov_len;

	/*
	 * Fault in and hold (wire) the user pages backing the first iovec.
	 * A negative return means some page was inaccessible for read.
	 */
	if ((i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
	    (vm_offset_t)uio->uio_iov->iov_base, size, VM_PROT_READ,
	    wpipe->pipe_map.ms, PIPENPAGES)) < 0)
		return (EFAULT);

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	/* Offset of the data within the first held page. */
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and update the uio data
	 */

	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}
|
|
|
|
|
|
|
|
/*
 * Unwire the process buffer.
 *
 * Clears PIPE_DIRECTW and releases the hold on the pages that
 * pipe_build_write_buffer() wired.  Called with the pipe mutex held
 * (asserted below) and only while PIPE_DIRECTW is set.
 */
static void
pipe_destroy_write_buffer(struct pipe *wpipe)
{

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	KASSERT((wpipe->pipe_state & PIPE_DIRECTW) != 0,
	    ("%s: PIPE_DIRECTW not set on %p", __func__, wpipe));

	wpipe->pipe_state &= ~PIPE_DIRECTW;
	vm_page_unhold_pages(wpipe->pipe_map.ms, wpipe->pipe_map.npages);
	wpipe->pipe_map.npages = 0;
}
|
|
|
|
|
|
|
|
/*
 * In the case of a signal, the writing process might go away. This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 *
 * Called with the pipe mutex held; the mutex is dropped around the
 * uiomove_fromphys() copy (the wired pages cannot disappear while the
 * direct-write state is pending) and reacquired before the write
 * buffer is torn down.
 */
static void
pipe_clone_write_buffer(struct pipe *wpipe)
{
	struct uio uio;
	struct iovec iov;
	int size;
	int pos;

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	KASSERT((wpipe->pipe_state & PIPE_DIRECTW) != 0,
	    ("%s: PIPE_DIRECTW not set on %p", __func__, wpipe));

	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;

	/* The kernel buffer now holds exactly the cloned bytes. */
	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;

	PIPE_UNLOCK(wpipe);
	/* Copy from the wired physical pages into the pipe buffer. */
	iov.iov_base = wpipe->pipe_buffer.buffer;
	iov.iov_len = size;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = size;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;
	uiomove_fromphys(wpipe->pipe_map.ms, pos, size, &uio);
	PIPE_LOCK(wpipe);
	pipe_destroy_write_buffer(wpipe);
}
|
|
|
|
|
|
|
|
/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 *
 * Called with the pipe mutex held.  Returns 0 on success or an errno
 * (EPIPE if the read side went away, or the msleep() error on signal).
 * On signal interruption the pending data is cloned into the kernel
 * buffer so no bytes are lost (pipe_clone_write_buffer()).
 */
static int
pipe_direct_write(struct pipe *wpipe, struct uio *uio)
{
	int error;

retry:
	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	error = pipelock(wpipe, 1);
	if (error != 0)
		goto error1;
	if ((wpipe->pipe_state & PIPE_EOF) != 0) {
		error = EPIPE;
		pipeunlock(wpipe);
		goto error1;
	}
	/*
	 * Another direct write is still in progress; wake the reader,
	 * wait for it to drain, and start over.
	 */
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		wpipe->pipe_state |= PIPE_WANTW;
		pipeunlock(wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdww", 0);
		if (error)
			goto error1;
		else
			goto retry;
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	/*
	 * Buffered data must be fully consumed by the reader before a
	 * direct write may begin; wait for the buffer to drain.
	 */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		wpipe->pipe_state |= PIPE_WANTW;
		pipeunlock(wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdwc", 0);
		if (error)
			goto error1;
		else
			goto retry;
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	/* pipe_build_write_buffer() requires the mutex dropped. */
	PIPE_UNLOCK(wpipe);
	error = pipe_build_write_buffer(wpipe, uio);
	PIPE_LOCK(wpipe);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
		pipeunlock(wpipe);
		goto error1;
	}

	/* Sleep until the reader has consumed all of the wired data. */
	while (wpipe->pipe_map.cnt != 0) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipe_destroy_write_buffer(wpipe);
			pipeselwakeup(wpipe);
			pipeunlock(wpipe);
			error = EPIPE;
			goto error1;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		wpipe->pipe_state |= PIPE_WANTW;
		pipeunlock(wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
		    "pipdwt", 0);
		pipelock(wpipe, 0);
		if (error != 0)
			break;
	}

	if (wpipe->pipe_state & PIPE_EOF)
		error = EPIPE;
	/* On interruption, save the unread data; otherwise just unwire. */
	if (error == EINTR || error == ERESTART)
		pipe_clone_write_buffer(wpipe);
	else
		pipe_destroy_write_buffer(wpipe);
	pipeunlock(wpipe);
	KASSERT((wpipe->pipe_state & PIPE_DIRECTW) == 0,
	    ("pipe %p leaked PIPE_DIRECTW", wpipe));
	return (error);

error1:
	wakeup(wpipe);
	return (error);
}
|
1996-02-11 22:09:50 +00:00
|
|
|
#endif
|
2004-01-11 19:54:45 +00:00
|
|
|
|
1996-07-04 04:36:56 +00:00
|
|
|
/*
 * Pipe write path (fo_write).  Chooses between the direct
 * (page-wiring) mechanism for large blocking user-space writes and
 * ordinary copies into the kernel pipe buffer, growing or shrinking
 * the buffer first when that is advantageous.  Writes of size
 * <= PIPE_BUF are kept atomic.  Returns 0 or an errno.
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	int error = 0;
	int desiredsize;
	ssize_t orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = fp->f_data;
	wpipe = PIPE_PEER(rpipe);
	PIPE_LOCK(rpipe);
	error = pipelock(wpipe, 1);
	if (error) {
		PIPE_UNLOCK(rpipe);
		return (error);
	}
	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if (wpipe->pipe_present != PIPE_ACTIVE ||
	    (wpipe->pipe_state & PIPE_EOF)) {
		pipeunlock(wpipe);
		PIPE_UNLOCK(rpipe);
		return (EPIPE);
	}
#ifdef MAC
	error = mac_pipe_check_write(active_cred, wpipe->pipe_pair);
	if (error) {
		pipeunlock(wpipe);
		PIPE_UNLOCK(rpipe);
		return (error);
	}
#endif
	++wpipe->pipe_busy;

	/* Choose a larger size if it's advantageous */
	desiredsize = max(SMALL_PIPE_SIZE, wpipe->pipe_buffer.size);
	while (desiredsize < wpipe->pipe_buffer.cnt + uio->uio_resid) {
		if (piperesizeallowed != 1)
			break;
		if (amountpipekva > maxpipekva / 2)
			break;
		if (desiredsize == BIG_PIPE_SIZE)
			break;
		desiredsize = desiredsize * 2;
	}

	/* Choose a smaller size if we're in a OOM situation */
	if ((amountpipekva > (3 * maxpipekva) / 4) &&
	    (wpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
	    (wpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
	    (piperesizeallowed == 1))
		desiredsize = SMALL_PIPE_SIZE;

	/* Resize if the above determined that a new size was necessary */
	if ((desiredsize != wpipe->pipe_buffer.size) &&
	    ((wpipe->pipe_state & PIPE_DIRECTW) == 0)) {
		/* pipespace() may allocate; drop the mutex around it. */
		PIPE_UNLOCK(wpipe);
		pipespace(wpipe, desiredsize);
		PIPE_LOCK(wpipe);
	}
	if (wpipe->pipe_buffer.size == 0) {
		/*
		 * This can only happen for reverse direction use of pipes
		 * in a complete OOM situation.
		 */
		error = ENOMEM;
		--wpipe->pipe_busy;
		pipeunlock(wpipe);
		PIPE_UNLOCK(wpipe);
		return (error);
	}

	pipeunlock(wpipe);

	orig_resid = uio->uio_resid;

	while (uio->uio_resid) {
		int space;

		pipelock(wpipe, 0);
		if (wpipe->pipe_state & PIPE_EOF) {
			pipeunlock(wpipe);
			error = EPIPE;
			break;
		}
#ifndef PIPE_NODIRECT
		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if (uio->uio_segflg == UIO_USERSPACE &&
		    uio->uio_iov->iov_len >= PIPE_MINDIRECT &&
		    wpipe->pipe_buffer.size >= PIPE_MINDIRECT &&
		    (fp->f_flag & FNONBLOCK) == 0) {
			pipeunlock(wpipe);
			error = pipe_direct_write(wpipe, uio);
			if (error)
				break;
			continue;
		}
#endif

		/*
		 * Pipe buffered writes cannot be coincidental with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.  We break out if a signal occurs or the
		 * reader goes away.
		 */
		if (wpipe->pipe_state & PIPE_DIRECTW) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			pipeselwakeup(wpipe);
			wpipe->pipe_state |= PIPE_WANTW;
			pipeunlock(wpipe);
			error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
			    "pipbww", 0);
			if (error)
				break;
			else
				continue;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		if (space > 0) {
			int size;	/* Transfer size */
			int segsize;	/* first segment to transfer */

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 */
			if (space > uio->uio_resid)
				size = uio->uio_resid;
			else
				size = space;
			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size -
			    wpipe->pipe_buffer.in;
			if (segsize > size)
				segsize = size;

			/* Transfer first segment */

			PIPE_UNLOCK(rpipe);
			error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
			    segsize, uio);
			PIPE_LOCK(rpipe);

			if (error == 0 && segsize < size) {
				KASSERT(wpipe->pipe_buffer.in + segsize ==
				    wpipe->pipe_buffer.size,
				    ("Pipe buffer wraparound disappeared"));
				/*
				 * Transfer remaining part now, to
				 * support atomic writes.  Wraparound
				 * happened.
				 */

				PIPE_UNLOCK(rpipe);
				error = uiomove(
				    &wpipe->pipe_buffer.buffer[0],
				    size - segsize, uio);
				PIPE_LOCK(rpipe);
			}
			if (error == 0) {
				wpipe->pipe_buffer.in += size;
				if (wpipe->pipe_buffer.in >=
				    wpipe->pipe_buffer.size) {
					KASSERT(wpipe->pipe_buffer.in ==
					    size - segsize +
					    wpipe->pipe_buffer.size,
					    ("Expected wraparound bad"));
					wpipe->pipe_buffer.in = size - segsize;
				}

				wpipe->pipe_buffer.cnt += size;
				KASSERT(wpipe->pipe_buffer.cnt <=
				    wpipe->pipe_buffer.size,
				    ("Pipe buffer overflow"));
			}
			pipeunlock(wpipe);
			if (error != 0)
				break;
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				pipeunlock(wpipe);
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			pipeunlock(wpipe);
			error = msleep(wpipe, PIPE_MTX(rpipe),
			    PRIBIO | PCATCH, "pipewr", 0);
			if (error != 0)
				break;
		}
	}

	pipelock(wpipe, 0);
	--wpipe->pipe_busy;

	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if any byte was written.
	 * EINTR and other interrupts are handled by generic I/O layer.
	 * Do not pretend that I/O succeeded for obvious user error
	 * like EFAULT.
	 */
	if (uio->uio_resid != orig_resid && error == EPIPE)
		error = 0;

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	pipeunlock(wpipe);
	PIPE_UNLOCK(rpipe);
	return (error);
}
|
|
|
|
|
2008-01-07 20:05:19 +00:00
|
|
|
/* ARGSUSED */
|
|
|
|
static int
|
2018-06-01 13:26:45 +00:00
|
|
|
pipe_truncate(struct file *fp, off_t length, struct ucred *active_cred,
|
|
|
|
struct thread *td)
|
2008-01-07 20:05:19 +00:00
|
|
|
{
|
2014-09-12 21:20:36 +00:00
|
|
|
struct pipe *cpipe;
|
|
|
|
int error;
|
2008-01-07 20:05:19 +00:00
|
|
|
|
2014-09-12 21:20:36 +00:00
|
|
|
cpipe = fp->f_data;
|
|
|
|
if (cpipe->pipe_state & PIPE_NAMED)
|
|
|
|
error = vnops.fo_truncate(fp, length, active_cred, td);
|
|
|
|
else
|
|
|
|
error = invfo_truncate(fp, length, active_cred, td);
|
|
|
|
return (error);
|
2008-01-07 20:05:19 +00:00
|
|
|
}
|
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
/*
 * we implement a very minimal set of ioctls for compatibility with sockets.
 *
 * Supported: FIONBIO (no-op here; handled generically), FIOASYNC,
 * FIONREAD, FIOSETOWN/FIOGETOWN and their deprecated TIOCSPGRP/TIOCGPGRP
 * equivalents.  Anything else returns ENOTTY.  fsetown() may sleep, so
 * the pipe mutex is dropped before calling it (hence out_unlocked).
 */
static int
pipe_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred,
    struct thread *td)
{
	struct pipe *mpipe = fp->f_data;
	int error;

	PIPE_LOCK(mpipe);

#ifdef MAC
	error = mac_pipe_check_ioctl(active_cred, mpipe->pipe_pair, cmd, data);
	if (error) {
		PIPE_UNLOCK(mpipe);
		return (error);
	}
#endif

	error = 0;
	switch (cmd) {

	case FIONBIO:
		break;

	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		break;

	case FIONREAD:
		/* A write-only descriptor reports zero readable bytes. */
		if (!(fp->f_flag & FREAD)) {
			*(int *)data = 0;
			PIPE_UNLOCK(mpipe);
			return (0);
		}
		/* During a direct write the pending data lives in pipe_map. */
		if (mpipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = mpipe->pipe_map.cnt;
		else
			*(int *)data = mpipe->pipe_buffer.cnt;
		break;

	case FIOSETOWN:
		PIPE_UNLOCK(mpipe);
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		goto out_unlocked;

	case FIOGETOWN:
		*(int *)data = fgetown(&mpipe->pipe_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		PIPE_UNLOCK(mpipe);
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		goto out_unlocked;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(&mpipe->pipe_sigio);
		break;

	default:
		error = ENOTTY;
		break;
	}
	PIPE_UNLOCK(mpipe);
out_unlocked:
	return (error);
}
|
|
|
|
|
2002-09-28 17:15:38 +00:00
|
|
|
/*
 * Poll entry point for pipes: report which of the requested 'events'
 * are currently true for this descriptor and, if none are, register
 * the calling thread for selwakeup notification on both halves.
 *
 * Returns the subset of 'events' that are ready (plus POLLHUP where
 * applicable), or 0 after recording the caller for later wakeup.
 */
static int
pipe_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int levents, revents;
#ifdef MAC
	int error;
#endif

	revents = 0;
	rpipe = fp->f_data;
	wpipe = PIPE_PEER(rpipe);
	PIPE_LOCK(rpipe);
#ifdef MAC
	/* MAC veto: fail the poll outright if policy denies it. */
	error = mac_pipe_check_poll(active_cred, rpipe->pipe_pair);
	if (error)
		goto locked_error;
#endif
	/* Readable: a pending direct write or buffered bytes. */
	if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_state & PIPE_DIRECTW) ||
		    (rpipe->pipe_buffer.cnt > 0))
			revents |= events & (POLLIN | POLLRDNORM);

	/*
	 * Writable: peer gone/EOF (write would not block — it fails), or
	 * no direct write in progress and either PIPE_BUF bytes of space
	 * or a not-yet-allocated (size 0) buffer.
	 */
	if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM))
		if (wpipe->pipe_present != PIPE_ACTIVE ||
		    (wpipe->pipe_state & PIPE_EOF) ||
		    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
		    ((wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF ||
		    wpipe->pipe_buffer.size == 0)))
			revents |= events & (POLLOUT | POLLWRNORM);

	/*
	 * For named-pipe readers whose open generation matches the current
	 * writer generation, behave as if POLLINIGNEOF was requested so
	 * that the EOF/POLLHUP reporting below is suppressed.
	 */
	levents = events &
	    (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM | POLLRDBAND);
	if (rpipe->pipe_state & PIPE_NAMED && fp->f_flag & FREAD && levents &&
	    fp->f_pipegen == rpipe->pipe_wgen)
		events |= POLLINIGNEOF;

	/* Report EOF as readable, and HUP once the peer is gone too. */
	if ((events & POLLINIGNEOF) == 0) {
		if (rpipe->pipe_state & PIPE_EOF) {
			revents |= (events & (POLLIN | POLLRDNORM));
			if (wpipe->pipe_present != PIPE_ACTIVE ||
			    (wpipe->pipe_state & PIPE_EOF))
				revents |= POLLHUP;
		}
	}

	/*
	 * Nothing ready: record the caller on the relevant selinfo(s) so
	 * pipeselwakeup() can find it.  PIPE_SEL is only set when someone
	 * is actually waiting, avoiding needless wakeup work later.
	 */
	if (revents == 0) {
		if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM)) {
			selrecord(td, &rpipe->pipe_sel);
			if (SEL_WAITING(&rpipe->pipe_sel))
				rpipe->pipe_state |= PIPE_SEL;
		}

		if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &wpipe->pipe_sel);
			if (SEL_WAITING(&wpipe->pipe_sel))
				wpipe->pipe_state |= PIPE_SEL;
		}
	}
#ifdef MAC
locked_error:
#endif
	PIPE_UNLOCK(rpipe);

	return (revents);
}
|
|
|
|
|
2002-06-28 22:35:12 +00:00
|
|
|
/*
 * No locking is needed for the buffer-count reads below: a momentarily
 * stale value is harmless, so this is an acceptable, natural race.
 */
|
1999-11-08 03:28:49 +00:00
|
|
|
static int
|
2018-06-01 13:26:45 +00:00
|
|
|
pipe_stat(struct file *fp, struct stat *ub, struct ucred *active_cred,
|
|
|
|
struct thread *td)
|
1996-01-28 23:38:26 +00:00
|
|
|
{
|
2011-12-06 11:24:03 +00:00
|
|
|
struct pipe *pipe;
|
2002-08-13 02:47:13 +00:00
|
|
|
#ifdef MAC
|
|
|
|
int error;
|
2011-12-06 11:24:03 +00:00
|
|
|
#endif
|
1999-11-08 03:28:49 +00:00
|
|
|
|
2011-12-06 11:24:03 +00:00
|
|
|
pipe = fp->f_data;
|
2002-10-01 04:30:19 +00:00
|
|
|
PIPE_LOCK(pipe);
|
2011-12-06 11:24:03 +00:00
|
|
|
#ifdef MAC
|
2007-10-24 19:04:04 +00:00
|
|
|
error = mac_pipe_check_stat(active_cred, pipe->pipe_pair);
|
2011-12-06 11:24:03 +00:00
|
|
|
if (error) {
|
|
|
|
PIPE_UNLOCK(pipe);
|
2002-08-13 02:47:13 +00:00
|
|
|
return (error);
|
2011-12-06 11:24:03 +00:00
|
|
|
}
|
2002-08-13 02:47:13 +00:00
|
|
|
#endif
|
2012-02-23 18:37:30 +00:00
|
|
|
|
|
|
|
/* For named pipes ask the underlying filesystem. */
|
|
|
|
if (pipe->pipe_state & PIPE_NAMED) {
|
|
|
|
PIPE_UNLOCK(pipe);
|
|
|
|
return (vnops.fo_stat(fp, ub, active_cred, td));
|
|
|
|
}
|
|
|
|
|
2011-12-06 11:24:03 +00:00
|
|
|
PIPE_UNLOCK(pipe);
|
|
|
|
|
2002-07-22 19:05:44 +00:00
|
|
|
bzero(ub, sizeof(*ub));
|
1996-07-12 08:14:58 +00:00
|
|
|
ub->st_mode = S_IFIFO;
|
2004-08-16 01:27:24 +00:00
|
|
|
ub->st_blksize = PAGE_SIZE;
|
2004-07-20 07:06:43 +00:00
|
|
|
if (pipe->pipe_state & PIPE_DIRECTW)
|
|
|
|
ub->st_size = pipe->pipe_map.cnt;
|
|
|
|
else
|
|
|
|
ub->st_size = pipe->pipe_buffer.cnt;
|
2016-04-26 15:38:17 +00:00
|
|
|
ub->st_blocks = howmany(ub->st_size, ub->st_blksize);
|
2010-03-28 13:13:22 +00:00
|
|
|
ub->st_atim = pipe->pipe_atime;
|
|
|
|
ub->st_mtim = pipe->pipe_mtime;
|
|
|
|
ub->st_ctim = pipe->pipe_ctime;
|
2000-05-11 22:08:20 +00:00
|
|
|
ub->st_uid = fp->f_cred->cr_uid;
|
|
|
|
ub->st_gid = fp->f_cred->cr_gid;
|
2011-10-05 16:56:06 +00:00
|
|
|
ub->st_dev = pipedev_ino;
|
|
|
|
ub->st_ino = pipe->pipe_ino;
|
1996-07-12 08:14:58 +00:00
|
|
|
/*
|
2011-10-05 16:56:06 +00:00
|
|
|
* Left as 0: st_nlink, st_rdev, st_flags, st_gen.
|
1996-07-12 08:14:58 +00:00
|
|
|
*/
|
2001-05-17 19:47:09 +00:00
|
|
|
return (0);
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* ARGSUSED */
|
|
|
|
static int
|
2018-06-01 13:26:45 +00:00
|
|
|
pipe_close(struct file *fp, struct thread *td)
|
1996-01-28 23:38:26 +00:00
|
|
|
{
|
1996-06-12 05:11:41 +00:00
|
|
|
|
2012-02-23 18:37:30 +00:00
|
|
|
if (fp->f_vnode != NULL)
|
|
|
|
return vnops.fo_close(fp, td);
|
1999-08-04 18:53:50 +00:00
|
|
|
fp->f_ops = &badfileops;
|
2012-02-23 18:37:30 +00:00
|
|
|
pipe_dtor(fp->f_data);
|
2003-01-13 00:33:17 +00:00
|
|
|
fp->f_data = NULL;
|
2001-05-17 19:47:09 +00:00
|
|
|
return (0);
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
|
|
|
|
2012-02-26 15:14:29 +00:00
|
|
|
static int
|
2012-02-28 21:45:21 +00:00
|
|
|
pipe_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, struct thread *td)
|
2012-02-26 15:14:29 +00:00
|
|
|
{
|
|
|
|
struct pipe *cpipe;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
cpipe = fp->f_data;
|
|
|
|
if (cpipe->pipe_state & PIPE_NAMED)
|
|
|
|
error = vn_chmod(fp, mode, active_cred, td);
|
|
|
|
else
|
|
|
|
error = invfo_chmod(fp, mode, active_cred, td);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2018-06-01 13:26:45 +00:00
|
|
|
pipe_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
|
|
|
|
struct thread *td)
|
2012-02-26 15:14:29 +00:00
|
|
|
{
|
|
|
|
struct pipe *cpipe;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
cpipe = fp->f_data;
|
|
|
|
if (cpipe->pipe_state & PIPE_NAMED)
|
|
|
|
error = vn_chown(fp, uid, gid, active_cred, td);
|
|
|
|
else
|
|
|
|
error = invfo_chown(fp, uid, gid, active_cred, td);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2014-09-22 16:20:47 +00:00
|
|
|
static int
|
|
|
|
pipe_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
|
|
|
|
{
|
|
|
|
struct pipe *pi;
|
|
|
|
|
|
|
|
if (fp->f_type == DTYPE_FIFO)
|
|
|
|
return (vn_fill_kinfo(fp, kif, fdp));
|
|
|
|
kif->kf_type = KF_TYPE_PIPE;
|
|
|
|
pi = fp->f_data;
|
|
|
|
kif->kf_un.kf_pipe.kf_pipe_addr = (uintptr_t)pi;
|
|
|
|
kif->kf_un.kf_pipe.kf_pipe_peer = (uintptr_t)pi->pipe_peer;
|
|
|
|
kif->kf_un.kf_pipe.kf_pipe_buffer_cnt = pi->pipe_buffer.cnt;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2001-05-08 09:09:18 +00:00
|
|
|
/*
 * Release the VM resources backing one pipe endpoint: the pageable
 * ring buffer's KVA mapping and the direct-write bookkeeping.
 *
 * Must be called with the pipe mutex unlocked (asserted below) —
 * presumably because vm_map_remove() may sleep; confirm against the
 * callers before relying on this from new code.
 */
static void
pipe_free_kmem(struct pipe *cpipe)
{

	KASSERT(!mtx_owned(PIPE_MTX(cpipe)),
	    ("pipe_free_kmem: pipe mutex locked"));

	if (cpipe->pipe_buffer.buffer != NULL) {
		/* Give back the buffer KVA and update global accounting. */
		atomic_subtract_long(&amountpipekva, cpipe->pipe_buffer.size);
		vm_map_remove(pipe_map,
		    (vm_offset_t)cpipe->pipe_buffer.buffer,
		    (vm_offset_t)cpipe->pipe_buffer.buffer + cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	{
		/* Reset the direct-write transfer state. */
		cpipe->pipe_map.cnt = 0;
		cpipe->pipe_map.pos = 0;
		cpipe->pipe_map.npages = 0;
	}
#endif
}
|
|
|
|
|
1996-01-28 23:38:26 +00:00
|
|
|
/*
|
|
|
|
* shutdown the pipe
|
|
|
|
*/
|
|
|
|
static void
|
2018-06-01 13:26:45 +00:00
|
|
|
pipeclose(struct pipe *cpipe)
|
1996-01-28 23:38:26 +00:00
|
|
|
{
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
struct pipepair *pp;
|
1996-02-04 19:56:35 +00:00
|
|
|
struct pipe *ppipe;
|
2002-03-09 22:06:31 +00:00
|
|
|
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
KASSERT(cpipe != NULL, ("pipeclose: cpipe == NULL"));
|
2002-03-09 22:06:31 +00:00
|
|
|
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
PIPE_LOCK(cpipe);
|
2004-08-03 02:59:15 +00:00
|
|
|
pipelock(cpipe, 0);
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
pp = cpipe->pipe_pair;
|
2004-01-11 19:54:45 +00:00
|
|
|
|
2002-03-09 22:06:31 +00:00
|
|
|
pipeselwakeup(cpipe);
|
1996-01-28 23:38:26 +00:00
|
|
|
|
2002-03-09 22:06:31 +00:00
|
|
|
/*
|
|
|
|
* If the other side is blocked, wake it up saying that
|
|
|
|
* we want to close it down.
|
|
|
|
*/
|
2004-02-22 23:00:14 +00:00
|
|
|
cpipe->pipe_state |= PIPE_EOF;
|
2002-03-09 22:06:31 +00:00
|
|
|
while (cpipe->pipe_busy) {
|
|
|
|
wakeup(cpipe);
|
2004-02-22 23:00:14 +00:00
|
|
|
cpipe->pipe_state |= PIPE_WANT;
|
2004-08-03 02:59:15 +00:00
|
|
|
pipeunlock(cpipe);
|
2002-03-09 22:06:31 +00:00
|
|
|
msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
|
2004-08-03 02:59:15 +00:00
|
|
|
pipelock(cpipe, 0);
|
2002-03-09 22:06:31 +00:00
|
|
|
}
|
1996-02-04 19:56:35 +00:00
|
|
|
|
2002-08-13 02:47:13 +00:00
|
|
|
|
2002-03-09 22:06:31 +00:00
|
|
|
/*
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
* Disconnect from peer, if any.
|
2002-03-09 22:06:31 +00:00
|
|
|
*/
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
ppipe = cpipe->pipe_peer;
|
2008-05-23 11:14:03 +00:00
|
|
|
if (ppipe->pipe_present == PIPE_ACTIVE) {
|
2002-03-09 22:06:31 +00:00
|
|
|
pipeselwakeup(ppipe);
|
|
|
|
|
|
|
|
ppipe->pipe_state |= PIPE_EOF;
|
|
|
|
wakeup(ppipe);
|
2004-08-15 06:24:42 +00:00
|
|
|
KNOTE_LOCKED(&ppipe->pipe_sel.si_note, 0);
|
2002-03-09 22:06:31 +00:00
|
|
|
}
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
|
2002-03-09 22:06:31 +00:00
|
|
|
/*
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
* Mark this endpoint as free. Release kmem resources. We
|
|
|
|
* don't mark this endpoint as unused until we've finished
|
|
|
|
* doing that, or the pipe might disappear out from under
|
|
|
|
* us.
|
2002-03-09 22:06:31 +00:00
|
|
|
*/
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
PIPE_UNLOCK(cpipe);
|
2002-03-09 22:06:31 +00:00
|
|
|
pipe_free_kmem(cpipe);
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
PIPE_LOCK(cpipe);
|
2008-05-23 11:14:03 +00:00
|
|
|
cpipe->pipe_present = PIPE_CLOSING;
|
2004-02-22 23:00:14 +00:00
|
|
|
pipeunlock(cpipe);
|
2008-05-23 11:14:03 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* knlist_clear() may sleep dropping the PIPE_MTX. Set the
|
|
|
|
* PIPE_FINALIZED, that allows other end to free the
|
|
|
|
* pipe_pair, only after the knotes are completely dismantled.
|
|
|
|
*/
|
2004-08-15 06:24:42 +00:00
|
|
|
knlist_clear(&cpipe->pipe_sel.si_note, 1);
|
2008-05-23 11:14:03 +00:00
|
|
|
cpipe->pipe_present = PIPE_FINALIZED;
|
Fix a deficiency in the selinfo interface:
If a selinfo object is recorded (via selrecord()) and then it is
quickly destroyed, with the waiters missing the opportunity to awake,
at the next iteration they will find the selinfo object destroyed,
causing a PF#.
That happens because the selinfo interface has no way to drain the
waiters before destroying the registered selinfo object. Also this
race is quite rare in practice, because it would require a
selrecord(), a poll request by another thread and a quick destruction
of the selrecord()'ed selinfo object.
Fix this by adding the seldrain() routine, which should be called
before destroying the selinfo objects (in order to avoid such a case),
and fix the present cases where it might have already been called.
Sometimes, the context is safe enough to prevent this type of race,
like it happens in device drivers which installs selinfo objects on
poll callbacks. There, the destruction of the selinfo object happens
at driver detach time, when all the filedescriptors should be already
closed, thus there cannot be a race.
For this case, mfi(4) device driver can be set as an example, as it
implements a full correct logic for preventing this from happening.
Sponsored by: Sandvine Incorporated
Reported by: rstone
Tested by: pluknet
Reviewed by: jhb, kib
Approved by: re (bz)
MFC after: 3 weeks
2011-08-25 15:51:54 +00:00
|
|
|
seldrain(&cpipe->pipe_sel);
|
2004-08-15 06:24:42 +00:00
|
|
|
knlist_destroy(&cpipe->pipe_sel.si_note);
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If both endpoints are now closed, release the memory for the
|
|
|
|
* pipe pair. If not, unlock.
|
|
|
|
*/
|
2008-05-23 11:14:03 +00:00
|
|
|
if (ppipe->pipe_present == PIPE_FINALIZED) {
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
PIPE_UNLOCK(cpipe);
|
|
|
|
#ifdef MAC
|
2007-10-24 19:04:04 +00:00
|
|
|
mac_pipe_destroy(pp);
|
Coalesce pipe allocations and frees. Previously, the pipe code
would allocate two 'struct pipe's from the pipe zone, and malloc a
mutex.
- Create a new "struct pipepair" object holding the two 'struct
pipe' instances, struct mutex, and struct label reference. Pipe
structures now have a back-pointer to the pipe pair, and a
'pipe_present' flag to indicate whether the half has been
closed.
- Perform mutex init/destroy in zone init/destroy, avoiding
reallocating the mutex for each pipe. Perform most pipe structure
setup in zone constructor.
- VM memory mappings for pageable buffers are still done outside of
the UMA zone.
- Change MAC API to speak 'struct pipepair' instead of 'struct pipe',
update many policies. MAC labels are also handled outside of the
UMA zone for now. Label-only policy modules don't have to be
recompiled, but if a module is recompiled, its pipe entry points
will need to be updated. If a module actually reached into the
pipe structures (unlikely), that would also need to be modified.
These changes substantially simplify failure handling in the pipe
code as there are many fewer possible failure modes.
On half-close, pipes no longer free the 'struct pipe' for the closed
half until a full-close takes place. However, VM mapped buffers
are still released on half-close.
Some code refactoring is now possible to clean up some of the back
references, etc; this patch attempts not to change the structure
of most of the pipe implementation, only allocation/free code
paths, so as to avoid introducing bugs (hopefully).
This cuts about 8%-9% off the cost of sequential pipe allocation
and free in system call tests on UP and SMP in my micro-benchmarks.
May or may not make a difference in macro-benchmarks, but doing
less work is good.
Reviewed by: juli, tjr
Testing help: dwhite, fenestro, scottl, et al
2004-02-01 05:56:51 +00:00
|
|
|
#endif
|
|
|
|
uma_zfree(pipe_zone, cpipe->pipe_pair);
|
|
|
|
} else
|
|
|
|
PIPE_UNLOCK(cpipe);
|
1996-01-28 23:38:26 +00:00
|
|
|
}
|
2000-04-16 18:53:38 +00:00
|
|
|
|
2001-02-15 16:34:11 +00:00
|
|
|
/*ARGSUSED*/
/*
 * kqueue attach routine for pipes.  Select the filter ops for the
 * requested event type and hook the knote onto the appropriate pipe
 * endpoint's selinfo note list.
 *
 * Returns 0 on success, EPIPE if a write filter is requested but the
 * peer endpoint is no longer present, or EINVAL for an unknown filter.
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	/*
	 * If a filter is requested that is not supported by this file
	 * descriptor, don't return an error, but also don't ever generate an
	 * event.
	 */
	if ((kn->kn_filter == EVFILT_READ) && !(fp->f_flag & FREAD)) {
		kn->kn_fop = &pipe_nfiltops;
		return (0);
	}
	if ((kn->kn_filter == EVFILT_WRITE) && !(fp->f_flag & FWRITE)) {
		kn->kn_fop = &pipe_nfiltops;
		return (0);
	}
	cpipe = fp->f_data;
	PIPE_LOCK(cpipe);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		if (cpipe->pipe_peer->pipe_present != PIPE_ACTIVE) {
			/* other end of pipe has been closed */
			PIPE_UNLOCK(cpipe);
			return (EPIPE);
		}
		/* Write events are tracked on the peer endpoint. */
		cpipe = PIPE_PEER(cpipe);
		break;
	default:
		PIPE_UNLOCK(cpipe);
		return (EINVAL);
	}

	kn->kn_hook = cpipe;
	/* '1' => the knlist lock (the pipe mutex) is already held here. */
	knlist_add(&cpipe->pipe_sel.si_note, kn, 1);
	PIPE_UNLOCK(cpipe);
	return (0);
}
|
|
|
|
|
|
|
|
/*
 * kqueue detach routine: unhook the knote from the pipe endpoint it
 * was attached to in pipe_kqfilter() (stashed in kn_hook).
 */
static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = kn->kn_hook;

	PIPE_LOCK(cpipe);
	/* '1' => the knlist lock (the pipe mutex) is already held here. */
	knlist_remove(&cpipe->pipe_sel.si_note, kn, 1);
	PIPE_UNLOCK(cpipe);
}
|
|
|
|
|
|
|
|
/*ARGSUSED*/
/*
 * kqueue read filter: report in kn_data the number of bytes available
 * for reading.  Returns non-zero (event ready) when data is buffered,
 * or immediately with EV_EOF when either endpoint has seen EOF or the
 * write side is no longer present.
 */
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = kn->kn_hook;
	struct pipe *wpipe = rpipe->pipe_peer;
	int ret;

	PIPE_LOCK_ASSERT(rpipe, MA_OWNED);
	kn->kn_data = rpipe->pipe_buffer.cnt;
	/*
	 * With an in-flight direct (page-pinned) write and an empty kernel
	 * buffer, report the directly mapped byte count instead.
	 */
	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    wpipe->pipe_present != PIPE_ACTIVE ||
	    (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	ret = kn->kn_data > 0;

	return ret;
}
|
|
|
|
|
|
|
|
/*ARGSUSED*/
/*
 * kqueue write filter: report in kn_data the space remaining in the
 * write endpoint's buffer.  Returns non-zero (event ready) once at
 * least PIPE_BUF bytes can be written, or immediately with EV_EOF when
 * the endpoint is gone or has seen EOF.
 */
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *wpipe;

	/*
	 * If this end of the pipe is closed, the knote was removed from the
	 * knlist and the list lock (i.e., the pipe lock) is therefore not held.
	 */
	wpipe = kn->kn_hook;
	if (wpipe->pipe_present != PIPE_ACTIVE ||
	    (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	/* Only assert the lock after the closed-endpoint check above. */
	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	/* An unallocated (zero-size) buffer still admits PIPE_BUF bytes. */
	kn->kn_data = (wpipe->pipe_buffer.size > 0) ?
	    (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) : PIPE_BUF;
	/* No space is reported while a direct write holds the buffer. */
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	return (kn->kn_data >= PIPE_BUF);
}
|
2012-02-23 18:37:30 +00:00
|
|
|
|
|
|
|
/*
 * Detach routine for knotes attached with the "not supported" filter
 * ops (pipe_nfiltops): nothing was hooked, so nothing to unhook.
 */
static void
filt_pipedetach_notsup(struct knote *kn)
{

}
|
|
|
|
|
|
|
|
/*
 * Event filter for requests the descriptor cannot satisfy (e.g. a read
 * filter on a write-only end, see pipe_kqfilter()): never reports an
 * event.
 */
static int
filt_pipenotsup(struct knote *kn, long hint)
{

	return (0);
}
|