2005-01-06 23:35:40 +00:00
|
|
|
/*-
|
1994-05-24 10:09:53 +00:00
|
|
|
* Copyright (c) 1982, 1986, 1989, 1991, 1993
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
|
|
|
* (c) UNIX System Laboratories, Inc.
|
|
|
|
* All or some portions of this file are derived from material licensed
|
|
|
|
* to the University of California by American Telephone and Telegraph
|
|
|
|
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
|
|
|
|
* the permission of UNIX System Laboratories, Inc.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 4. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* @(#)kern_descrip.c 8.6 (Berkeley) 4/19/94
|
|
|
|
*/
|
|
|
|
|
2003-06-11 00:56:59 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
2011-07-05 13:45:10 +00:00
|
|
|
#include "opt_capsicum.h"
|
1997-12-16 17:40:42 +00:00
|
|
|
#include "opt_compat.h"
|
2005-11-10 10:42:50 +00:00
|
|
|
#include "opt_ddb.h"
|
2008-02-23 01:01:49 +00:00
|
|
|
#include "opt_ktrace.h"
|
Add experimental support for process descriptors
A "process descriptor" file descriptor is used to manage processes
without using the PID namespace. This is required for Capsicum's
Capability Mode, where the PID namespace is unavailable.
New system calls pdfork(2) and pdkill(2) offer the functional equivalents
of fork(2) and kill(2). pdgetpid(2) allows querying the PID of the remote
process for debugging purposes. The currently-unimplemented pdwait(2) will,
in the future, allow querying rusage/exit status. In the interim, poll(2)
may be used to check (and wait for) process termination.
When a process is referenced by a process descriptor, it does not issue
SIGCHLD to the parent, making it suitable for use in libraries---a common
scenario when using library compartmentalisation from within large
applications (such as web browsers). Some observers may note a similarity
to Mach task ports; process descriptors provide a subset of this behaviour,
but in a UNIX style.
This feature is enabled by "options PROCDESC", but as with several other
Capsicum kernel features, is not enabled by default in GENERIC 9.0.
Reviewed by: jhb, kib
Approved by: re (kib), mentor (rwatson)
Sponsored by: Google Inc
2011-08-18 22:51:30 +00:00
|
|
|
#include "opt_procdesc.h"
|
2001-08-23 13:19:32 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
2004-12-03 21:29:25 +00:00
|
|
|
|
2011-07-05 13:45:10 +00:00
|
|
|
#include <sys/capability.h>
|
1995-12-02 18:58:56 +00:00
|
|
|
#include <sys/conf.h>
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
#include <sys/domain.h>
|
2004-12-03 21:29:25 +00:00
|
|
|
#include <sys/fcntl.h>
|
|
|
|
#include <sys/file.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/filedesc.h>
|
2004-12-03 21:29:25 +00:00
|
|
|
#include <sys/filio.h>
|
2004-07-14 19:04:31 +00:00
|
|
|
#include <sys/jail.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/kernel.h>
|
2003-04-29 13:36:06 +00:00
|
|
|
#include <sys/limits.h>
|
2004-12-03 21:29:25 +00:00
|
|
|
#include <sys/lock.h>
|
2002-10-16 15:14:31 +00:00
|
|
|
#include <sys/malloc.h>
|
2012-04-01 18:22:48 +00:00
|
|
|
#include <sys/mman.h>
|
2002-09-14 09:02:28 +00:00
|
|
|
#include <sys/mount.h>
|
2005-11-30 05:12:03 +00:00
|
|
|
#include <sys/mqueue.h>
|
2004-12-03 21:29:25 +00:00
|
|
|
#include <sys/mutex.h>
|
2002-04-19 00:45:29 +00:00
|
|
|
#include <sys/namei.h>
|
2011-05-12 10:11:39 +00:00
|
|
|
#include <sys/selinfo.h>
|
|
|
|
#include <sys/pipe.h>
|
2006-11-06 13:42:10 +00:00
|
|
|
#include <sys/priv.h>
|
2004-12-03 21:29:25 +00:00
|
|
|
#include <sys/proc.h>
|
Add experimental support for process descriptors
A "process descriptor" file descriptor is used to manage processes
without using the PID namespace. This is required for Capsicum's
Capability Mode, where the PID namespace is unavailable.
New system calls pdfork(2) and pdkill(2) offer the functional equivalents
of fork(2) and kill(2). pdgetpid(2) allows querying the PID of the remote
process for debugging purposes. The currently-unimplemented pdwait(2) will,
in the future, allow querying rusage/exit status. In the interim, poll(2)
may be used to check (and wait for) process termination.
When a process is referenced by a process descriptor, it does not issue
SIGCHLD to the parent, making it suitable for use in libraries---a common
scenario when using library compartmentalisation from within large
applications (such as web browsers). Some observers may note a similarity
to Mach task ports; process descriptors provide a subset of this behaviour,
but in a UNIX style.
This feature is enabled by "options PROCDESC", but as with several other
Capsicum kernel features, is not enabled by default in GENERIC 9.0.
Reviewed by: jhb, kib
Approved by: re (kib), mentor (rwatson)
Sponsored by: Google Inc
2011-08-18 22:51:30 +00:00
|
|
|
#include <sys/procdesc.h>
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
#include <sys/protosw.h>
|
2011-04-06 19:13:04 +00:00
|
|
|
#include <sys/racct.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/resourcevar.h>
|
2002-05-01 20:44:46 +00:00
|
|
|
#include <sys/signalvar.h>
|
2004-12-03 21:29:25 +00:00
|
|
|
#include <sys/socketvar.h>
|
|
|
|
#include <sys/stat.h>
|
|
|
|
#include <sys/sx.h>
|
|
|
|
#include <sys/syscallsubr.h>
|
|
|
|
#include <sys/sysctl.h>
|
|
|
|
#include <sys/sysproto.h>
|
Integrate the new MPSAFE TTY layer to the FreeBSD operating system.
The last half year I've been working on a replacement TTY layer for the
FreeBSD kernel. The new TTY layer was designed to improve the following:
- Improved driver model:
The old TTY layer has a driver model that is not abstract enough to
make it friendly to use. A good example is the output path, where the
device drivers directly access the output buffers. This means that an
in-kernel PPP implementation must always convert network buffers into
TTY buffers.
If a PPP implementation would be built on top of the new TTY layer
(still needs a hooks layer, though), it would allow the PPP
implementation to directly hand the data to the TTY driver.
- Improved hotplugging:
With the old TTY layer, it isn't entirely safe to destroy TTY's from
the system. This implementation has a two-step destructing design,
where the driver first abandons the TTY. After all threads have left
the TTY, the TTY layer calls a routine in the driver, which can be
used to free resources (unit numbers, etc).
The pts(4) driver also implements this feature, which means
posix_openpt() will now return PTY's that are created on the fly.
- Improved performance:
One of the major improvements is the per-TTY mutex, which is expected
to improve scalability when compared to the old Giant locking.
Another change is the unbuffered copying to userspace, which is both
used on TTY device nodes and PTY masters.
Upgrading should be quite straightforward. Unlike previous versions,
existing kernel configuration files do not need to be changed, except
when they reference device drivers that are listed in UPDATING.
Obtained from: //depot/projects/mpsafetty/...
Approved by: philip (ex-mentor)
Discussed: on the lists, at BSDCan, at the DevSummit
Sponsored by: Snow B.V., the Netherlands
dcons(4) fixed by: kan
2008-08-20 08:31:58 +00:00
|
|
|
#include <sys/tty.h>
|
2004-12-03 21:29:25 +00:00
|
|
|
#include <sys/unistd.h>
|
2011-05-12 10:11:39 +00:00
|
|
|
#include <sys/un.h>
|
|
|
|
#include <sys/unpcb.h>
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
#include <sys/user.h>
|
2004-12-03 21:29:25 +00:00
|
|
|
#include <sys/vnode.h>
|
2008-02-23 01:01:49 +00:00
|
|
|
#ifdef KTRACE
|
|
|
|
#include <sys/ktrace.h>
|
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2011-02-16 21:29:13 +00:00
|
|
|
#include <net/vnet.h>
|
|
|
|
|
2011-05-12 10:11:39 +00:00
|
|
|
#include <netinet/in.h>
|
|
|
|
#include <netinet/in_pcb.h>
|
|
|
|
|
2006-02-05 23:57:32 +00:00
|
|
|
#include <security/audit/audit.h>
|
|
|
|
|
2002-03-20 04:09:59 +00:00
|
|
|
#include <vm/uma.h>
|
2011-07-05 13:45:10 +00:00
|
|
|
#include <vm/vm.h>
|
1995-12-05 21:51:45 +00:00
|
|
|
|
2005-11-10 10:42:50 +00:00
|
|
|
#include <ddb/ddb.h>
|
|
|
|
|
2005-10-31 15:41:29 +00:00
|
|
|
static MALLOC_DEFINE(M_FILEDESC, "filedesc", "Open file descriptor table");
|
|
|
|
static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "filedesc_to_leader",
|
2012-06-14 15:21:57 +00:00
|
|
|
"file desc to leader structures");
|
Installed the second patch attached to kern/7899 with some changes suggested
by bde, a few other tweaks to get the patch to apply cleanly again and
some improvements to the comments.
This change closes some fairly minor security holes associated with
F_SETOWN, fixes a few bugs, and removes some limitations that F_SETOWN
had on tty devices. For more details, see the description on the PR.
Because this patch increases the size of the proc and pgrp structures,
it is necessary to re-install the includes and recompile libkvm,
the vinum lkm, fstat, gcore, gdb, ipfilter, ps, top, and w.
PR: kern/7899
Reviewed by: bde, elvind
1998-11-11 10:04:13 +00:00
|
|
|
static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
|
1997-10-12 20:26:33 +00:00
|
|
|
|
2012-03-08 20:34:13 +00:00
|
|
|
MALLOC_DECLARE(M_FADVISE);
|
|
|
|
|
2003-04-30 12:57:40 +00:00
|
|
|
static uma_zone_t file_zone;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
1995-11-29 10:49:16 +00:00
|
|
|
|
2008-05-28 20:25:19 +00:00
|
|
|
/* Flags for do_dup() */
|
|
|
|
#define DUP_FIXED 0x1 /* Force fixed allocation */
|
|
|
|
#define DUP_FCNTL 0x2 /* fcntl()-style errors */
|
- Change falloc() to acquire an fd from the process table last so that
it can do it w/o needing to hold the filelist_lock sx lock.
- fdalloc() doesn't need Giant to call free() anymore. It also doesn't
need to drop and reacquire the filedesc lock around free() now as a
result.
- Try to make the code that copies fd tables when extending the fd table in
fdalloc() a bit more readable by performing assignments in separate
statements. This is still a bit ugly though.
- Use max() instead of an if statement so to figure out the starting point
in the search-for-a-free-fd loop in fdalloc() so it reads better next to
the min() in the previous line.
- Don't grow nfiles in steps up to the size needed if we dup2() to some
really large number. Go ahead and double 'nfiles' in a loop prior
to doing the malloc().
- malloc() doesn't need Giant now.
- Use malloc() and free() instead of MALLOC() and FREE() in fdalloc().
- Check to see if the size we are going to grow to is too big, not if the
current size of the fd table is too big in the loop in fdalloc(). This
means if we are out of space or if dup2() requests too high of a fd,
then we will return an error before we go off and try to allocate some
huge table and copy the existing table into it.
- Move all of the logic for dup'ing a file descriptor into do_dup() instead
of putting some of it in do_dup() and duplicating other parts in four
different places. This makes dup(), dup2(), and fcntl(F_DUPFD) basically
wrappers of do_dup now. fcntl() still has an extra check since it uses
a different error return value in one case then the other functions.
- Add a KASSERT() for an assertion that may not always be true where the
fdcheckstd() function assumes that falloc() returns the fd requested and
not some other fd. I think that the assertion is always true because we
are always single-threaded when we get to this point, but if one was
using rfork() and another process sharing the fd table were playing with
the fd table, there might could be a problem.
- To handle the problem of a file descriptor we are dup()'ing being closed
out from under us in dup() in general, do_dup() now obtains a reference
on the file in question before calling fdalloc(). If after the call to
fdalloc() the file for the fd we are dup'ing is a different file, then
we drop our reference on the original file and return EBADF. This
race was only handled in the dup2() case before and would just retry
the operation. The error return allows the user to know they are being
stupid since they have a locking bug in their app instead of dup'ing
some other descriptor and returning it to them.
Tested on: i386, alpha, sparc64
2002-09-03 20:16:31 +00:00
|
|
|
|
2012-06-15 10:00:29 +00:00
|
|
|
static int closefp(struct filedesc *fdp, int fd, struct file *fp,
|
|
|
|
struct thread *td, int holdleaders);
|
|
|
|
static int do_dup(struct thread *td, int flags, int old, int new,
|
|
|
|
register_t *retval);
|
|
|
|
static int fd_first_free(struct filedesc *fdp, int low, int size);
|
|
|
|
static int fd_last_used(struct filedesc *fdp, int size);
|
|
|
|
static void fdgrowtable(struct filedesc *fdp, int nfd);
|
|
|
|
static void fdunused(struct filedesc *fdp, int fd);
|
|
|
|
static void fdused(struct filedesc *fdp, int fd);
|
|
|
|
static int fill_pipe_info(struct pipe *pi, struct kinfo_file *kif);
|
|
|
|
static int fill_procdesc_info(struct procdesc *pdp,
|
|
|
|
struct kinfo_file *kif);
|
|
|
|
static int fill_pts_info(struct tty *tp, struct kinfo_file *kif);
|
|
|
|
static int fill_shm_info(struct file *fp, struct kinfo_file *kif);
|
|
|
|
static int fill_socket_info(struct socket *so, struct kinfo_file *kif);
|
|
|
|
static int fill_vnode_info(struct vnode *vp, struct kinfo_file *kif);
|
1999-11-08 03:27:14 +00:00
|
|
|
|
2004-11-07 15:34:45 +00:00
|
|
|
/*
|
|
|
|
* A process is initially started out with NDFILE descriptors stored within
|
|
|
|
* this structure, selected to be enough for typical applications based on
|
|
|
|
* the historical limit of 20 open files (and the usage of descriptors by
|
|
|
|
* shells). If these descriptors are exhausted, a larger descriptor table
|
|
|
|
* may be allocated, up to a process' resource limit; the internal arrays
|
|
|
|
* are then unused.
|
|
|
|
*/
|
|
|
|
#define NDFILE 20
|
|
|
|
#define NDSLOTSIZE sizeof(NDSLOTTYPE)
|
|
|
|
#define NDENTRIES (NDSLOTSIZE * __CHAR_BIT)
|
|
|
|
#define NDSLOT(x) ((x) / NDENTRIES)
|
|
|
|
#define NDBIT(x) ((NDSLOTTYPE)1 << ((x) % NDENTRIES))
|
|
|
|
#define NDSLOTS(x) (((x) + NDENTRIES - 1) / NDENTRIES)
|
|
|
|
|
2004-11-14 09:21:01 +00:00
|
|
|
/*
|
|
|
|
* Storage required per open file descriptor.
|
|
|
|
*/
|
|
|
|
#define OFILESIZE (sizeof(struct file *) + sizeof(char))
|
|
|
|
|
2009-05-14 03:24:22 +00:00
|
|
|
/*
|
|
|
|
* Storage to hold unused ofiles that need to be reclaimed.
|
|
|
|
*/
|
|
|
|
struct freetable {
	struct file **ft_table;		/* old ofiles array awaiting reclaim */
	SLIST_ENTRY(freetable) ft_next;	/* link on filedesc0 fd_free list */
};
|
|
|
|
|
2004-11-07 15:34:45 +00:00
|
|
|
/*
|
|
|
|
* Basic allocation of descriptors:
|
|
|
|
* one of the above, plus arrays for NDFILE descriptors.
|
|
|
|
*/
|
|
|
|
struct filedesc0 {
	struct filedesc fd_fd;		/* embedded descriptor table header */
	/*
	 * ofiles which need to be reclaimed on free.
	 */
	SLIST_HEAD(,freetable) fd_free;
	/*
	 * These arrays are used when the number of open files is
	 * <= NDFILE, and are then pointed to by the pointers above.
	 */
	struct file *fd_dfiles[NDFILE];	/* inline file pointer array */
	char fd_dfileflags[NDFILE];	/* inline per-descriptor flags */
	NDSLOTTYPE fd_dmap[NDSLOTS(NDFILE)]; /* inline allocation bitmap */
};
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Descriptor management.
|
|
|
|
*/
|
2007-12-30 01:42:15 +00:00
|
|
|
volatile int openfiles; /* actual number of open files */
|
2002-05-01 20:44:46 +00:00
|
|
|
struct mtx sigio_lock; /* mtx to protect pointers to sigio */
|
2012-06-14 15:21:57 +00:00
|
|
|
void (*mq_fdclose)(struct thread *td, int fd, struct file *fp);
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2004-12-01 08:06:27 +00:00
|
|
|
/* A mutex to protect the association between a proc and filedesc. */
|
2012-06-14 15:21:57 +00:00
|
|
|
static struct mtx fdesc_mtx;
|
2004-12-01 08:06:27 +00:00
|
|
|
|
2004-01-15 10:15:04 +00:00
|
|
|
/*
|
2012-06-13 17:12:53 +00:00
|
|
|
* If low >= size, just return low. Otherwise find the first zero bit in the
|
|
|
|
* given bitmap, starting at low and not exceeding size - 1. Return size if
|
|
|
|
* not found.
|
2004-01-15 10:15:04 +00:00
|
|
|
*/
|
|
|
|
static int
|
|
|
|
fd_first_free(struct filedesc *fdp, int low, int size)
|
|
|
|
{
|
|
|
|
NDSLOTTYPE *map = fdp->fd_map;
|
|
|
|
NDSLOTTYPE mask;
|
|
|
|
int off, maxoff;
|
|
|
|
|
|
|
|
if (low >= size)
|
|
|
|
return (low);
|
|
|
|
|
|
|
|
off = NDSLOT(low);
|
|
|
|
if (low % NDENTRIES) {
|
|
|
|
mask = ~(~(NDSLOTTYPE)0 >> (NDENTRIES - (low % NDENTRIES)));
|
2004-02-16 18:38:46 +00:00
|
|
|
if ((mask &= ~map[off]) != 0UL)
|
2004-01-15 10:15:04 +00:00
|
|
|
return (off * NDENTRIES + ffsl(mask) - 1);
|
|
|
|
++off;
|
|
|
|
}
|
|
|
|
for (maxoff = NDSLOTS(size); off < maxoff; ++off)
|
2004-02-16 18:38:46 +00:00
|
|
|
if (map[off] != ~0UL)
|
|
|
|
return (off * NDENTRIES + ffsl(~map[off]) - 1);
|
2004-01-15 10:15:04 +00:00
|
|
|
return (size);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2012-06-13 17:18:16 +00:00
|
|
|
* Find the highest non-zero bit in the given bitmap, starting at 0 and
|
|
|
|
* not exceeding size - 1. Return -1 if not found.
|
2004-01-15 10:15:04 +00:00
|
|
|
*/
|
|
|
|
static int
|
2012-06-13 17:18:16 +00:00
|
|
|
fd_last_used(struct filedesc *fdp, int size)
|
2004-01-15 10:15:04 +00:00
|
|
|
{
|
|
|
|
NDSLOTTYPE *map = fdp->fd_map;
|
|
|
|
NDSLOTTYPE mask;
|
|
|
|
int off, minoff;
|
|
|
|
|
|
|
|
off = NDSLOT(size);
|
|
|
|
if (size % NDENTRIES) {
|
|
|
|
mask = ~(~(NDSLOTTYPE)0 << (size % NDENTRIES));
|
|
|
|
if ((mask &= map[off]) != 0)
|
|
|
|
return (off * NDENTRIES + flsl(mask) - 1);
|
|
|
|
--off;
|
|
|
|
}
|
2012-06-13 17:18:16 +00:00
|
|
|
for (minoff = NDSLOT(0); off >= minoff; --off)
|
2004-02-16 18:38:46 +00:00
|
|
|
if (map[off] != 0)
|
|
|
|
return (off * NDENTRIES + flsl(map[off]) - 1);
|
2012-06-13 17:18:16 +00:00
|
|
|
return (-1);
|
2004-01-15 10:15:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
fdisused(struct filedesc *fdp, int fd)
|
|
|
|
{
|
2012-06-14 15:34:10 +00:00
|
|
|
|
2012-06-14 15:35:14 +00:00
|
|
|
FILEDESC_LOCK_ASSERT(fdp);
|
|
|
|
|
|
|
|
KASSERT(fd >= 0 && fd < fdp->fd_nfiles,
|
|
|
|
("file descriptor %d out of range (0, %d)", fd, fdp->fd_nfiles));
|
2012-06-14 15:34:10 +00:00
|
|
|
|
2004-01-15 10:15:04 +00:00
|
|
|
return ((fdp->fd_map[NDSLOT(fd)] & NDBIT(fd)) != 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Mark a file descriptor as used.
|
|
|
|
*/
|
2005-02-10 12:27:58 +00:00
|
|
|
static void
|
2004-01-15 10:15:04 +00:00
|
|
|
fdused(struct filedesc *fdp, int fd)
|
|
|
|
{
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
|
|
|
|
FILEDESC_XLOCK_ASSERT(fdp);
|
2012-06-14 15:34:10 +00:00
|
|
|
|
|
|
|
KASSERT(!fdisused(fdp, fd), ("fd=%d is already used", fd));
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
|
2004-01-15 10:15:04 +00:00
|
|
|
fdp->fd_map[NDSLOT(fd)] |= NDBIT(fd);
|
|
|
|
if (fd > fdp->fd_lastfile)
|
|
|
|
fdp->fd_lastfile = fd;
|
|
|
|
if (fd == fdp->fd_freefile)
|
|
|
|
fdp->fd_freefile = fd_first_free(fdp, fd, fdp->fd_nfiles);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Mark a file descriptor as unused.
|
|
|
|
*/
|
2004-11-07 22:16:07 +00:00
|
|
|
static void
|
2004-01-15 10:15:04 +00:00
|
|
|
fdunused(struct filedesc *fdp, int fd)
|
|
|
|
{
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
|
|
|
|
FILEDESC_XLOCK_ASSERT(fdp);
|
2012-06-14 15:34:10 +00:00
|
|
|
|
|
|
|
KASSERT(fdisused(fdp, fd), ("fd=%d is already unused", fd));
|
|
|
|
KASSERT(fdp->fd_ofiles[fd] == NULL, ("fd=%d is still in use", fd));
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
|
2004-01-15 10:15:04 +00:00
|
|
|
fdp->fd_map[NDSLOT(fd)] &= ~NDBIT(fd);
|
|
|
|
if (fd < fdp->fd_freefile)
|
|
|
|
fdp->fd_freefile = fd;
|
|
|
|
if (fd == fdp->fd_lastfile)
|
2012-06-13 17:18:16 +00:00
|
|
|
fdp->fd_lastfile = fd_last_used(fdp, fd);
|
2004-01-15 10:15:04 +00:00
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* System calls on descriptors.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct getdtablesize_args {
	int dummy;	/* the syscall takes no arguments */
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
 * getdtablesize(2): report the number of file descriptors available to
 * the calling process -- the RLIMIT_NOFILE soft limit, clamped by the
 * global maxfilesperproc and by the racct RACCT_NOFILE limit.
 */
int
sys_getdtablesize(struct thread *td, struct getdtablesize_args *uap)
{
	struct proc *p = td->td_proc;
	uint64_t lim;	/* racct cap on open files */

	PROC_LOCK(p);
	/* Soft rlimit, capped by the global per-process maximum. */
	td->td_retval[0] =
	    min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
	/* Queried while still holding the proc lock. */
	lim = racct_get_limit(td->td_proc, RACCT_NOFILE);
	PROC_UNLOCK(p);
	/* Apply the racct cap when it is tighter than the rlimit. */
	if (lim < td->td_retval[0])
		td->td_retval[0] = lim;
	return (0);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Duplicate a file descriptor to a particular value.
|
2000-11-18 21:01:04 +00:00
|
|
|
*
|
2007-03-05 13:10:58 +00:00
|
|
|
* Note: keep in mind that a potential race condition exists when closing
|
2000-11-18 21:01:04 +00:00
|
|
|
* descriptors from a shared descriptor table (via rfork).
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct dup2_args {
|
|
|
|
u_int from;
|
|
|
|
u_int to;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2011-09-16 13:58:51 +00:00
|
|
|
sys_dup2(struct thread *td, struct dup2_args *uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
|
- Change falloc() to acquire an fd from the process table last so that
it can do it w/o needing to hold the filelist_lock sx lock.
- fdalloc() doesn't need Giant to call free() anymore. It also doesn't
need to drop and reacquire the filedesc lock around free() now as a
result.
- Try to make the code that copies fd tables when extending the fd table in
fdalloc() a bit more readable by performing assignments in separate
statements. This is still a bit ugly though.
- Use max() instead of an if statement so to figure out the starting point
in the search-for-a-free-fd loop in fdalloc() so it reads better next to
the min() in the previous line.
- Don't grow nfiles in steps up to the size needed if we dup2() to some
really large number. Go ahead and double 'nfiles' in a loop prior
to doing the malloc().
- malloc() doesn't need Giant now.
- Use malloc() and free() instead of MALLOC() and FREE() in fdalloc().
- Check to see if the size we are going to grow to is too big, not if the
current size of the fd table is too big in the loop in fdalloc(). This
means if we are out of space or if dup2() requests too high of a fd,
then we will return an error before we go off and try to allocate some
huge table and copy the existing table into it.
- Move all of the logic for dup'ing a file descriptor into do_dup() instead
of putting some of it in do_dup() and duplicating other parts in four
different places. This makes dup(), dup2(), and fcntl(F_DUPFD) basically
wrappers of do_dup now. fcntl() still has an extra check since it uses
a different error return value in one case then the other functions.
- Add a KASSERT() for an assertion that may not always be true where the
fdcheckstd() function assumes that falloc() returns the fd requested and
not some other fd. I think that the assertion is always true because we
are always single-threaded when we get to this point, but if one was
using rfork() and another process sharing the fd table were playing with
the fd table, there might could be a problem.
- To handle the problem of a file descriptor we are dup()'ing being closed
out from under us in dup() in general, do_dup() now obtains a reference
on the file in question before calling fdalloc(). If after the call to
fdalloc() the file for the fd we are dup'ing is a different file, then
we drop our reference on the original file and return EBADF. This
race was only handled in the dup2() case before and would just retry
the operation. The error return allows the user to know they are being
stupid since they have a locking bug in their app instead of dup'ing
some other descriptor and returning it to them.
Tested on: i386, alpha, sparc64
2002-09-03 20:16:31 +00:00
|
|
|
return (do_dup(td, DUP_FIXED, (int)uap->from, (int)uap->to,
|
|
|
|
td->td_retval));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
1994-09-25 19:34:02 +00:00
|
|
|
/*
|
|
|
|
* Duplicate a file descriptor.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-09-25 19:34:02 +00:00
|
|
|
struct dup_args {
|
|
|
|
u_int fd;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-09-25 19:34:02 +00:00
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
2011-09-16 13:58:51 +00:00
|
|
|
sys_dup(struct thread *td, struct dup_args *uap)
|
1994-09-25 19:34:02 +00:00
|
|
|
{
|
|
|
|
|
2008-05-28 20:25:19 +00:00
|
|
|
return (do_dup(td, 0, (int)uap->fd, 0, td->td_retval));
|
1994-09-25 19:34:02 +00:00
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* The file control system call.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct fcntl_args {
|
|
|
|
int fd;
|
|
|
|
int cmd;
|
1998-07-15 06:10:16 +00:00
|
|
|
long arg;
|
1994-05-24 10:09:53 +00:00
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2011-09-16 13:58:51 +00:00
|
|
|
sys_fcntl(struct thread *td, struct fcntl_args *uap)
|
2002-09-02 22:24:14 +00:00
|
|
|
{
|
|
|
|
struct flock fl;
|
2008-03-26 15:23:12 +00:00
|
|
|
struct oflock ofl;
|
2002-09-02 22:24:14 +00:00
|
|
|
intptr_t arg;
|
|
|
|
int error;
|
2008-03-26 15:23:12 +00:00
|
|
|
int cmd;
|
2002-09-02 22:24:14 +00:00
|
|
|
|
|
|
|
error = 0;
|
2008-03-26 15:23:12 +00:00
|
|
|
cmd = uap->cmd;
|
2002-09-02 22:24:14 +00:00
|
|
|
switch (uap->cmd) {
|
2008-03-26 15:23:12 +00:00
|
|
|
case F_OGETLK:
|
|
|
|
case F_OSETLK:
|
|
|
|
case F_OSETLKW:
|
|
|
|
/*
|
|
|
|
* Convert old flock structure to new.
|
|
|
|
*/
|
|
|
|
error = copyin((void *)(intptr_t)uap->arg, &ofl, sizeof(ofl));
|
|
|
|
fl.l_start = ofl.l_start;
|
|
|
|
fl.l_len = ofl.l_len;
|
|
|
|
fl.l_pid = ofl.l_pid;
|
|
|
|
fl.l_type = ofl.l_type;
|
|
|
|
fl.l_whence = ofl.l_whence;
|
|
|
|
fl.l_sysid = 0;
|
|
|
|
|
|
|
|
switch (uap->cmd) {
|
|
|
|
case F_OGETLK:
|
|
|
|
cmd = F_GETLK;
|
|
|
|
break;
|
|
|
|
case F_OSETLK:
|
|
|
|
cmd = F_SETLK;
|
|
|
|
break;
|
|
|
|
case F_OSETLKW:
|
|
|
|
cmd = F_SETLKW;
|
|
|
|
break;
|
|
|
|
}
|
2002-09-02 22:24:14 +00:00
|
|
|
arg = (intptr_t)&fl;
|
|
|
|
break;
|
2008-03-26 15:23:12 +00:00
|
|
|
case F_GETLK:
|
|
|
|
case F_SETLK:
|
|
|
|
case F_SETLKW:
|
|
|
|
case F_SETLK_REMOTE:
|
|
|
|
error = copyin((void *)(intptr_t)uap->arg, &fl, sizeof(fl));
|
|
|
|
arg = (intptr_t)&fl;
|
|
|
|
break;
|
2002-09-02 22:24:14 +00:00
|
|
|
default:
|
|
|
|
arg = uap->arg;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (error)
|
|
|
|
return (error);
|
2008-03-26 15:23:12 +00:00
|
|
|
error = kern_fcntl(td, uap->fd, cmd, arg);
|
2002-09-02 22:24:14 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2008-03-26 15:23:12 +00:00
|
|
|
if (uap->cmd == F_OGETLK) {
|
|
|
|
ofl.l_start = fl.l_start;
|
|
|
|
ofl.l_len = fl.l_len;
|
|
|
|
ofl.l_pid = fl.l_pid;
|
|
|
|
ofl.l_type = fl.l_type;
|
|
|
|
ofl.l_whence = fl.l_whence;
|
|
|
|
error = copyout(&ofl, (void *)(intptr_t)uap->arg, sizeof(ofl));
|
|
|
|
} else if (uap->cmd == F_GETLK) {
|
2002-10-16 15:45:37 +00:00
|
|
|
error = copyout(&fl, (void *)(intptr_t)uap->arg, sizeof(fl));
|
2008-03-26 15:23:12 +00:00
|
|
|
}
|
2002-09-02 22:24:14 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2011-08-11 12:30:23 +00:00
|
|
|
static inline int
|
|
|
|
fdunwrap(int fd, cap_rights_t rights, struct filedesc *fdp, struct file **fpp)
|
|
|
|
{
|
|
|
|
|
2012-06-14 16:23:16 +00:00
|
|
|
FILEDESC_LOCK_ASSERT(fdp);
|
|
|
|
|
2012-06-14 16:25:10 +00:00
|
|
|
*fpp = fget_locked(fdp, fd);
|
2011-08-11 12:30:23 +00:00
|
|
|
if (*fpp == NULL)
|
|
|
|
return (EBADF);
|
|
|
|
|
|
|
|
#ifdef CAPABILITIES
|
|
|
|
if ((*fpp)->f_type == DTYPE_CAPABILITY) {
|
|
|
|
int err = cap_funwrap(*fpp, rights, fpp);
|
|
|
|
if (err != 0) {
|
|
|
|
*fpp = NULL;
|
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif /* CAPABILITIES */
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2002-09-02 22:24:14 +00:00
|
|
|
int
|
|
|
|
kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2003-01-01 01:05:54 +00:00
|
|
|
struct filedesc *fdp;
|
2002-10-16 15:45:37 +00:00
|
|
|
struct flock *flp;
|
2003-01-01 01:05:54 +00:00
|
|
|
struct file *fp;
|
2002-10-16 15:45:37 +00:00
|
|
|
struct proc *p;
|
2003-01-01 01:05:54 +00:00
|
|
|
char *pop;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vnode *vp;
|
2002-10-16 15:45:37 +00:00
|
|
|
int error, flg, tmp;
|
2007-07-03 21:26:06 +00:00
|
|
|
int vfslocked;
|
2009-09-28 16:59:47 +00:00
|
|
|
u_int old, new;
|
|
|
|
uint64_t bsize;
|
2012-07-02 21:01:03 +00:00
|
|
|
off_t foffset;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2007-07-03 21:26:06 +00:00
|
|
|
vfslocked = 0;
|
2002-10-16 15:45:37 +00:00
|
|
|
error = 0;
|
|
|
|
flg = F_POSIX;
|
|
|
|
p = td->td_proc;
|
2001-09-01 19:04:37 +00:00
|
|
|
fdp = p->p_fd;
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
|
2002-09-02 22:24:14 +00:00
|
|
|
switch (cmd) {
|
1994-05-24 10:09:53 +00:00
|
|
|
case F_DUPFD:
|
2008-05-28 20:25:19 +00:00
|
|
|
tmp = arg;
|
|
|
|
error = do_dup(td, DUP_FCNTL, fd, tmp, td->td_retval);
|
2001-09-01 19:04:37 +00:00
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2008-03-08 22:02:21 +00:00
|
|
|
case F_DUP2FD:
|
|
|
|
tmp = arg;
|
|
|
|
error = do_dup(td, DUP_FIXED, fd, tmp, td->td_retval);
|
|
|
|
break;
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
case F_GETFD:
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SLOCK(fdp);
|
2012-06-14 16:25:10 +00:00
|
|
|
if ((fp = fget_locked(fdp, fd)) == NULL) {
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
error = EBADF;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
pop = &fdp->fd_ofileflags[fd];
|
2002-04-13 10:16:53 +00:00
|
|
|
td->td_retval[0] = (*pop & UF_EXCLOSE) ? FD_CLOEXEC : 0;
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2001-09-01 19:04:37 +00:00
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
case F_SETFD:
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_XLOCK(fdp);
|
2012-06-14 16:25:10 +00:00
|
|
|
if ((fp = fget_locked(fdp, fd)) == NULL) {
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_XUNLOCK(fdp);
|
|
|
|
error = EBADF;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
pop = &fdp->fd_ofileflags[fd];
|
2002-04-13 10:16:53 +00:00
|
|
|
*pop = (*pop &~ UF_EXCLOSE) |
|
2002-09-02 22:24:14 +00:00
|
|
|
(arg & FD_CLOEXEC ? UF_EXCLOSE : 0);
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XUNLOCK(fdp);
|
2001-09-01 19:04:37 +00:00
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
case F_GETFL:
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SLOCK(fdp);
|
2011-08-11 12:30:23 +00:00
|
|
|
error = fdunwrap(fd, CAP_FCNTL, fdp, &fp);
|
|
|
|
if (error != 0) {
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
break;
|
|
|
|
}
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = OFLAGS(fp->f_flag);
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2001-09-01 19:04:37 +00:00
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
case F_SETFL:
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SLOCK(fdp);
|
2011-08-11 12:30:23 +00:00
|
|
|
error = fdunwrap(fd, CAP_FCNTL, fdp, &fp);
|
|
|
|
if (error != 0) {
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
break;
|
|
|
|
}
|
2007-12-30 01:42:15 +00:00
|
|
|
fhold(fp);
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2007-12-30 01:42:15 +00:00
|
|
|
do {
|
|
|
|
tmp = flg = fp->f_flag;
|
|
|
|
tmp &= ~FCNTLFLAGS;
|
|
|
|
tmp |= FFLAGS(arg & ~O_ACCMODE) & FCNTLFLAGS;
|
|
|
|
} while(atomic_cmpset_int(&fp->f_flag, flg, tmp) == 0);
|
1994-05-24 10:09:53 +00:00
|
|
|
tmp = fp->f_flag & FNONBLOCK;
|
2002-08-17 02:36:16 +00:00
|
|
|
error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
|
2000-11-18 21:01:04 +00:00
|
|
|
if (error) {
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp, td);
|
2001-09-01 19:04:37 +00:00
|
|
|
break;
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
tmp = fp->f_flag & FASYNC;
|
2002-08-17 02:36:16 +00:00
|
|
|
error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td);
|
2002-10-16 15:45:37 +00:00
|
|
|
if (error == 0) {
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp, td);
|
2001-09-01 19:04:37 +00:00
|
|
|
break;
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
2007-12-30 01:42:15 +00:00
|
|
|
atomic_clear_int(&fp->f_flag, FNONBLOCK);
|
1994-05-24 10:09:53 +00:00
|
|
|
tmp = 0;
|
2002-08-17 02:36:16 +00:00
|
|
|
(void)fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp, td);
|
2001-09-01 19:04:37 +00:00
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
case F_GETOWN:
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SLOCK(fdp);
|
2011-08-11 12:30:23 +00:00
|
|
|
error = fdunwrap(fd, CAP_FCNTL, fdp, &fp);
|
|
|
|
if (error != 0) {
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
break;
|
|
|
|
}
|
2000-11-18 21:01:04 +00:00
|
|
|
fhold(fp);
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2002-09-13 15:15:16 +00:00
|
|
|
error = fo_ioctl(fp, FIOGETOWN, &tmp, td->td_ucred, td);
|
|
|
|
if (error == 0)
|
|
|
|
td->td_retval[0] = tmp;
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp, td);
|
2001-09-01 19:04:37 +00:00
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
case F_SETOWN:
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SLOCK(fdp);
|
2011-08-11 12:30:23 +00:00
|
|
|
error = fdunwrap(fd, CAP_FCNTL, fdp, &fp);
|
|
|
|
if (error != 0) {
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
break;
|
|
|
|
}
|
2000-11-18 21:01:04 +00:00
|
|
|
fhold(fp);
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2002-09-13 15:15:16 +00:00
|
|
|
tmp = arg;
|
|
|
|
error = fo_ioctl(fp, FIOSETOWN, &tmp, td->td_ucred, td);
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp, td);
|
2001-09-01 19:04:37 +00:00
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2008-03-26 15:23:12 +00:00
|
|
|
case F_SETLK_REMOTE:
|
|
|
|
error = priv_check(td, PRIV_NFS_LOCKD);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
flg = F_REMOTE;
|
|
|
|
goto do_setlk;
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
case F_SETLKW:
|
|
|
|
flg |= F_WAIT;
|
2002-08-25 13:23:09 +00:00
|
|
|
/* FALLTHROUGH F_SETLK */
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
case F_SETLK:
|
2008-03-26 15:23:12 +00:00
|
|
|
do_setlk:
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SLOCK(fdp);
|
2011-08-11 12:30:23 +00:00
|
|
|
error = fdunwrap(fd, CAP_FLOCK, fdp, &fp);
|
|
|
|
if (error != 0) {
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
break;
|
|
|
|
}
|
2001-09-01 19:04:37 +00:00
|
|
|
if (fp->f_type != DTYPE_VNODE) {
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2001-09-01 19:04:37 +00:00
|
|
|
error = EBADF;
|
|
|
|
break;
|
|
|
|
}
|
2002-09-02 22:24:14 +00:00
|
|
|
flp = (struct flock *)arg;
|
|
|
|
if (flp->l_whence == SEEK_CUR) {
|
2012-07-02 21:01:03 +00:00
|
|
|
foffset = foffset_get(fp);
|
|
|
|
if (foffset < 0 ||
|
2002-09-02 22:24:14 +00:00
|
|
|
(flp->l_start > 0 &&
|
2012-07-02 21:01:03 +00:00
|
|
|
foffset > OFF_MAX - flp->l_start)) {
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2001-09-01 19:04:37 +00:00
|
|
|
error = EOVERFLOW;
|
|
|
|
break;
|
2001-08-23 13:19:32 +00:00
|
|
|
}
|
2012-07-02 21:01:03 +00:00
|
|
|
flp->l_start += foffset;
|
2001-08-23 07:42:40 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2002-09-02 22:24:14 +00:00
|
|
|
/*
|
2002-10-16 15:45:37 +00:00
|
|
|
* VOP_ADVLOCK() may block.
|
2002-09-02 22:24:14 +00:00
|
|
|
*/
|
|
|
|
fhold(fp);
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2003-06-22 08:41:43 +00:00
|
|
|
vp = fp->f_vnode;
|
2007-07-03 21:26:06 +00:00
|
|
|
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
|
2002-09-02 22:24:14 +00:00
|
|
|
switch (flp->l_type) {
|
1994-05-24 10:09:53 +00:00
|
|
|
case F_RDLCK:
|
2000-11-18 21:01:04 +00:00
|
|
|
if ((fp->f_flag & FREAD) == 0) {
|
|
|
|
error = EBADF;
|
|
|
|
break;
|
|
|
|
}
|
2003-02-15 22:43:05 +00:00
|
|
|
PROC_LOCK(p->p_leader);
|
|
|
|
p->p_leader->p_flag |= P_ADVLOCK;
|
|
|
|
PROC_UNLOCK(p->p_leader);
|
2002-10-15 00:03:40 +00:00
|
|
|
error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
|
2002-09-02 22:24:14 +00:00
|
|
|
flp, flg);
|
2000-11-18 21:01:04 +00:00
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
case F_WRLCK:
|
2000-11-18 21:01:04 +00:00
|
|
|
if ((fp->f_flag & FWRITE) == 0) {
|
|
|
|
error = EBADF;
|
|
|
|
break;
|
|
|
|
}
|
2003-02-15 22:43:05 +00:00
|
|
|
PROC_LOCK(p->p_leader);
|
|
|
|
p->p_leader->p_flag |= P_ADVLOCK;
|
|
|
|
PROC_UNLOCK(p->p_leader);
|
2002-10-16 15:45:37 +00:00
|
|
|
error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
|
|
|
|
flp, flg);
|
2000-11-18 21:01:04 +00:00
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
case F_UNLCK:
|
2002-10-16 15:45:37 +00:00
|
|
|
error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
|
2008-03-26 15:23:12 +00:00
|
|
|
flp, flg);
|
|
|
|
break;
|
|
|
|
case F_UNLCKSYS:
|
|
|
|
/*
|
|
|
|
* Temporary api for testing remote lock
|
|
|
|
* infrastructure.
|
|
|
|
*/
|
|
|
|
if (flg != F_REMOTE) {
|
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
|
|
|
|
F_UNLCKSYS, flp, flg);
|
2000-11-18 21:01:04 +00:00
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
default:
|
2000-11-18 21:01:04 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2007-07-03 21:26:06 +00:00
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
|
|
|
vfslocked = 0;
|
2012-06-17 21:04:22 +00:00
|
|
|
if (error != 0 || flp->l_type == F_UNLCK ||
|
|
|
|
flp->l_type == F_UNLCKSYS) {
|
2012-06-17 16:32:32 +00:00
|
|
|
fdrop(fp, td);
|
|
|
|
break;
|
|
|
|
}
|
2012-06-17 16:59:37 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Check for a race with close.
|
|
|
|
*
|
|
|
|
* The vnode is now advisory locked (or unlocked, but this case
|
|
|
|
* is not really important) as the caller requested.
|
|
|
|
* We had to drop the filedesc lock, so we need to recheck if
|
|
|
|
* the descriptor is still valid, because if it was closed
|
|
|
|
* in the meantime we need to remove advisory lock from the
|
|
|
|
* vnode - close on any descriptor leading to an advisory
|
|
|
|
* locked vnode, removes that lock.
|
|
|
|
* We will return 0 on purpose in that case, as the result of
|
|
|
|
* successful advisory lock might have been externally visible
|
|
|
|
* already. This is fine - effectively we pretend to the caller
|
|
|
|
* that the closing thread was a bit slower and that the
|
|
|
|
* advisory lock succeeded before the close.
|
|
|
|
*/
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SLOCK(fdp);
|
2012-06-14 16:25:10 +00:00
|
|
|
if (fget_locked(fdp, fd) != fp) {
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2003-02-15 22:43:05 +00:00
|
|
|
flp->l_whence = SEEK_SET;
|
|
|
|
flp->l_start = 0;
|
|
|
|
flp->l_len = 0;
|
|
|
|
flp->l_type = F_UNLCK;
|
2007-07-03 21:26:06 +00:00
|
|
|
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
|
2003-02-15 22:43:05 +00:00
|
|
|
(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
|
2012-06-14 12:37:41 +00:00
|
|
|
F_UNLCK, flp, F_POSIX);
|
2007-07-03 21:26:06 +00:00
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
|
|
|
vfslocked = 0;
|
2003-02-15 22:43:05 +00:00
|
|
|
} else
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp, td);
|
2001-09-01 19:04:37 +00:00
|
|
|
break;
|
2001-09-12 08:38:13 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
case F_GETLK:
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SLOCK(fdp);
|
2011-08-11 12:30:23 +00:00
|
|
|
error = fdunwrap(fd, CAP_FLOCK, fdp, &fp);
|
|
|
|
if (error != 0) {
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
break;
|
|
|
|
}
|
2001-09-01 19:04:37 +00:00
|
|
|
if (fp->f_type != DTYPE_VNODE) {
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2001-09-01 19:04:37 +00:00
|
|
|
error = EBADF;
|
|
|
|
break;
|
|
|
|
}
|
2002-09-02 22:24:14 +00:00
|
|
|
flp = (struct flock *)arg;
|
|
|
|
if (flp->l_type != F_RDLCK && flp->l_type != F_WRLCK &&
|
|
|
|
flp->l_type != F_UNLCK) {
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2001-09-01 19:04:37 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
2002-09-02 22:24:14 +00:00
|
|
|
if (flp->l_whence == SEEK_CUR) {
|
2012-07-02 21:01:03 +00:00
|
|
|
foffset = foffset_get(fp);
|
2002-09-02 22:24:14 +00:00
|
|
|
if ((flp->l_start > 0 &&
|
2012-07-02 21:01:03 +00:00
|
|
|
foffset > OFF_MAX - flp->l_start) ||
|
2002-09-02 22:24:14 +00:00
|
|
|
(flp->l_start < 0 &&
|
2012-07-02 21:01:03 +00:00
|
|
|
foffset < OFF_MIN - flp->l_start)) {
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2001-09-01 19:04:37 +00:00
|
|
|
error = EOVERFLOW;
|
|
|
|
break;
|
2001-08-23 13:19:32 +00:00
|
|
|
}
|
2012-07-02 21:01:03 +00:00
|
|
|
flp->l_start += foffset;
|
2001-08-23 07:42:40 +00:00
|
|
|
}
|
2002-09-02 22:24:14 +00:00
|
|
|
/*
|
2002-10-16 15:45:37 +00:00
|
|
|
* VOP_ADVLOCK() may block.
|
2002-09-02 22:24:14 +00:00
|
|
|
*/
|
|
|
|
fhold(fp);
|
2007-07-03 21:26:06 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2003-06-22 08:41:43 +00:00
|
|
|
vp = fp->f_vnode;
|
2007-07-03 21:26:06 +00:00
|
|
|
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
|
2002-09-02 22:24:14 +00:00
|
|
|
error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, flp,
|
|
|
|
F_POSIX);
|
2007-07-03 21:26:06 +00:00
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
|
|
|
vfslocked = 0;
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp, td);
|
2001-09-01 19:04:37 +00:00
|
|
|
break;
|
2009-09-28 16:59:47 +00:00
|
|
|
|
|
|
|
case F_RDAHEAD:
|
|
|
|
arg = arg ? 128 * 1024: 0;
|
|
|
|
/* FALLTHROUGH */
|
|
|
|
case F_READAHEAD:
|
|
|
|
FILEDESC_SLOCK(fdp);
|
2012-06-14 16:25:10 +00:00
|
|
|
if ((fp = fget_locked(fdp, fd)) == NULL) {
|
2009-09-28 16:59:47 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
error = EBADF;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (fp->f_type != DTYPE_VNODE) {
|
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
error = EBADF;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
fhold(fp);
|
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
if (arg != 0) {
|
|
|
|
vp = fp->f_vnode;
|
|
|
|
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
|
|
|
|
error = vn_lock(vp, LK_SHARED);
|
|
|
|
if (error != 0)
|
|
|
|
goto readahead_vnlock_fail;
|
|
|
|
bsize = fp->f_vnode->v_mount->mnt_stat.f_iosize;
|
|
|
|
VOP_UNLOCK(vp, 0);
|
|
|
|
fp->f_seqcount = (arg + bsize - 1) / bsize;
|
|
|
|
do {
|
|
|
|
new = old = fp->f_flag;
|
|
|
|
new |= FRDAHEAD;
|
2009-11-20 22:22:53 +00:00
|
|
|
} while (!atomic_cmpset_rel_int(&fp->f_flag, old, new));
|
2009-09-28 16:59:47 +00:00
|
|
|
readahead_vnlock_fail:
|
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
2009-11-20 22:22:53 +00:00
|
|
|
vfslocked = 0;
|
2009-09-28 16:59:47 +00:00
|
|
|
} else {
|
|
|
|
do {
|
|
|
|
new = old = fp->f_flag;
|
|
|
|
new &= ~FRDAHEAD;
|
2009-11-20 22:22:53 +00:00
|
|
|
} while (!atomic_cmpset_rel_int(&fp->f_flag, old, new));
|
2009-09-28 16:59:47 +00:00
|
|
|
}
|
|
|
|
fdrop(fp, td);
|
|
|
|
break;
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
default:
|
2001-09-01 19:04:37 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2007-07-03 21:26:06 +00:00
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
2001-09-01 19:04:37 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2008-03-08 22:02:21 +00:00
|
|
|
* Common code for dup, dup2, fcntl(F_DUPFD) and fcntl(F_DUP2FD).
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1995-12-14 08:32:45 +00:00
|
|
|
static int
|
2008-05-28 20:25:19 +00:00
|
|
|
do_dup(struct thread *td, int flags, int old, int new,
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
register_t *retval)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2003-01-01 01:05:54 +00:00
|
|
|
struct filedesc *fdp;
|
- Change falloc() to acquire an fd from the process table last so that
it can do it w/o needing to hold the filelist_lock sx lock.
- fdalloc() doesn't need Giant to call free() anymore. It also doesn't
need to drop and reacquire the filedesc lock around free() now as a
result.
- Try to make the code that copies fd tables when extending the fd table in
fdalloc() a bit more readable by performing assignments in separate
statements. This is still a bit ugly though.
- Use max() instead of an if statement so to figure out the starting point
in the search-for-a-free-fd loop in fdalloc() so it reads better next to
the min() in the previous line.
- Don't grow nfiles in steps up to the size needed if we dup2() to some
really large number. Go ahead and double 'nfiles' in a loop prior
to doing the malloc().
- malloc() doesn't need Giant now.
- Use malloc() and free() instead of MALLOC() and FREE() in fdalloc().
- Check to see if the size we are going to grow to is too big, not if the
current size of the fd table is too big in the loop in fdalloc(). This
means if we are out of space or if dup2() requests too high of a fd,
then we will return an error before we go off and try to allocate some
huge table and copy the existing table into it.
- Move all of the logic for dup'ing a file descriptor into do_dup() instead
of putting some of it in do_dup() and duplicating other parts in four
different places. This makes dup(), dup2(), and fcntl(F_DUPFD) basically
wrappers of do_dup now. fcntl() still has an extra check since it uses
a different error return value in one case then the other functions.
- Add a KASSERT() for an assertion that may not always be true where the
fdcheckstd() function assumes that falloc() returns the fd requested and
not some other fd. I think that the assertion is always true because we
are always single-threaded when we get to this point, but if one was
using rfork() and another process sharing the fd table were playing with
the fd table, there might could be a problem.
- To handle the problem of a file descriptor we are dup()'ing being closed
out from under us in dup() in general, do_dup() now obtains a reference
on the file in question before calling fdalloc(). If after the call to
fdalloc() the file for the fd we are dup'ing is a different file, then
we drop our reference on the original file and return EBADF. This
race was only handled in the dup2() case before and would just retry
the operation. The error return allows the user to know they are being
stupid since they have a locking bug in their app instead of dup'ing
some other descriptor and returning it to them.
Tested on: i386, alpha, sparc64
2002-09-03 20:16:31 +00:00
|
|
|
struct proc *p;
|
2000-11-18 21:01:04 +00:00
|
|
|
struct file *fp;
|
|
|
|
struct file *delfp;
|
2012-06-11 20:00:44 +00:00
|
|
|
int error, maxfd;
|
2004-01-15 10:15:04 +00:00
|
|
|
|
- Change falloc() to acquire an fd from the process table last so that
it can do it w/o needing to hold the filelist_lock sx lock.
- fdalloc() doesn't need Giant to call free() anymore. It also doesn't
need to drop and reacquire the filedesc lock around free() now as a
result.
- Try to make the code that copies fd tables when extending the fd table in
fdalloc() a bit more readable by performing assignments in separate
statements. This is still a bit ugly though.
- Use max() instead of an if statement so to figure out the starting point
in the search-for-a-free-fd loop in fdalloc() so it reads better next to
the min() in the previous line.
- Don't grow nfiles in steps up to the size needed if we dup2() to some
really large number. Go ahead and double 'nfiles' in a loop prior
to doing the malloc().
- malloc() doesn't need Giant now.
- Use malloc() and free() instead of MALLOC() and FREE() in fdalloc().
- Check to see if the size we are going to grow to is too big, not if the
current size of the fd table is too big in the loop in fdalloc(). This
means if we are out of space or if dup2() requests too high of a fd,
then we will return an error before we go off and try to allocate some
huge table and copy the existing table into it.
- Move all of the logic for dup'ing a file descriptor into do_dup() instead
of putting some of it in do_dup() and duplicating other parts in four
different places. This makes dup(), dup2(), and fcntl(F_DUPFD) basically
wrappers of do_dup now. fcntl() still has an extra check since it uses
a different error return value in one case then the other functions.
- Add a KASSERT() for an assertion that may not always be true where the
fdcheckstd() function assumes that falloc() returns the fd requested and
not some other fd. I think that the assertion is always true because we
are always single-threaded when we get to this point, but if one was
using rfork() and another process sharing the fd table were playing with
  the fd table, there might be a problem.
- To handle the problem of a file descriptor we are dup()'ing being closed
out from under us in dup() in general, do_dup() now obtains a reference
on the file in question before calling fdalloc(). If after the call to
fdalloc() the file for the fd we are dup'ing is a different file, then
we drop our reference on the original file and return EBADF. This
race was only handled in the dup2() case before and would just retry
the operation. The error return allows the user to know they are being
stupid since they have a locking bug in their app instead of dup'ing
some other descriptor and returning it to them.
Tested on: i386, alpha, sparc64
2002-09-03 20:16:31 +00:00
|
|
|
p = td->td_proc;
|
|
|
|
fdp = p->p_fd;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Verify we have a valid descriptor to dup from and possibly to
|
2008-05-28 20:25:19 +00:00
|
|
|
* dup to. Unlike dup() and dup2(), fcntl()'s F_DUPFD should
|
|
|
|
* return EINVAL when the new descriptor is out of bounds.
|
- Change falloc() to acquire an fd from the process table last so that
it can do it w/o needing to hold the filelist_lock sx lock.
- fdalloc() doesn't need Giant to call free() anymore. It also doesn't
need to drop and reacquire the filedesc lock around free() now as a
result.
- Try to make the code that copies fd tables when extending the fd table in
fdalloc() a bit more readable by performing assignments in separate
statements. This is still a bit ugly though.
- Use max() instead of an if statement to figure out the starting point
in the search-for-a-free-fd loop in fdalloc() so it reads better next to
the min() in the previous line.
- Don't grow nfiles in steps up to the size needed if we dup2() to some
really large number. Go ahead and double 'nfiles' in a loop prior
to doing the malloc().
- malloc() doesn't need Giant now.
- Use malloc() and free() instead of MALLOC() and FREE() in fdalloc().
- Check to see if the size we are going to grow to is too big, not if the
current size of the fd table is too big in the loop in fdalloc(). This
means if we are out of space or if dup2() requests too high of a fd,
then we will return an error before we go off and try to allocate some
huge table and copy the existing table into it.
- Move all of the logic for dup'ing a file descriptor into do_dup() instead
of putting some of it in do_dup() and duplicating other parts in four
different places. This makes dup(), dup2(), and fcntl(F_DUPFD) basically
wrappers of do_dup now. fcntl() still has an extra check since it uses
  a different error return value in one case than the other functions.
- Add a KASSERT() for an assertion that may not always be true where the
fdcheckstd() function assumes that falloc() returns the fd requested and
not some other fd. I think that the assertion is always true because we
are always single-threaded when we get to this point, but if one was
using rfork() and another process sharing the fd table were playing with
  the fd table, there might be a problem.
- To handle the problem of a file descriptor we are dup()'ing being closed
out from under us in dup() in general, do_dup() now obtains a reference
on the file in question before calling fdalloc(). If after the call to
fdalloc() the file for the fd we are dup'ing is a different file, then
we drop our reference on the original file and return EBADF. This
race was only handled in the dup2() case before and would just retry
the operation. The error return allows the user to know they are being
stupid since they have a locking bug in their app instead of dup'ing
some other descriptor and returning it to them.
Tested on: i386, alpha, sparc64
2002-09-03 20:16:31 +00:00
|
|
|
*/
|
2008-05-28 20:25:19 +00:00
|
|
|
if (old < 0)
|
2002-11-26 17:22:15 +00:00
|
|
|
return (EBADF);
|
2008-05-28 20:25:19 +00:00
|
|
|
if (new < 0)
|
|
|
|
return (flags & DUP_FCNTL ? EINVAL : EBADF);
|
2004-02-04 21:52:57 +00:00
|
|
|
PROC_LOCK(p);
|
|
|
|
maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
|
|
|
|
PROC_UNLOCK(p);
|
2004-01-15 10:15:04 +00:00
|
|
|
if (new >= maxfd)
|
2012-04-11 14:08:09 +00:00
|
|
|
return (flags & DUP_FCNTL ? EINVAL : EBADF);
|
2004-01-15 10:15:04 +00:00
|
|
|
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention.  All of these are important for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
  acquisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XLOCK(fdp);
|
2012-06-14 16:25:10 +00:00
|
|
|
if (fget_locked(fdp, old) == NULL) {
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention.  All of these are important for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
  acquisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XUNLOCK(fdp);
|
- Change falloc() to acquire an fd from the process table last so that
it can do it w/o needing to hold the filelist_lock sx lock.
- fdalloc() doesn't need Giant to call free() anymore. It also doesn't
need to drop and reacquire the filedesc lock around free() now as a
result.
- Try to make the code that copies fd tables when extending the fd table in
fdalloc() a bit more readable by performing assignments in separate
statements. This is still a bit ugly though.
- Use max() instead of an if statement to figure out the starting point
in the search-for-a-free-fd loop in fdalloc() so it reads better next to
the min() in the previous line.
- Don't grow nfiles in steps up to the size needed if we dup2() to some
really large number. Go ahead and double 'nfiles' in a loop prior
to doing the malloc().
- malloc() doesn't need Giant now.
- Use malloc() and free() instead of MALLOC() and FREE() in fdalloc().
- Check to see if the size we are going to grow to is too big, not if the
current size of the fd table is too big in the loop in fdalloc(). This
means if we are out of space or if dup2() requests too high of a fd,
then we will return an error before we go off and try to allocate some
huge table and copy the existing table into it.
- Move all of the logic for dup'ing a file descriptor into do_dup() instead
of putting some of it in do_dup() and duplicating other parts in four
different places. This makes dup(), dup2(), and fcntl(F_DUPFD) basically
wrappers of do_dup now. fcntl() still has an extra check since it uses
  a different error return value in one case than the other functions.
- Add a KASSERT() for an assertion that may not always be true where the
fdcheckstd() function assumes that falloc() returns the fd requested and
not some other fd. I think that the assertion is always true because we
are always single-threaded when we get to this point, but if one was
using rfork() and another process sharing the fd table were playing with
  the fd table, there might be a problem.
- To handle the problem of a file descriptor we are dup()'ing being closed
out from under us in dup() in general, do_dup() now obtains a reference
on the file in question before calling fdalloc(). If after the call to
fdalloc() the file for the fd we are dup'ing is a different file, then
we drop our reference on the original file and return EBADF. This
race was only handled in the dup2() case before and would just retry
the operation. The error return allows the user to know they are being
stupid since they have a locking bug in their app instead of dup'ing
some other descriptor and returning it to them.
Tested on: i386, alpha, sparc64
2002-09-03 20:16:31 +00:00
|
|
|
return (EBADF);
|
|
|
|
}
|
2008-05-28 20:25:19 +00:00
|
|
|
if (flags & DUP_FIXED && old == new) {
|
- Change falloc() to acquire an fd from the process table last so that
it can do it w/o needing to hold the filelist_lock sx lock.
- fdalloc() doesn't need Giant to call free() anymore. It also doesn't
need to drop and reacquire the filedesc lock around free() now as a
result.
- Try to make the code that copies fd tables when extending the fd table in
fdalloc() a bit more readable by performing assignments in separate
statements. This is still a bit ugly though.
- Use max() instead of an if statement to figure out the starting point
in the search-for-a-free-fd loop in fdalloc() so it reads better next to
the min() in the previous line.
- Don't grow nfiles in steps up to the size needed if we dup2() to some
really large number. Go ahead and double 'nfiles' in a loop prior
to doing the malloc().
- malloc() doesn't need Giant now.
- Use malloc() and free() instead of MALLOC() and FREE() in fdalloc().
- Check to see if the size we are going to grow to is too big, not if the
current size of the fd table is too big in the loop in fdalloc(). This
means if we are out of space or if dup2() requests too high of a fd,
then we will return an error before we go off and try to allocate some
huge table and copy the existing table into it.
- Move all of the logic for dup'ing a file descriptor into do_dup() instead
of putting some of it in do_dup() and duplicating other parts in four
different places. This makes dup(), dup2(), and fcntl(F_DUPFD) basically
wrappers of do_dup now. fcntl() still has an extra check since it uses
  a different error return value in one case than the other functions.
- Add a KASSERT() for an assertion that may not always be true where the
fdcheckstd() function assumes that falloc() returns the fd requested and
not some other fd. I think that the assertion is always true because we
are always single-threaded when we get to this point, but if one was
using rfork() and another process sharing the fd table were playing with
  the fd table, there might be a problem.
- To handle the problem of a file descriptor we are dup()'ing being closed
out from under us in dup() in general, do_dup() now obtains a reference
on the file in question before calling fdalloc(). If after the call to
fdalloc() the file for the fd we are dup'ing is a different file, then
we drop our reference on the original file and return EBADF. This
race was only handled in the dup2() case before and would just retry
the operation. The error return allows the user to know they are being
stupid since they have a locking bug in their app instead of dup'ing
some other descriptor and returning it to them.
Tested on: i386, alpha, sparc64
2002-09-03 20:16:31 +00:00
|
|
|
*retval = new;
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention.  All of these are important for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
  acquisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XUNLOCK(fdp);
|
- Change falloc() to acquire an fd from the process table last so that
it can do it w/o needing to hold the filelist_lock sx lock.
- fdalloc() doesn't need Giant to call free() anymore. It also doesn't
need to drop and reacquire the filedesc lock around free() now as a
result.
- Try to make the code that copies fd tables when extending the fd table in
fdalloc() a bit more readable by performing assignments in separate
statements. This is still a bit ugly though.
- Use max() instead of an if statement to figure out the starting point
in the search-for-a-free-fd loop in fdalloc() so it reads better next to
the min() in the previous line.
- Don't grow nfiles in steps up to the size needed if we dup2() to some
really large number. Go ahead and double 'nfiles' in a loop prior
to doing the malloc().
- malloc() doesn't need Giant now.
- Use malloc() and free() instead of MALLOC() and FREE() in fdalloc().
- Check to see if the size we are going to grow to is too big, not if the
current size of the fd table is too big in the loop in fdalloc(). This
means if we are out of space or if dup2() requests too high of a fd,
then we will return an error before we go off and try to allocate some
huge table and copy the existing table into it.
- Move all of the logic for dup'ing a file descriptor into do_dup() instead
of putting some of it in do_dup() and duplicating other parts in four
different places. This makes dup(), dup2(), and fcntl(F_DUPFD) basically
wrappers of do_dup now. fcntl() still has an extra check since it uses
  a different error return value in one case than the other functions.
- Add a KASSERT() for an assertion that may not always be true where the
fdcheckstd() function assumes that falloc() returns the fd requested and
not some other fd. I think that the assertion is always true because we
are always single-threaded when we get to this point, but if one was
using rfork() and another process sharing the fd table were playing with
  the fd table, there might be a problem.
- To handle the problem of a file descriptor we are dup()'ing being closed
out from under us in dup() in general, do_dup() now obtains a reference
on the file in question before calling fdalloc(). If after the call to
fdalloc() the file for the fd we are dup'ing is a different file, then
we drop our reference on the original file and return EBADF. This
race was only handled in the dup2() case before and would just retry
the operation. The error return allows the user to know they are being
stupid since they have a locking bug in their app instead of dup'ing
some other descriptor and returning it to them.
Tested on: i386, alpha, sparc64
2002-09-03 20:16:31 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
fp = fdp->fd_ofiles[old];
|
|
|
|
fhold(fp);
|
|
|
|
|
|
|
|
/*
|
2004-01-15 10:15:04 +00:00
|
|
|
* If the caller specified a file descriptor, make sure the file
|
|
|
|
* table is large enough to hold it, and grab it. Otherwise, just
|
|
|
|
* allocate a new descriptor the usual way. Since the filedesc
|
|
|
|
* lock may be temporarily dropped in the process, we have to look
|
|
|
|
* out for a race.
|
- Change falloc() to acquire an fd from the process table last so that
it can do it w/o needing to hold the filelist_lock sx lock.
- fdalloc() doesn't need Giant to call free() anymore. It also doesn't
need to drop and reacquire the filedesc lock around free() now as a
result.
- Try to make the code that copies fd tables when extending the fd table in
fdalloc() a bit more readable by performing assignments in separate
statements. This is still a bit ugly though.
- Use max() instead of an if statement to figure out the starting point
in the search-for-a-free-fd loop in fdalloc() so it reads better next to
the min() in the previous line.
- Don't grow nfiles in steps up to the size needed if we dup2() to some
really large number. Go ahead and double 'nfiles' in a loop prior
to doing the malloc().
- malloc() doesn't need Giant now.
- Use malloc() and free() instead of MALLOC() and FREE() in fdalloc().
- Check to see if the size we are going to grow to is too big, not if the
current size of the fd table is too big in the loop in fdalloc(). This
means if we are out of space or if dup2() requests too high of a fd,
then we will return an error before we go off and try to allocate some
huge table and copy the existing table into it.
- Move all of the logic for dup'ing a file descriptor into do_dup() instead
of putting some of it in do_dup() and duplicating other parts in four
different places. This makes dup(), dup2(), and fcntl(F_DUPFD) basically
wrappers of do_dup now. fcntl() still has an extra check since it uses
  a different error return value in one case than the other functions.
- Add a KASSERT() for an assertion that may not always be true where the
fdcheckstd() function assumes that falloc() returns the fd requested and
not some other fd. I think that the assertion is always true because we
are always single-threaded when we get to this point, but if one was
using rfork() and another process sharing the fd table were playing with
  the fd table, there might be a problem.
- To handle the problem of a file descriptor we are dup()'ing being closed
out from under us in dup() in general, do_dup() now obtains a reference
on the file in question before calling fdalloc(). If after the call to
fdalloc() the file for the fd we are dup'ing is a different file, then
we drop our reference on the original file and return EBADF. This
race was only handled in the dup2() case before and would just retry
the operation. The error return allows the user to know they are being
stupid since they have a locking bug in their app instead of dup'ing
some other descriptor and returning it to them.
Tested on: i386, alpha, sparc64
2002-09-03 20:16:31 +00:00
|
|
|
*/
|
2008-05-28 20:25:19 +00:00
|
|
|
if (flags & DUP_FIXED) {
|
2011-04-06 19:13:04 +00:00
|
|
|
if (new >= fdp->fd_nfiles) {
|
|
|
|
/*
|
2011-11-15 01:48:53 +00:00
|
|
|
* The resource limits are here instead of e.g.
|
|
|
|
* fdalloc(), because the file descriptor table may be
|
|
|
|
* shared between processes, so we can't really use
|
|
|
|
* racct_add()/racct_sub(). Instead of counting the
|
|
|
|
* number of actually allocated descriptors, just put
|
|
|
|
* the limit on the size of the file descriptor table.
|
2011-04-06 19:13:04 +00:00
|
|
|
*/
|
2011-07-06 20:06:44 +00:00
|
|
|
#ifdef RACCT
|
2011-04-06 19:13:04 +00:00
|
|
|
PROC_LOCK(p);
|
|
|
|
error = racct_set(p, RACCT_NOFILE, new + 1);
|
|
|
|
PROC_UNLOCK(p);
|
|
|
|
if (error != 0) {
|
|
|
|
FILEDESC_XUNLOCK(fdp);
|
|
|
|
fdrop(fp, td);
|
|
|
|
return (EMFILE);
|
|
|
|
}
|
2011-07-06 20:06:44 +00:00
|
|
|
#endif
|
2004-01-15 10:15:04 +00:00
|
|
|
fdgrowtable(fdp, new + 1);
|
2011-04-06 19:13:04 +00:00
|
|
|
}
|
2004-01-15 10:15:04 +00:00
|
|
|
if (fdp->fd_ofiles[new] == NULL)
|
|
|
|
fdused(fdp, new);
|
|
|
|
} else {
|
2004-01-17 00:59:04 +00:00
|
|
|
if ((error = fdalloc(td, new, &new)) != 0) {
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention.  All of these are important for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
  acquisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XUNLOCK(fdp);
|
2003-01-06 13:19:05 +00:00
|
|
|
fdrop(fp, td);
|
- Change falloc() to acquire an fd from the process table last so that
it can do it w/o needing to hold the filelist_lock sx lock.
- fdalloc() doesn't need Giant to call free() anymore. It also doesn't
need to drop and reacquire the filedesc lock around free() now as a
result.
- Try to make the code that copies fd tables when extending the fd table in
fdalloc() a bit more readable by performing assignments in separate
statements. This is still a bit ugly though.
- Use max() instead of an if statement to figure out the starting point
in the search-for-a-free-fd loop in fdalloc() so it reads better next to
the min() in the previous line.
- Don't grow nfiles in steps up to the size needed if we dup2() to some
really large number. Go ahead and double 'nfiles' in a loop prior
to doing the malloc().
- malloc() doesn't need Giant now.
- Use malloc() and free() instead of MALLOC() and FREE() in fdalloc().
- Check to see if the size we are going to grow to is too big, not if the
current size of the fd table is too big in the loop in fdalloc(). This
means if we are out of space or if dup2() requests too high of a fd,
then we will return an error before we go off and try to allocate some
huge table and copy the existing table into it.
- Move all of the logic for dup'ing a file descriptor into do_dup() instead
of putting some of it in do_dup() and duplicating other parts in four
different places. This makes dup(), dup2(), and fcntl(F_DUPFD) basically
wrappers of do_dup now. fcntl() still has an extra check since it uses
  a different error return value in one case than the other functions.
- Add a KASSERT() for an assertion that may not always be true where the
fdcheckstd() function assumes that falloc() returns the fd requested and
not some other fd. I think that the assertion is always true because we
are always single-threaded when we get to this point, but if one was
using rfork() and another process sharing the fd table were playing with
  the fd table, there might be a problem.
- To handle the problem of a file descriptor we are dup()'ing being closed
out from under us in dup() in general, do_dup() now obtains a reference
on the file in question before calling fdalloc(). If after the call to
fdalloc() the file for the fd we are dup'ing is a different file, then
we drop our reference on the original file and return EBADF. This
race was only handled in the dup2() case before and would just retry
the operation. The error return allows the user to know they are being
stupid since they have a locking bug in their app instead of dup'ing
some other descriptor and returning it to them.
Tested on: i386, alpha, sparc64
2002-09-03 20:16:31 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-06-11 19:48:55 +00:00
|
|
|
KASSERT(fp == fdp->fd_ofiles[old], ("old fd has been modified"));
|
2012-06-10 13:10:21 +00:00
|
|
|
KASSERT(old != new, ("new fd is same as old"));
|
2002-01-13 11:58:06 +00:00
|
|
|
|
2012-06-11 19:51:27 +00:00
|
|
|
delfp = fdp->fd_ofiles[new];
|
|
|
|
/*
|
|
|
|
* Duplicate the source descriptor.
|
|
|
|
*/
|
|
|
|
fdp->fd_ofiles[new] = fp;
|
|
|
|
fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] &~ UF_EXCLOSE;
|
|
|
|
if (new > fdp->fd_lastfile)
|
|
|
|
fdp->fd_lastfile = new;
|
|
|
|
*retval = new;
|
|
|
|
|
2012-06-11 19:53:41 +00:00
|
|
|
if (delfp != NULL) {
|
2012-06-14 12:43:37 +00:00
|
|
|
(void) closefp(fdp, new, delfp, td, 1);
|
2012-06-11 20:00:44 +00:00
|
|
|
/* closefp() drops the FILEDESC lock for us. */
|
2004-08-16 03:09:01 +00:00
|
|
|
} else {
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
  acquisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XUNLOCK(fdp);
|
2002-01-15 00:58:40 +00:00
|
|
|
}
|
2012-06-11 20:00:44 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
Installed the second patch attached to kern/7899 with some changes suggested
by bde, a few other tweaks to get the patch to apply cleanly again and
some improvements to the comments.
This change closes some fairly minor security holes associated with
F_SETOWN, fixes a few bugs, and removes some limitations that F_SETOWN
had on tty devices. For more details, see the description on the PR.
Because this patch increases the size of the proc and pgrp structures,
it is necessary to re-install the includes and recompile libkvm,
the vinum lkm, fstat, gcore, gdb, ipfilter, ps, top, and w.
PR: kern/7899
Reviewed by: bde, elvind
1998-11-11 10:04:13 +00:00
|
|
|
/*
 * If sigio is on the list associated with a process or process group,
 * disable signalling from the device, remove sigio from the list and
 * free sigio.
 *
 * *sigiop is the owner's back-reference slot (e.g. a socket's so_sigio);
 * it is cleared under SIGIO_LOCK so concurrent fsetown()/fgetown()
 * callers observe a consistent pointer.
 */
void
funsetown(struct sigio **sigiop)
{
	struct sigio *sigio;

	SIGIO_LOCK();
	sigio = *sigiop;
	if (sigio == NULL) {
		SIGIO_UNLOCK();
		return;
	}
	/* Clear the owner's reference before unhooking from the list. */
	*(sigio->sio_myref) = NULL;
	if ((sigio)->sio_pgid < 0) {
		/* Negative sio_pgid means the owner is a process group. */
		struct pgrp *pg = (sigio)->sio_pgrp;
		PGRP_LOCK(pg);
		SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
			     sigio, sio_pgsigio);
		PGRP_UNLOCK(pg);
	} else {
		/* Positive sio_pgid means the owner is a single process. */
		struct proc *p = (sigio)->sio_proc;
		PROC_LOCK(p);
		SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
			     sigio, sio_pgsigio);
		PROC_UNLOCK(p);
	}
	SIGIO_UNLOCK();
	/* Free the credential reference taken by fsetown(), then the sigio. */
	crfree(sigio->sio_ucred);
	free(sigio, M_SIGIO);
}
|
|
|
|
|
2002-05-06 19:31:28 +00:00
|
|
|
/*
 * Free a list of sigio structures.
 * We only need to lock the SIGIO_LOCK because we have made ourselves
 * inaccessible to callers of fsetown and therefore do not need to lock
 * the proc or pgrp struct for the list manipulation.
 *
 * Called with the list head of an exiting proc's p_sigiolst or an
 * exiting pgrp's pg_sigiolst; every entry on one list belongs to the
 * same proc or the same pgrp (asserted below).
 */
void
funsetownlst(struct sigiolst *sigiolst)
{
	struct proc *p;
	struct pgrp *pg;
	struct sigio *sigio;

	sigio = SLIST_FIRST(sigiolst);
	if (sigio == NULL)
		return;
	p = NULL;
	pg = NULL;

	/*
	 * Every entry of the list should belong
	 * to a single proc or pgrp.
	 */
	if (sigio->sio_pgid < 0) {
		pg = sigio->sio_pgrp;
		PGRP_LOCK_ASSERT(pg, MA_NOTOWNED);
	} else /* if (sigio->sio_pgid > 0) */ {
		p = sigio->sio_proc;
		PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	}

	SIGIO_LOCK();
	while ((sigio = SLIST_FIRST(sigiolst)) != NULL) {
		/* Detach the owner's back-reference under SIGIO_LOCK. */
		*(sigio->sio_myref) = NULL;
		if (pg != NULL) {
			KASSERT(sigio->sio_pgid < 0,
			    ("Proc sigio in pgrp sigio list"));
			KASSERT(sigio->sio_pgrp == pg,
			    ("Bogus pgrp in sigio list"));
			PGRP_LOCK(pg);
			SLIST_REMOVE(&pg->pg_sigiolst, sigio, sigio,
			    sio_pgsigio);
			PGRP_UNLOCK(pg);
		} else /* if (p != NULL) */ {
			KASSERT(sigio->sio_pgid > 0,
			    ("Pgrp sigio in proc sigio list"));
			KASSERT(sigio->sio_proc == p,
			    ("Bogus proc in sigio list"));
			PROC_LOCK(p);
			SLIST_REMOVE(&p->p_sigiolst, sigio, sigio,
			    sio_pgsigio);
			PROC_UNLOCK(p);
		}
		/*
		 * Drop SIGIO_LOCK around crfree()/free() — neither may be
		 * called with the sigio mutex held — then retake it before
		 * re-examining the list head.
		 */
		SIGIO_UNLOCK();
		crfree(sigio->sio_ucred);
		free(sigio, M_SIGIO);
		SIGIO_LOCK();
	}
	SIGIO_UNLOCK();
}
|
|
|
|
|
|
|
|
/*
 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
 *
 * After permission checking, add a sigio structure to the sigio list for
 * the process or process group.
 *
 * pgid > 0 names a process, pgid < 0 names a process group (by -pgid),
 * and pgid == 0 clears the current owner.  Returns 0 on success or an
 * errno value (ESRCH, EPERM) on failure.
 */
int
fsetown(pid_t pgid, struct sigio **sigiop)
{
	struct proc *proc;
	struct pgrp *pgrp;
	struct sigio *sigio;
	int ret;

	if (pgid == 0) {
		/* F_SETOWN with id 0 just removes any existing owner. */
		funsetown(sigiop);
		return (0);
	}

	ret = 0;

	/* Allocate and fill in the new sigio out of locks. */
	sigio = malloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
	sigio->sio_pgid = pgid;
	sigio->sio_ucred = crhold(curthread->td_ucred);
	sigio->sio_myref = sigiop;

	/* proctree_lock stabilizes session membership for the checks below. */
	sx_slock(&proctree_lock);
	if (pgid > 0) {
		proc = pfind(pgid);	/* returns the proc locked */
		if (proc == NULL) {
			ret = ESRCH;
			goto fail;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		PROC_UNLOCK(proc);
		if (proc->p_session != curthread->td_proc->p_session) {
			ret = EPERM;
			goto fail;
		}

		pgrp = NULL;
	} else /* if (pgid < 0) */ {
		pgrp = pgfind(-pgid);	/* returns the pgrp locked */
		if (pgrp == NULL) {
			ret = ESRCH;
			goto fail;
		}
		PGRP_UNLOCK(pgrp);

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pgrp->pg_session != curthread->td_proc->p_session) {
			ret = EPERM;
			goto fail;
		}

		proc = NULL;
	}
	/* Drop any previous owner before installing the new one. */
	funsetown(sigiop);
	if (pgid > 0) {
		PROC_LOCK(proc);
		/*
		 * Since funsetownlst() is called without the proctree
		 * locked, we need to check for P_WEXIT.
		 * XXX: is ESRCH correct?
		 */
		if ((proc->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(proc);
			ret = ESRCH;
			goto fail;
		}
		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
		sigio->sio_proc = proc;
		PROC_UNLOCK(proc);
	} else {
		PGRP_LOCK(pgrp);
		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
		sigio->sio_pgrp = pgrp;
		PGRP_UNLOCK(pgrp);
	}
	sx_sunlock(&proctree_lock);
	/* Publish the new sigio through the owner's back-reference. */
	SIGIO_LOCK();
	*sigiop = sigio;
	SIGIO_UNLOCK();
	return (0);

fail:
	/* Unwind: release the proctree lock and the unpublished sigio. */
	sx_sunlock(&proctree_lock);
	crfree(sigio->sio_ucred);
	free(sigio, M_SIGIO);
	return (ret);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
|
|
|
|
*/
|
|
|
|
pid_t
|
2002-10-03 02:13:00 +00:00
|
|
|
fgetown(sigiop)
|
|
|
|
struct sigio **sigiop;
|
Installed the second patch attached to kern/7899 with some changes suggested
by bde, a few other tweaks to get the patch to apply cleanly again and
some improvements to the comments.
This change closes some fairly minor security holes associated with
F_SETOWN, fixes a few bugs, and removes some limitations that F_SETOWN
had on tty devices. For more details, see the description on the PR.
Because this patch increases the size of the proc and pgrp structures,
it is necessary to re-install the includes and recompile libkvm,
the vinum lkm, fstat, gcore, gdb, ipfilter, ps, top, and w.
PR: kern/7899
Reviewed by: bde, elvind
1998-11-11 10:04:13 +00:00
|
|
|
{
|
2002-10-03 02:13:00 +00:00
|
|
|
pid_t pgid;
|
|
|
|
|
|
|
|
SIGIO_LOCK();
|
|
|
|
pgid = (*sigiop != NULL) ? (*sigiop)->sio_pgid : 0;
|
|
|
|
SIGIO_UNLOCK();
|
|
|
|
return (pgid);
|
Installed the second patch attached to kern/7899 with some changes suggested
by bde, a few other tweaks to get the patch to apply cleanly again and
some improvements to the comments.
This change closes some fairly minor security holes associated with
F_SETOWN, fixes a few bugs, and removes some limitations that F_SETOWN
had on tty devices. For more details, see the description on the PR.
Because this patch increases the size of the proc and pgrp structures,
it is necessary to re-install the includes and recompile libkvm,
the vinum lkm, fstat, gcore, gdb, ipfilter, ps, top, and w.
PR: kern/7899
Reviewed by: bde, elvind
1998-11-11 10:04:13 +00:00
|
|
|
}
|
|
|
|
|
2012-06-11 19:57:31 +00:00
|
|
|
/*
 * Common last-close work for a file descriptor: detach knotes, notify
 * mqueue, drop the descriptor's file reference, and balance the
 * fd_holdleaderscount bookkeeping for fdfree().
 *
 * Function drops the filedesc lock on return.
 */
static int
closefp(struct filedesc *fdp, int fd, struct file *fp, struct thread *td,
    int holdleaders)
{
	struct file *fp_object;
	int error;

	FILEDESC_XLOCK_ASSERT(fdp);

	if (holdleaders) {
		if (td->td_proc->p_fdtol != NULL) {
			/*
			 * Ask fdfree() to sleep to ensure that all relevant
			 * process leaders can be traversed in closef().
			 */
			fdp->fd_holdleaderscount++;
		} else {
			/* No fd-to-leader tracking; nothing to hold. */
			holdleaders = 0;
		}
	}

	/*
	 * We now hold the fp reference that used to be owned by the
	 * descriptor array.  We have to unlock the FILEDESC *AFTER*
	 * knote_fdclose to prevent a race of the fd getting opened, a knote
	 * added, and deleting a knote for the new fd.
	 */
	knote_fdclose(td, fd);

	/*
	 * When we're closing an fd with a capability, we need to notify
	 * mqueue if the underlying object is of type mqueue.
	 */
	(void)cap_funwrap(fp, 0, &fp_object);
	if (fp_object->f_type == DTYPE_MQUEUE)
		mq_fdclose(td, fd, fp_object);
	FILEDESC_XUNLOCK(fdp);

	/* Drop the reference; closef() may sleep, so the lock is dropped. */
	error = closef(fp, td);
	if (holdleaders) {
		/* Undo the hold taken above and wake a waiting fdfree(). */
		FILEDESC_XLOCK(fdp);
		fdp->fd_holdleaderscount--;
		if (fdp->fd_holdleaderscount == 0 &&
		    fdp->fd_holdleaderswakeup != 0) {
			fdp->fd_holdleaderswakeup = 0;
			wakeup(&fdp->fd_holdleaderscount);
		}
		FILEDESC_XUNLOCK(fdp);
	}
	return (error);
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
 * Close a file descriptor.
 */
#ifndef _SYS_SYSPROTO_H_
struct close_args {
	int	fd;
};
#endif
/* ARGSUSED */
int
sys_close(struct thread *td, struct close_args *uap)
{

	/* Thin syscall wrapper; all the work is done in kern_close(). */
	return (kern_close(td, uap->fd));
}
|
|
|
|
|
|
|
|
int
|
|
|
|
kern_close(td, fd)
|
|
|
|
struct thread *td;
|
|
|
|
int fd;
|
|
|
|
{
|
2003-01-01 01:05:54 +00:00
|
|
|
struct filedesc *fdp;
|
2012-06-11 20:00:44 +00:00
|
|
|
struct file *fp;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
fdp = td->td_proc->p_fd;
|
2006-02-05 23:57:32 +00:00
|
|
|
|
|
|
|
AUDIT_SYSCLOSE(td, fd);
|
|
|
|
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XLOCK(fdp);
|
2012-06-14 16:25:10 +00:00
|
|
|
if ((fp = fget_locked(fdp, fd)) == NULL) {
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XUNLOCK(fdp);
|
2004-01-15 10:15:04 +00:00
|
|
|
return (EBADF);
|
2001-09-01 19:04:37 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
fdp->fd_ofiles[fd] = NULL;
|
2000-11-18 21:01:04 +00:00
|
|
|
fdp->fd_ofileflags[fd] = 0;
|
2004-01-15 10:15:04 +00:00
|
|
|
fdunused(fdp, fd);
|
2000-11-18 21:01:04 +00:00
|
|
|
|
2012-06-11 20:00:44 +00:00
|
|
|
/* closefp() drops the FILEDESC lock for us. */
|
2012-06-14 12:43:37 +00:00
|
|
|
return (closefp(fdp, fd, fp, td, 1));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
2009-06-15 20:38:55 +00:00
|
|
|
/*
|
|
|
|
* Close open file descriptors.
|
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct closefrom_args {
|
|
|
|
int lowfd;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
2011-09-16 13:58:51 +00:00
|
|
|
sys_closefrom(struct thread *td, struct closefrom_args *uap)
|
2009-06-15 20:38:55 +00:00
|
|
|
{
|
|
|
|
struct filedesc *fdp;
|
|
|
|
int fd;
|
|
|
|
|
|
|
|
fdp = td->td_proc->p_fd;
|
2009-06-27 13:58:44 +00:00
|
|
|
AUDIT_ARG_FD(uap->lowfd);
|
2009-06-15 20:38:55 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Treat negative starting file descriptor values identical to
|
|
|
|
* closefrom(0) which closes all files.
|
|
|
|
*/
|
|
|
|
if (uap->lowfd < 0)
|
|
|
|
uap->lowfd = 0;
|
|
|
|
FILEDESC_SLOCK(fdp);
|
|
|
|
for (fd = uap->lowfd; fd < fdp->fd_nfiles; fd++) {
|
|
|
|
if (fdp->fd_ofiles[fd] != NULL) {
|
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
(void)kern_close(td, fd);
|
|
|
|
FILEDESC_SLOCK(fdp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2004-06-11 11:16:26 +00:00
|
|
|
#if defined(COMPAT_43)
/*
 * Return status information about a file descriptor.
 * (4.3BSD compatibility: converts the result to the old 'struct ostat'.)
 */
#ifndef _SYS_SYSPROTO_H_
struct ofstat_args {
	int	fd;
	struct	ostat *sb;
};
#endif
/* ARGSUSED */
int
ofstat(struct thread *td, struct ofstat_args *uap)
{
	struct ostat oub;
	struct stat ub;
	int error;

	error = kern_fstat(td, uap->fd, &ub);
	if (error == 0) {
		/* Convert to the legacy stat layout before copyout. */
		cvtstat(&ub, &oub);
		error = copyout(&oub, uap->sb, sizeof(oub));
	}
	return (error);
}
#endif /* COMPAT_43 */
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Return status information about a file descriptor.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct fstat_args {
|
|
|
|
int fd;
|
|
|
|
struct stat *sb;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2011-09-16 13:58:51 +00:00
|
|
|
sys_fstat(struct thread *td, struct fstat_args *uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct stat ub;
|
|
|
|
int error;
|
|
|
|
|
2005-02-07 18:44:55 +00:00
|
|
|
error = kern_fstat(td, uap->fd, &ub);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (error == 0)
|
2002-10-16 15:45:37 +00:00
|
|
|
error = copyout(&ub, uap->sb, sizeof(ub));
|
2005-02-07 18:44:55 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
kern_fstat(struct thread *td, int fd, struct stat *sbp)
|
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
int error;
|
|
|
|
|
2009-06-27 13:58:44 +00:00
|
|
|
AUDIT_ARG_FD(fd);
|
2006-02-05 23:57:32 +00:00
|
|
|
|
2011-08-11 12:30:23 +00:00
|
|
|
if ((error = fget(td, fd, CAP_FSTAT, &fp)) != 0)
|
2005-02-07 18:44:55 +00:00
|
|
|
return (error);
|
2006-02-05 23:57:32 +00:00
|
|
|
|
2009-06-27 13:58:44 +00:00
|
|
|
AUDIT_ARG_FILE(td->td_proc, fp);
|
2006-02-05 23:57:32 +00:00
|
|
|
|
2005-02-07 18:44:55 +00:00
|
|
|
error = fo_stat(fp, sbp, td->td_ucred, td);
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp, td);
|
2008-02-23 01:01:49 +00:00
|
|
|
#ifdef KTRACE
|
|
|
|
if (error == 0 && KTRPOINT(td, KTR_STRUCT))
|
|
|
|
ktrstat(sbp);
|
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1998-05-11 03:55:28 +00:00
|
|
|
/*
|
|
|
|
* Return status information about a file descriptor.
|
|
|
|
*/
|
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
|
|
|
struct nfstat_args {
|
|
|
|
int fd;
|
|
|
|
struct nstat *sb;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
2011-09-16 13:58:51 +00:00
|
|
|
sys_nfstat(struct thread *td, struct nfstat_args *uap)
|
1998-05-11 03:55:28 +00:00
|
|
|
{
|
|
|
|
struct nstat nub;
|
2005-02-07 18:44:55 +00:00
|
|
|
struct stat ub;
|
1998-05-11 03:55:28 +00:00
|
|
|
int error;
|
|
|
|
|
2005-02-07 18:44:55 +00:00
|
|
|
error = kern_fstat(td, uap->fd, &ub);
|
1998-05-11 03:55:28 +00:00
|
|
|
if (error == 0) {
|
|
|
|
cvtnstat(&ub, &nub);
|
2002-10-16 15:45:37 +00:00
|
|
|
error = copyout(&nub, uap->sb, sizeof(nub));
|
1998-05-11 03:55:28 +00:00
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Return pathconf information about a file descriptor.
|
|
|
|
*/
|
1995-11-12 06:43:28 +00:00
|
|
|
#ifndef _SYS_SYSPROTO_H_
|
1994-05-24 10:09:53 +00:00
|
|
|
struct fpathconf_args {
|
|
|
|
int fd;
|
|
|
|
int name;
|
|
|
|
};
|
1995-11-12 06:43:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2011-09-16 13:58:51 +00:00
|
|
|
sys_fpathconf(struct thread *td, struct fpathconf_args *uap)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
struct vnode *vp;
|
2001-11-14 06:30:36 +00:00
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2011-08-11 12:30:23 +00:00
|
|
|
if ((error = fget(td, uap->fd, CAP_FPATHCONF, &fp)) != 0)
|
2002-01-14 00:13:45 +00:00
|
|
|
return (error);
|
2002-10-27 18:07:41 +00:00
|
|
|
|
|
|
|
/* If asynchronous I/O is available, it works for all descriptors. */
|
|
|
|
if (uap->name == _PC_ASYNC_IO) {
|
|
|
|
td->td_retval[0] = async_io_version;
|
|
|
|
goto out;
|
|
|
|
}
|
2003-07-04 12:20:27 +00:00
|
|
|
vp = fp->f_vnode;
|
|
|
|
if (vp != NULL) {
|
2005-01-24 10:19:31 +00:00
|
|
|
int vfslocked;
|
2012-06-14 12:37:41 +00:00
|
|
|
|
2005-01-24 10:19:31 +00:00
|
|
|
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
|
Use shared vnode locks instead of exclusive vnode locks for the access(),
chdir(), chroot(), eaccess(), fpathconf(), fstat(), fstatfs(), lseek()
(when figuring out the current size of the file in the SEEK_END case),
pathconf(), readlink(), and statfs() system calls.
Submitted by: ups (mostly)
Tested by: pho
MFC after: 1 month
2008-11-03 20:31:00 +00:00
|
|
|
vn_lock(vp, LK_SHARED | LK_RETRY);
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_PATHCONF(vp, uap->name, td->td_retval);
|
2008-01-13 14:44:15 +00:00
|
|
|
VOP_UNLOCK(vp, 0);
|
2005-01-24 10:19:31 +00:00
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
2003-07-04 12:20:27 +00:00
|
|
|
} else if (fp->f_type == DTYPE_PIPE || fp->f_type == DTYPE_SOCKET) {
|
|
|
|
if (uap->name != _PC_PIPE_BUF) {
|
|
|
|
error = EINVAL;
|
|
|
|
} else {
|
|
|
|
td->td_retval[0] = PIPE_BUF;
|
2012-06-14 12:37:41 +00:00
|
|
|
error = 0;
|
2003-07-04 12:20:27 +00:00
|
|
|
}
|
|
|
|
} else {
|
2000-11-18 21:01:04 +00:00
|
|
|
error = EOPNOTSUPP;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2002-10-27 18:07:41 +00:00
|
|
|
out:
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp, td);
|
2002-10-16 15:45:37 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2012-06-09 18:50:32 +00:00
|
|
|
* Grow the file table to accomodate (at least) nfd descriptors.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2004-01-15 10:15:04 +00:00
|
|
|
static void
|
|
|
|
fdgrowtable(struct filedesc *fdp, int nfd)
|
|
|
|
{
|
2009-05-14 03:24:22 +00:00
|
|
|
struct filedesc0 *fdp0;
|
|
|
|
struct freetable *fo;
|
2004-01-15 10:15:04 +00:00
|
|
|
struct file **ntable;
|
2009-05-14 03:24:22 +00:00
|
|
|
struct file **otable;
|
2004-01-15 10:15:04 +00:00
|
|
|
char *nfileflags;
|
|
|
|
int nnfiles, onfiles;
|
|
|
|
NDSLOTTYPE *nmap;
|
|
|
|
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XLOCK_ASSERT(fdp);
|
2004-01-15 10:15:04 +00:00
|
|
|
|
|
|
|
KASSERT(fdp->fd_nfiles > 0,
|
|
|
|
("zero-length file table"));
|
|
|
|
|
|
|
|
/* compute the size of the new table */
|
|
|
|
onfiles = fdp->fd_nfiles;
|
|
|
|
nnfiles = NDSLOTS(nfd) * NDENTRIES; /* round up */
|
|
|
|
if (nnfiles <= onfiles)
|
|
|
|
/* the table is already large enough */
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* allocate a new table and (if required) new bitmaps */
|
2009-05-14 03:24:22 +00:00
|
|
|
ntable = malloc((nnfiles * OFILESIZE) + sizeof(struct freetable),
|
2004-01-15 10:15:04 +00:00
|
|
|
M_FILEDESC, M_ZERO | M_WAITOK);
|
|
|
|
nfileflags = (char *)&ntable[nnfiles];
|
|
|
|
if (NDSLOTS(nnfiles) > NDSLOTS(onfiles))
|
2008-10-23 15:53:51 +00:00
|
|
|
nmap = malloc(NDSLOTS(nnfiles) * NDSLOTSIZE,
|
2004-01-15 10:15:04 +00:00
|
|
|
M_FILEDESC, M_ZERO | M_WAITOK);
|
|
|
|
else
|
|
|
|
nmap = NULL;
|
|
|
|
|
|
|
|
bcopy(fdp->fd_ofiles, ntable, onfiles * sizeof(*ntable));
|
|
|
|
bcopy(fdp->fd_ofileflags, nfileflags, onfiles);
|
2009-05-14 03:24:22 +00:00
|
|
|
otable = fdp->fd_ofiles;
|
2004-01-15 10:15:04 +00:00
|
|
|
fdp->fd_ofileflags = nfileflags;
|
2009-05-14 03:24:22 +00:00
|
|
|
fdp->fd_ofiles = ntable;
|
|
|
|
/*
|
|
|
|
* We must preserve ofiles until the process exits because we can't
|
|
|
|
* be certain that no threads have references to the old table via
|
|
|
|
* _fget().
|
|
|
|
*/
|
|
|
|
if (onfiles > NDFILE) {
|
|
|
|
fo = (struct freetable *)&otable[onfiles];
|
|
|
|
fdp0 = (struct filedesc0 *)fdp;
|
|
|
|
fo->ft_table = otable;
|
|
|
|
SLIST_INSERT_HEAD(&fdp0->fd_free, fo, ft_next);
|
|
|
|
}
|
2004-01-15 10:15:04 +00:00
|
|
|
if (NDSLOTS(nnfiles) > NDSLOTS(onfiles)) {
|
|
|
|
bcopy(fdp->fd_map, nmap, NDSLOTS(onfiles) * sizeof(*nmap));
|
|
|
|
if (NDSLOTS(onfiles) > NDSLOTS(NDFILE))
|
|
|
|
free(fdp->fd_map, M_FILEDESC);
|
|
|
|
fdp->fd_map = nmap;
|
|
|
|
}
|
|
|
|
fdp->fd_nfiles = nnfiles;
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2004-01-15 10:15:04 +00:00
|
|
|
/*
|
|
|
|
* Allocate a file descriptor for the process.
|
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2004-01-17 00:59:04 +00:00
|
|
|
fdalloc(struct thread *td, int minfd, int *result)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
2004-01-15 10:15:04 +00:00
|
|
|
struct filedesc *fdp = p->p_fd;
|
2012-06-13 17:12:53 +00:00
|
|
|
int fd = -1, maxfd, allocfd;
|
2011-07-06 20:06:44 +00:00
|
|
|
#ifdef RACCT
|
|
|
|
int error;
|
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XLOCK_ASSERT(fdp);
|
2002-01-13 11:58:06 +00:00
|
|
|
|
2006-03-20 00:13:47 +00:00
|
|
|
if (fdp->fd_freefile > minfd)
|
2011-11-15 01:48:53 +00:00
|
|
|
minfd = fdp->fd_freefile;
|
2006-03-20 00:13:47 +00:00
|
|
|
|
2004-02-04 21:52:57 +00:00
|
|
|
PROC_LOCK(p);
|
|
|
|
maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
|
|
|
|
PROC_UNLOCK(p);
|
2004-01-15 10:15:04 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
2012-06-13 17:12:53 +00:00
|
|
|
* Search the bitmap for a free descriptor starting at minfd.
|
|
|
|
* If none is found, grow the file table.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2012-06-13 17:12:53 +00:00
|
|
|
fd = fd_first_free(fdp, minfd, fdp->fd_nfiles);
|
|
|
|
if (fd >= maxfd)
|
|
|
|
return (EMFILE);
|
|
|
|
if (fd >= fdp->fd_nfiles) {
|
|
|
|
allocfd = min(fd * 2, maxfd);
|
2011-07-06 20:06:44 +00:00
|
|
|
#ifdef RACCT
|
2011-04-06 19:13:04 +00:00
|
|
|
PROC_LOCK(p);
|
2012-06-13 17:12:53 +00:00
|
|
|
error = racct_set(p, RACCT_NOFILE, allocfd);
|
2011-04-06 19:13:04 +00:00
|
|
|
PROC_UNLOCK(p);
|
|
|
|
if (error != 0)
|
|
|
|
return (EMFILE);
|
2011-07-06 20:06:44 +00:00
|
|
|
#endif
|
2012-06-13 17:12:53 +00:00
|
|
|
/*
|
|
|
|
* fd is already equal to first free descriptor >= minfd, so
|
|
|
|
* we only need to grow the table and we are done.
|
|
|
|
*/
|
|
|
|
fdgrowtable(fdp, allocfd);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2004-01-15 10:15:04 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Perform some sanity checks, then mark the file descriptor as
|
|
|
|
* used and return it to the caller.
|
|
|
|
*/
|
2012-06-13 22:12:10 +00:00
|
|
|
KASSERT(fd >= 0 && fd < min(maxfd, fdp->fd_nfiles),
|
2012-06-11 22:05:26 +00:00
|
|
|
("invalid descriptor %d", fd));
|
2004-01-15 10:15:04 +00:00
|
|
|
KASSERT(!fdisused(fdp, fd),
|
|
|
|
("fd_first_free() returned non-free descriptor"));
|
2012-06-09 12:27:30 +00:00
|
|
|
KASSERT(fdp->fd_ofiles[fd] == NULL, ("file descriptor isn't free"));
|
2012-06-07 23:33:10 +00:00
|
|
|
KASSERT(fdp->fd_ofileflags[fd] == 0, ("file flags are set"));
|
2004-01-15 10:15:04 +00:00
|
|
|
fdused(fdp, fd);
|
|
|
|
*result = fd;
|
|
|
|
return (0);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
* Check to see whether n user file descriptors are available to the process
|
|
|
|
* p.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2004-12-02 11:56:13 +00:00
|
|
|
fdavail(struct thread *td, int n)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
struct proc *p = td->td_proc;
|
2003-01-01 01:05:54 +00:00
|
|
|
struct filedesc *fdp = td->td_proc->p_fd;
|
|
|
|
int i, lim, last;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_LOCK_ASSERT(fdp);
|
2002-01-13 11:58:06 +00:00
|
|
|
|
2011-04-06 19:13:04 +00:00
|
|
|
/*
|
|
|
|
* XXX: This is only called from uipc_usrreq.c:unp_externalize();
|
|
|
|
* call racct_add() from there instead of dealing with containers
|
|
|
|
* here.
|
|
|
|
*/
|
2004-02-04 21:52:57 +00:00
|
|
|
PROC_LOCK(p);
|
|
|
|
lim = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
|
|
|
|
PROC_UNLOCK(p);
|
1994-05-24 10:09:53 +00:00
|
|
|
if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0)
|
|
|
|
return (1);
|
1996-08-15 16:33:32 +00:00
|
|
|
last = min(fdp->fd_nfiles, lim);
|
2012-06-09 18:03:23 +00:00
|
|
|
for (i = fdp->fd_freefile; i < last; i++) {
|
|
|
|
if (fdp->fd_ofiles[i] == NULL && --n <= 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (1);
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
* Create a new open file structure and allocate a file decriptor for the
|
|
|
|
* process that refers to it. We add one reference to the file for the
|
|
|
|
* descriptor table and one reference for resultfp. This is to prevent us
|
|
|
|
* being preempted and the entry in the descriptor table closed after we
|
|
|
|
* release the FILEDESC lock.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2011-04-01 13:28:34 +00:00
|
|
|
falloc(struct thread *td, struct file **resultfp, int *resultfd, int flags)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2007-12-30 01:42:15 +00:00
|
|
|
struct file *fp;
|
2011-06-30 15:22:49 +00:00
|
|
|
int error, fd;
|
|
|
|
|
|
|
|
error = falloc_noinstall(td, &fp);
|
|
|
|
if (error)
|
|
|
|
return (error); /* no reference held on error */
|
|
|
|
|
|
|
|
error = finstall(td, fp, &fd, flags);
|
|
|
|
if (error) {
|
|
|
|
fdrop(fp, td); /* one reference (fp only) */
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (resultfp != NULL)
|
|
|
|
*resultfp = fp; /* copy out result */
|
|
|
|
else
|
|
|
|
fdrop(fp, td); /* release local reference */
|
|
|
|
|
|
|
|
if (resultfd != NULL)
|
|
|
|
*resultfd = fd;
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Create a new open file structure without allocating a file descriptor.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
falloc_noinstall(struct thread *td, struct file **resultfp)
|
|
|
|
{
|
|
|
|
struct file *fp;
|
2003-06-18 18:57:58 +00:00
|
|
|
int maxuserfiles = maxfiles - (maxfiles / 20);
|
2003-06-19 04:07:12 +00:00
|
|
|
static struct timeval lastfail;
|
|
|
|
static int curfail;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2011-06-30 15:22:49 +00:00
|
|
|
KASSERT(resultfp != NULL, ("%s: resultfp == NULL", __func__));
|
|
|
|
|
2006-06-27 11:32:08 +00:00
|
|
|
if ((openfiles >= maxuserfiles &&
|
2007-06-16 23:41:43 +00:00
|
|
|
priv_check(td, PRIV_MAXFILES) != 0) ||
|
2006-06-27 11:32:08 +00:00
|
|
|
openfiles >= maxfiles) {
|
2003-06-19 04:07:12 +00:00
|
|
|
if (ppsratecheck(&lastfail, &curfail, 1)) {
|
2011-06-30 15:22:49 +00:00
|
|
|
printf("kern.maxfiles limit exceeded by uid %i, "
|
|
|
|
"please see tuning(7).\n", td->td_ucred->cr_ruid);
|
2004-01-11 19:39:14 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
return (ENFILE);
|
|
|
|
}
|
2007-12-30 01:42:15 +00:00
|
|
|
atomic_add_int(&openfiles, 1);
|
2011-06-30 15:22:49 +00:00
|
|
|
fp = uma_zalloc(file_zone, M_WAITOK | M_ZERO);
|
2008-05-25 14:57:43 +00:00
|
|
|
refcount_init(&fp->f_count, 1);
|
2002-02-27 18:32:23 +00:00
|
|
|
fp->f_cred = crhold(td->td_ucred);
|
1999-08-04 18:53:50 +00:00
|
|
|
fp->f_ops = &badfileops;
|
2004-06-19 11:40:08 +00:00
|
|
|
fp->f_data = NULL;
|
|
|
|
fp->f_vnode = NULL;
|
2011-06-30 15:22:49 +00:00
|
|
|
*resultfp = fp;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Install a file in a file descriptor table.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
finstall(struct thread *td, struct file *fp, int *fd, int flags)
|
|
|
|
{
|
|
|
|
struct filedesc *fdp = td->td_proc->p_fd;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
KASSERT(fd != NULL, ("%s: fd == NULL", __func__));
|
|
|
|
KASSERT(fp != NULL, ("%s: fp == NULL", __func__));
|
|
|
|
|
|
|
|
FILEDESC_XLOCK(fdp);
|
|
|
|
if ((error = fdalloc(td, 0, fd))) {
|
|
|
|
FILEDESC_XUNLOCK(fdp);
|
- Change falloc() to acquire an fd from the process table last so that
it can do it w/o needing to hold the filelist_lock sx lock.
- fdalloc() doesn't need Giant to call free() anymore. It also doesn't
need to drop and reacquire the filedesc lock around free() now as a
result.
- Try to make the code that copies fd tables when extending the fd table in
fdalloc() a bit more readable by performing assignments in separate
statements. This is still a bit ugly though.
- Use max() instead of an if statement so to figure out the starting point
in the search-for-a-free-fd loop in fdalloc() so it reads better next to
the min() in the previous line.
- Don't grow nfiles in steps up to the size needed if we dup2() to some
really large number. Go ahead and double 'nfiles' in a loop prior
to doing the malloc().
- malloc() doesn't need Giant now.
- Use malloc() and free() instead of MALLOC() and FREE() in fdalloc().
- Check to see if the size we are going to grow to is too big, not if the
current size of the fd table is too big in the loop in fdalloc(). This
means if we are out of space or if dup2() requests too high of a fd,
then we will return an error before we go off and try to allocate some
huge table and copy the existing table into it.
- Move all of the logic for dup'ing a file descriptor into do_dup() instead
of putting some of it in do_dup() and duplicating other parts in four
different places. This makes dup(), dup2(), and fcntl(F_DUPFD) basically
wrappers of do_dup now. fcntl() still has an extra check since it uses
a different error return value in one case then the other functions.
- Add a KASSERT() for an assertion that may not always be true where the
fdcheckstd() function assumes that falloc() returns the fd requested and
not some other fd. I think that the assertion is always true because we
are always single-threaded when we get to this point, but if one was
using rfork() and another process sharing the fd table were playing with
the fd table, there might could be a problem.
- To handle the problem of a file descriptor we are dup()'ing being closed
out from under us in dup() in general, do_dup() now obtains a reference
on the file in question before calling fdalloc(). If after the call to
fdalloc() the file for the fd we are dup'ing is a different file, then
we drop our reference on the original file and return EBADF. This
race was only handled in the dup2() case before and would just retry
the operation. The error return allows the user to know they are being
stupid since they have a locking bug in their app instead of dup'ing
some other descriptor and returning it to them.
Tested on: i386, alpha, sparc64
2002-09-03 20:16:31 +00:00
|
|
|
return (error);
|
|
|
|
}
|
2011-06-30 15:22:49 +00:00
|
|
|
fhold(fp);
|
|
|
|
fdp->fd_ofiles[*fd] = fp;
|
2011-03-25 14:00:36 +00:00
|
|
|
if ((flags & O_CLOEXEC) != 0)
|
2011-06-30 15:22:49 +00:00
|
|
|
fdp->fd_ofileflags[*fd] |= UF_EXCLOSE;
|
|
|
|
FILEDESC_XUNLOCK(fdp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
1996-02-23 18:49:25 +00:00
|
|
|
/*
 * Build a new filedesc structure from another.
 * Copy the current, root, and jail root vnode references.
 *
 * The new table starts out with the statically-embedded arrays of
 * "struct filedesc0" (NDFILE slots) and a single reference/hold count.
 * If 'fdp' is NULL, no directory references are copied and the caller
 * gets a table with NULL cdir/rdir/jdir.
 */
struct filedesc *
fdinit(struct filedesc *fdp)
{
	struct filedesc0 *newfdp;

	/* M_ZERO gives us NULL pointers and zeroed counters by default. */
	newfdp = malloc(sizeof *newfdp, M_FILEDESC, M_WAITOK | M_ZERO);
	FILEDESC_LOCK_INIT(&newfdp->fd_fd);
	if (fdp != NULL) {
		/*
		 * Copy the directory vnode pointers under the source
		 * table's lock and take a reference on each non-NULL one
		 * so they cannot be recycled out from under the new table.
		 */
		FILEDESC_XLOCK(fdp);
		newfdp->fd_fd.fd_cdir = fdp->fd_cdir;
		if (newfdp->fd_fd.fd_cdir)
			VREF(newfdp->fd_fd.fd_cdir);
		newfdp->fd_fd.fd_rdir = fdp->fd_rdir;
		if (newfdp->fd_fd.fd_rdir)
			VREF(newfdp->fd_fd.fd_rdir);
		newfdp->fd_fd.fd_jdir = fdp->fd_jdir;
		if (newfdp->fd_fd.fd_jdir)
			VREF(newfdp->fd_fd.fd_jdir);
		FILEDESC_XUNLOCK(fdp);
	}

	/* Create the file descriptor table. */
	newfdp->fd_fd.fd_refcnt = 1;
	newfdp->fd_fd.fd_holdcnt = 1;
	newfdp->fd_fd.fd_cmask = CMASK;
	/* Point the table at the arrays embedded in struct filedesc0. */
	newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles;
	newfdp->fd_fd.fd_ofileflags = newfdp->fd_dfileflags;
	newfdp->fd_fd.fd_nfiles = NDFILE;
	newfdp->fd_fd.fd_map = newfdp->fd_dmap;
	/* -1 means "no descriptor in use yet". */
	newfdp->fd_fd.fd_lastfile = -1;
	return (&newfdp->fd_fd);
}
|
|
|
|
|
2004-12-14 09:09:51 +00:00
|
|
|
static struct filedesc *
|
|
|
|
fdhold(struct proc *p)
|
|
|
|
{
|
|
|
|
struct filedesc *fdp;
|
|
|
|
|
|
|
|
mtx_lock(&fdesc_mtx);
|
|
|
|
fdp = p->p_fd;
|
|
|
|
if (fdp != NULL)
|
|
|
|
fdp->fd_holdcnt++;
|
|
|
|
mtx_unlock(&fdesc_mtx);
|
|
|
|
return (fdp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
fddrop(struct filedesc *fdp)
|
|
|
|
{
|
2009-05-14 03:24:22 +00:00
|
|
|
struct filedesc0 *fdp0;
|
|
|
|
struct freetable *ft;
|
2004-12-14 09:09:51 +00:00
|
|
|
int i;
|
|
|
|
|
|
|
|
mtx_lock(&fdesc_mtx);
|
|
|
|
i = --fdp->fd_holdcnt;
|
|
|
|
mtx_unlock(&fdesc_mtx);
|
|
|
|
if (i > 0)
|
|
|
|
return;
|
|
|
|
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_LOCK_DESTROY(fdp);
|
2009-05-14 03:24:22 +00:00
|
|
|
fdp0 = (struct filedesc0 *)fdp;
|
|
|
|
while ((ft = SLIST_FIRST(&fdp0->fd_free)) != NULL) {
|
|
|
|
SLIST_REMOVE_HEAD(&fdp0->fd_free, ft_next);
|
|
|
|
free(ft->ft_table, M_FILEDESC);
|
|
|
|
}
|
2008-10-23 15:53:51 +00:00
|
|
|
free(fdp, M_FILEDESC);
|
2004-12-14 09:09:51 +00:00
|
|
|
}
|
|
|
|
|
1996-02-23 18:49:25 +00:00
|
|
|
/*
 * Share a filedesc structure.
 *
 * Bumps the reference count under the exclusive table lock and hands
 * the same structure back to the caller (used when a new process or
 * thread shares its parent's descriptor table, e.g. rfork(RFFDG)-style
 * sharing).
 */
struct filedesc *
fdshare(struct filedesc *fdp)
{

	FILEDESC_XLOCK(fdp);
	fdp->fd_refcnt++;
	FILEDESC_XUNLOCK(fdp);
	return (fdp);
}
|
|
|
|
|
2004-12-14 07:20:03 +00:00
|
|
|
/*
|
|
|
|
* Unshare a filedesc structure, if necessary by making a copy
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
fdunshare(struct proc *p, struct thread *td)
|
|
|
|
{
|
|
|
|
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XLOCK(p->p_fd);
|
2004-12-14 07:20:03 +00:00
|
|
|
if (p->p_fd->fd_refcnt > 1) {
|
|
|
|
struct filedesc *tmp;
|
|
|
|
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XUNLOCK(p->p_fd);
|
2004-12-14 07:20:03 +00:00
|
|
|
tmp = fdcopy(p->p_fd);
|
|
|
|
fdfree(td);
|
|
|
|
p->p_fd = tmp;
|
|
|
|
} else
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XUNLOCK(p->p_fd);
|
2004-12-14 07:20:03 +00:00
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
 * Copy a filedesc structure.  A NULL pointer in returns a NULL reference,
 * this is to ease callers, not catch errors.
 *
 * Only descriptors that are marked passable (DFLAG_PASSABLE, i.e. not
 * kqueues) and not dead (badfileops) are copied; the remaining slots
 * are left empty and the first such slot becomes fd_freefile.
 */
struct filedesc *
fdcopy(struct filedesc *fdp)
{
	struct filedesc *newfdp;
	int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return (NULL);

	newfdp = fdinit(fdp);
	FILEDESC_SLOCK(fdp);
	/*
	 * Grow the new table until it can hold every descriptor in the
	 * source.  fdgrowtable() may sleep, so both locks are dropped /
	 * retaken around it; because the source lock is released,
	 * fd_lastfile can move and the loop re-tests it each pass.
	 */
	while (fdp->fd_lastfile >= newfdp->fd_nfiles) {
		FILEDESC_SUNLOCK(fdp);
		FILEDESC_XLOCK(newfdp);
		fdgrowtable(newfdp, fdp->fd_lastfile + 1);
		FILEDESC_XUNLOCK(newfdp);
		FILEDESC_SLOCK(fdp);
	}
	/* copy all passable descriptors (i.e. not kqueue) */
	newfdp->fd_freefile = -1;
	for (i = 0; i <= fdp->fd_lastfile; ++i) {
		if (fdisused(fdp, i) &&
		    (fdp->fd_ofiles[i]->f_ops->fo_flags & DFLAG_PASSABLE) &&
		    fdp->fd_ofiles[i]->f_ops != &badfileops) {
			/* Share the struct file; take a new reference. */
			newfdp->fd_ofiles[i] = fdp->fd_ofiles[i];
			newfdp->fd_ofileflags[i] = fdp->fd_ofileflags[i];
			fhold(newfdp->fd_ofiles[i]);
			newfdp->fd_lastfile = i;
		} else {
			/* Remember the lowest unused slot. */
			if (newfdp->fd_freefile == -1)
				newfdp->fd_freefile = i;
		}
	}
	newfdp->fd_cmask = fdp->fd_cmask;
	FILEDESC_SUNLOCK(fdp);
	/*
	 * Mark the copied slots used in the new table's allocation map.
	 * No one else can see newfdp yet, but fdused() asserts the lock.
	 */
	FILEDESC_XLOCK(newfdp);
	for (i = 0; i <= newfdp->fd_lastfile; ++i)
		if (newfdp->fd_ofiles[i] != NULL)
			fdused(newfdp, i);
	/* If every slot was copied, the free scan starts past the end. */
	if (newfdp->fd_freefile == -1)
		newfdp->fd_freefile = i;
	FILEDESC_XUNLOCK(newfdp);
	return (newfdp);
}
|
|
|
|
|
|
|
|
/*
 * Release a filedesc structure.
 *
 * Detaches the file descriptor table from the calling thread's process
 * and drops one reference on it.  If this was the last reference, all
 * remaining open files are closed, POSIX advisory locks held on behalf
 * of the process leader are released, and the table's storage is freed.
 */
void
fdfree(struct thread *td)
{
	struct filedesc *fdp;
	int i, locked;
	struct filedesc_to_leader *fdtol;
	struct file *fp;
	struct vnode *cdir, *jdir, *rdir, *vp;
	struct flock lf;

	/* Certain daemons might not have file descriptors. */
	fdp = td->td_proc->p_fd;
	if (fdp == NULL)
		return;

#ifdef RACCT
	/* Reset the process's open-file resource accounting to zero. */
	PROC_LOCK(td->td_proc);
	racct_set(td->td_proc, RACCT_NOFILE, 0);
	PROC_UNLOCK(td->td_proc);
#endif

	/* Check for special need to clear POSIX style locks */
	fdtol = td->td_proc->p_fdtol;
	if (fdtol != NULL) {
		FILEDESC_XLOCK(fdp);
		KASSERT(fdtol->fdl_refcount > 0,
		    ("filedesc_to_refcount botch: fdl_refcount=%d",
		    fdtol->fdl_refcount));
		if (fdtol->fdl_refcount == 1 &&
		    (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
			/*
			 * Last leader-list reference and the leader holds
			 * advisory locks: release any POSIX locks taken on
			 * behalf of the leader.  The filedesc lock must be
			 * dropped around VOP_ADVLOCK(); fhold() keeps the
			 * file from going away while unlocked.
			 */
			for (i = 0; i <= fdp->fd_lastfile; i++) {
				fp = fdp->fd_ofiles[i];
				if (fp == NULL || fp->f_type != DTYPE_VNODE)
					continue;
				fhold(fp);
				FILEDESC_XUNLOCK(fdp);
				/* Unlock the entire file: offset 0, len 0. */
				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = fp->f_vnode;
				locked = VFS_LOCK_GIANT(vp->v_mount);
				(void) VOP_ADVLOCK(vp,
				    (caddr_t)td->td_proc->p_leader, F_UNLCK,
				    &lf, F_POSIX);
				VFS_UNLOCK_GIANT(locked);
				FILEDESC_XLOCK(fdp);
				fdrop(fp, td);
			}
		}
	retry:
		if (fdtol->fdl_refcount == 1) {
			if (fdp->fd_holdleaderscount > 0 &&
			    (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
				/*
				 * close() or do_dup() has cleared a reference
				 * in a shared file descriptor table.
				 */
				fdp->fd_holdleaderswakeup = 1;
				sx_sleep(&fdp->fd_holdleaderscount,
				    FILEDESC_LOCK(fdp), PLOCK, "fdlhold", 0);
				/* Re-check both conditions after waking. */
				goto retry;
			}
			if (fdtol->fdl_holdcount > 0) {
				/*
				 * Ensure that fdtol->fdl_leader remains
				 * valid in closef().
				 */
				fdtol->fdl_wakeup = 1;
				sx_sleep(fdtol, FILEDESC_LOCK(fdp), PLOCK,
				    "fdlhold", 0);
				goto retry;
			}
		}
		fdtol->fdl_refcount--;
		if (fdtol->fdl_refcount == 0 &&
		    fdtol->fdl_holdcount == 0) {
			/* Unlink from the leader list; freed after unlock. */
			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
		} else
			fdtol = NULL;	/* Still referenced; do not free. */
		td->td_proc->p_fdtol = NULL;
		FILEDESC_XUNLOCK(fdp);
		if (fdtol != NULL)
			free(fdtol, M_FILEDESC_TO_LEADER);
	}
	FILEDESC_XLOCK(fdp);
	i = --fdp->fd_refcnt;
	FILEDESC_XUNLOCK(fdp);
	/* Table still shared with another process; nothing more to do. */
	if (i > 0)
		return;

	/*
	 * Last reference: close every remaining open file.  Each slot is
	 * cleared under the lock, but closef() itself is called unlocked.
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		fp = fdp->fd_ofiles[i];
		if (fp != NULL) {
			FILEDESC_XLOCK(fdp);
			fdp->fd_ofiles[i] = NULL;
			FILEDESC_XUNLOCK(fdp);
			(void) closef(fp, td);
		}
	}
	FILEDESC_XLOCK(fdp);

	/* XXX This should happen earlier. */
	mtx_lock(&fdesc_mtx);
	td->td_proc->p_fd = NULL;
	mtx_unlock(&fdesc_mtx);

	/* Free the table and map only if they were grown past the
	 * statically embedded NDFILE-sized arrays. */
	if (fdp->fd_nfiles > NDFILE)
		free(fdp->fd_ofiles, M_FILEDESC);
	if (NDSLOTS(fdp->fd_nfiles) > NDSLOTS(NDFILE))
		free(fdp->fd_map, M_FILEDESC);

	fdp->fd_nfiles = 0;

	/*
	 * Detach the cwd, root and jail directory vnodes under the lock;
	 * their references are released only after dropping it.
	 * NOTE(review): deferring vrele() past the unlock is presumably
	 * because vrele() may sleep -- confirm against VFS locking rules.
	 */
	cdir = fdp->fd_cdir;
	fdp->fd_cdir = NULL;
	rdir = fdp->fd_rdir;
	fdp->fd_rdir = NULL;
	jdir = fdp->fd_jdir;
	fdp->fd_jdir = NULL;
	FILEDESC_XUNLOCK(fdp);

	if (cdir) {
		locked = VFS_LOCK_GIANT(cdir->v_mount);
		vrele(cdir);
		VFS_UNLOCK_GIANT(locked);
	}
	if (rdir) {
		locked = VFS_LOCK_GIANT(rdir->v_mount);
		vrele(rdir);
		VFS_UNLOCK_GIANT(locked);
	}
	if (jdir) {
		locked = VFS_LOCK_GIANT(jdir->v_mount);
		vrele(jdir);
		VFS_UNLOCK_GIANT(locked);
	}

	/* Drop our reference on the structure itself. */
	fddrop(fdp);
}
|
|
|
|
|
2000-01-20 07:12:52 +00:00
|
|
|
/*
|
2000-01-21 02:52:54 +00:00
|
|
|
* For setugid programs, we don't want to people to use that setugidness
|
2000-01-20 07:12:52 +00:00
|
|
|
* to generate error messages which write to a file which otherwise would
|
2002-09-14 09:02:28 +00:00
|
|
|
* otherwise be off-limits to the process. We check for filesystems where
|
|
|
|
* the vnode can change out from under us after execve (like [lin]procfs).
|
2000-01-20 07:12:52 +00:00
|
|
|
*
|
|
|
|
* Since setugidsafety calls this only for fd 0, 1 and 2, this check is
|
2005-03-08 00:58:50 +00:00
|
|
|
* sufficient. We also don't check for setugidness since we know we are.
|
2000-01-20 07:12:52 +00:00
|
|
|
*/
|
|
|
|
static int
|
|
|
|
is_unsafe(struct file *fp)
|
|
|
|
{
|
2002-09-14 09:02:28 +00:00
|
|
|
if (fp->f_type == DTYPE_VNODE) {
|
2003-06-22 08:41:43 +00:00
|
|
|
struct vnode *vp = fp->f_vnode;
|
2002-09-14 09:02:28 +00:00
|
|
|
|
|
|
|
if ((vp->v_vflag & VV_PROCDEP) != 0)
|
|
|
|
return (1);
|
|
|
|
}
|
2000-01-20 07:12:52 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make this setguid thing safe, if at all possible.
|
|
|
|
*/
|
|
|
|
void
|
2004-12-02 11:56:13 +00:00
|
|
|
setugidsafety(struct thread *td)
|
2000-01-20 07:12:52 +00:00
|
|
|
{
|
2002-10-16 15:45:37 +00:00
|
|
|
struct filedesc *fdp;
|
2003-01-01 01:05:54 +00:00
|
|
|
int i;
|
2000-01-20 07:12:52 +00:00
|
|
|
|
|
|
|
/* Certain daemons might not have file descriptors. */
|
2002-10-16 15:45:37 +00:00
|
|
|
fdp = td->td_proc->p_fd;
|
2000-01-20 07:12:52 +00:00
|
|
|
if (fdp == NULL)
|
|
|
|
return;
|
|
|
|
|
2000-11-18 21:01:04 +00:00
|
|
|
/*
|
2002-10-16 15:45:37 +00:00
|
|
|
* Note: fdp->fd_ofiles may be reallocated out from under us while
|
2000-11-18 21:01:04 +00:00
|
|
|
* we are blocked in a close. Be careful!
|
|
|
|
*/
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are important for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XLOCK(fdp);
|
2000-11-18 21:01:04 +00:00
|
|
|
for (i = 0; i <= fdp->fd_lastfile; i++) {
|
2000-01-21 06:57:52 +00:00
|
|
|
if (i > 2)
|
|
|
|
break;
|
2000-11-18 21:01:04 +00:00
|
|
|
if (fdp->fd_ofiles[i] && is_unsafe(fdp->fd_ofiles[i])) {
|
|
|
|
struct file *fp;
|
|
|
|
|
2004-08-15 06:24:42 +00:00
|
|
|
knote_fdclose(td, i);
|
2000-11-18 21:01:04 +00:00
|
|
|
/*
|
|
|
|
* NULL-out descriptor prior to close to avoid
|
|
|
|
* a race while close blocks.
|
|
|
|
*/
|
|
|
|
fp = fdp->fd_ofiles[i];
|
|
|
|
fdp->fd_ofiles[i] = NULL;
|
|
|
|
fdp->fd_ofileflags[i] = 0;
|
2004-01-15 10:15:04 +00:00
|
|
|
fdunused(fdp, i);
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are important for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
  acquisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XUNLOCK(fdp);
|
2001-09-12 08:38:13 +00:00
|
|
|
(void) closef(fp, td);
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are important for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
  acquisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XLOCK(fdp);
|
2000-01-20 07:12:52 +00:00
|
|
|
}
|
|
|
|
}
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are important for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
  acquisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XUNLOCK(fdp);
|
2000-01-20 07:12:52 +00:00
|
|
|
}
|
|
|
|
|
2006-07-21 20:24:00 +00:00
|
|
|
/*
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
* If a specific file object occupies a specific file descriptor, close the
|
|
|
|
* file descriptor entry and drop a reference on the file object. This is a
|
|
|
|
* convenience function to handle a subsequent error in a function that calls
|
|
|
|
* falloc() that handles the race that another thread might have closed the
|
|
|
|
* file descriptor out from under the thread creating the file object.
|
2006-07-21 20:24:00 +00:00
|
|
|
*/
|
2004-11-07 22:16:07 +00:00
|
|
|
void
|
|
|
|
fdclose(struct filedesc *fdp, struct file *fp, int idx, struct thread *td)
|
|
|
|
{
|
|
|
|
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XLOCK(fdp);
|
2004-11-07 22:16:07 +00:00
|
|
|
if (fdp->fd_ofiles[idx] == fp) {
|
|
|
|
fdp->fd_ofiles[idx] = NULL;
|
|
|
|
fdunused(fdp, idx);
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XUNLOCK(fdp);
|
2004-11-07 22:16:07 +00:00
|
|
|
fdrop(fp, td);
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
} else
|
|
|
|
FILEDESC_XUNLOCK(fdp);
|
2004-11-07 22:16:07 +00:00
|
|
|
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
|
|
|
|
* Close any files on exec?
|
|
|
|
*/
|
|
|
|
void
|
2004-12-02 11:56:13 +00:00
|
|
|
fdcloseexec(struct thread *td)
|
1994-05-25 09:21:21 +00:00
|
|
|
{
|
2002-10-16 15:45:37 +00:00
|
|
|
struct filedesc *fdp;
|
2012-06-14 12:43:37 +00:00
|
|
|
struct file *fp;
|
2003-01-01 01:05:54 +00:00
|
|
|
int i;
|
1994-05-25 09:21:21 +00:00
|
|
|
|
1999-12-26 14:07:43 +00:00
|
|
|
/* Certain daemons might not have file descriptors. */
|
2002-10-16 15:45:37 +00:00
|
|
|
fdp = td->td_proc->p_fd;
|
1997-11-29 01:33:10 +00:00
|
|
|
if (fdp == NULL)
|
|
|
|
return;
|
|
|
|
|
2000-11-18 21:01:04 +00:00
|
|
|
/*
|
|
|
|
* We cannot cache fd_ofiles or fd_ofileflags since operations
|
|
|
|
* may block and rip them out from under us.
|
|
|
|
*/
|
2012-06-14 12:37:41 +00:00
|
|
|
FILEDESC_XLOCK(fdp);
|
2000-11-18 21:01:04 +00:00
|
|
|
for (i = 0; i <= fdp->fd_lastfile; i++) {
|
2012-06-14 12:37:41 +00:00
|
|
|
fp = fdp->fd_ofiles[i];
|
|
|
|
if (fp != NULL && (fp->f_type == DTYPE_MQUEUE ||
|
2005-11-30 05:12:03 +00:00
|
|
|
(fdp->fd_ofileflags[i] & UF_EXCLOSE))) {
|
2000-11-18 21:01:04 +00:00
|
|
|
fdp->fd_ofiles[i] = NULL;
|
|
|
|
fdp->fd_ofileflags[i] = 0;
|
2004-01-15 10:15:04 +00:00
|
|
|
fdunused(fdp, i);
|
2012-06-14 12:43:37 +00:00
|
|
|
(void) closefp(fdp, i, fp, td, 0);
|
|
|
|
/* closefp() drops the FILEDESC lock. */
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XLOCK(fdp);
|
1994-05-25 09:21:21 +00:00
|
|
|
}
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XUNLOCK(fdp);
|
1994-05-25 09:21:21 +00:00
|
|
|
}
|
|
|
|
|
2002-04-19 00:45:29 +00:00
|
|
|
/*
|
|
|
|
* It is unsafe for set[ug]id processes to be started with file
|
|
|
|
* descriptors 0..2 closed, as these descriptors are given implicit
|
|
|
|
* significance in the Standard C library. fdcheckstd() will create a
|
|
|
|
* descriptor referencing /dev/null for each of stdin, stdout, and
|
|
|
|
* stderr that is not already open.
|
|
|
|
*/
|
|
|
|
int
|
2004-12-02 11:56:13 +00:00
|
|
|
fdcheckstd(struct thread *td)
|
2002-04-19 00:45:29 +00:00
|
|
|
{
|
|
|
|
struct filedesc *fdp;
|
2007-04-26 18:01:19 +00:00
|
|
|
register_t retval, save;
|
|
|
|
int i, error, devnull;
|
2002-04-19 00:45:29 +00:00
|
|
|
|
|
|
|
fdp = td->td_proc->p_fd;
|
|
|
|
if (fdp == NULL)
|
|
|
|
return (0);
|
2004-02-15 21:14:48 +00:00
|
|
|
KASSERT(fdp->fd_refcnt == 1, ("the fdtable should not be shared"));
|
2002-04-19 00:45:29 +00:00
|
|
|
devnull = -1;
|
|
|
|
error = 0;
|
|
|
|
for (i = 0; i < 3; i++) {
|
|
|
|
if (fdp->fd_ofiles[i] != NULL)
|
|
|
|
continue;
|
|
|
|
if (devnull < 0) {
|
2007-04-26 18:01:19 +00:00
|
|
|
save = td->td_retval[0];
|
|
|
|
error = kern_open(td, "/dev/null", UIO_SYSSPACE,
|
|
|
|
O_RDWR, 0);
|
|
|
|
devnull = td->td_retval[0];
|
|
|
|
td->td_retval[0] = save;
|
|
|
|
if (error)
|
2002-04-19 00:45:29 +00:00
|
|
|
break;
|
2011-01-28 15:29:35 +00:00
|
|
|
KASSERT(devnull == i, ("oof, we didn't get our fd"));
|
2002-04-19 00:45:29 +00:00
|
|
|
} else {
|
- Change falloc() to acquire an fd from the process table last so that
it can do it w/o needing to hold the filelist_lock sx lock.
- fdalloc() doesn't need Giant to call free() anymore. It also doesn't
need to drop and reacquire the filedesc lock around free() now as a
result.
- Try to make the code that copies fd tables when extending the fd table in
fdalloc() a bit more readable by performing assignments in separate
statements. This is still a bit ugly though.
- Use max() instead of an if statement so to figure out the starting point
in the search-for-a-free-fd loop in fdalloc() so it reads better next to
the min() in the previous line.
- Don't grow nfiles in steps up to the size needed if we dup2() to some
really large number. Go ahead and double 'nfiles' in a loop prior
to doing the malloc().
- malloc() doesn't need Giant now.
- Use malloc() and free() instead of MALLOC() and FREE() in fdalloc().
- Check to see if the size we are going to grow to is too big, not if the
current size of the fd table is too big in the loop in fdalloc(). This
means if we are out of space or if dup2() requests too high of a fd,
then we will return an error before we go off and try to allocate some
huge table and copy the existing table into it.
- Move all of the logic for dup'ing a file descriptor into do_dup() instead
of putting some of it in do_dup() and duplicating other parts in four
different places. This makes dup(), dup2(), and fcntl(F_DUPFD) basically
wrappers of do_dup now. fcntl() still has an extra check since it uses
a different error return value in one case then the other functions.
- Add a KASSERT() for an assertion that may not always be true where the
fdcheckstd() function assumes that falloc() returns the fd requested and
not some other fd. I think that the assertion is always true because we
are always single-threaded when we get to this point, but if one was
using rfork() and another process sharing the fd table were playing with
the fd table, there might could be a problem.
- To handle the problem of a file descriptor we are dup()'ing being closed
out from under us in dup() in general, do_dup() now obtains a reference
on the file in question before calling fdalloc(). If after the call to
fdalloc() the file for the fd we are dup'ing is a different file, then
we drop our reference on the original file and return EBADF. This
race was only handled in the dup2() case before and would just retry
the operation. The error return allows the user to know they are being
stupid since they have a locking bug in their app instead of dup'ing
some other descriptor and returning it to them.
Tested on: i386, alpha, sparc64
2002-09-03 20:16:31 +00:00
|
|
|
error = do_dup(td, DUP_FIXED, devnull, i, &retval);
|
2002-04-19 00:45:29 +00:00
|
|
|
if (error != 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are important for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
  acquisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
/*
 * Internal form of close.  Decrement reference count on file structure.
 * Note: td may be NULL when closing a file that was being passed in a
 * message.
 *
 * XXXRW: Giant is not required for the caller, but often will be held; this
 * makes it moderately likely the Giant will be recursed in the VFS case.
 */
int
closef(struct file *fp, struct thread *td)
{
	struct vnode *vp;
	struct flock lf;
	struct filedesc_to_leader *fdtol;
	struct filedesc *fdp;
	struct file *fp_object;

	/*
	 * POSIX record locking dictates that any close releases ALL
	 * locks owned by this process.  This is handled by setting
	 * a flag in the unlock to free ONLY locks obeying POSIX
	 * semantics, and not to free BSD-style file locks.
	 * If the descriptor was in a message, POSIX-style locks
	 * aren't passed with the descriptor, and the thread pointer
	 * will be NULL.  Callers should be careful only to pass a
	 * NULL thread pointer when there really is no owning
	 * context that might have locks, or the locks will be
	 * leaked.
	 *
	 * If this is a capability, we do lock processing under the underlying
	 * node, not the capability itself.
	 */
	/* Unwrap with no rights required; lock bookkeeping targets the object. */
	(void)cap_funwrap(fp, 0, &fp_object);
	if (fp_object->f_type == DTYPE_VNODE && td != NULL) {
		int vfslocked;

		vp = fp_object->f_vnode;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		if ((td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
			/* Release all POSIX locks owned by the process leader. */
			lf.l_whence = SEEK_SET;
			lf.l_start = 0;
			lf.l_len = 0;
			lf.l_type = F_UNLCK;
			(void) VOP_ADVLOCK(vp, (caddr_t)td->td_proc->p_leader,
			    F_UNLCK, &lf, F_POSIX);
		}
		fdtol = td->td_proc->p_fdtol;
		if (fdtol != NULL) {
			/*
			 * Handle special case where file descriptor table is
			 * shared between multiple process leaders.
			 */
			fdp = td->td_proc->p_fd;
			FILEDESC_XLOCK(fdp);
			for (fdtol = fdtol->fdl_next;
			     fdtol != td->td_proc->p_fdtol;
			     fdtol = fdtol->fdl_next) {
				if ((fdtol->fdl_leader->p_flag &
				    P_ADVLOCK) == 0)
					continue;
				/*
				 * Hold the fdtol entry so it stays valid while
				 * we drop the filedesc lock to call
				 * VOP_ADVLOCK() (which may sleep).
				 */
				fdtol->fdl_holdcount++;
				FILEDESC_XUNLOCK(fdp);
				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = fp_object->f_vnode;
				(void) VOP_ADVLOCK(vp,
				    (caddr_t)fdtol->fdl_leader, F_UNLCK, &lf,
				    F_POSIX);
				FILEDESC_XLOCK(fdp);
				fdtol->fdl_holdcount--;
				/* Wake up anyone waiting for the hold to drain. */
				if (fdtol->fdl_holdcount == 0 &&
				    fdtol->fdl_wakeup != 0) {
					fdtol->fdl_wakeup = 0;
					wakeup(fdtol);
				}
			}
			FILEDESC_XUNLOCK(fdp);
		}
		VFS_UNLOCK_GIANT(vfslocked);
	}
	/* Drop our reference; final drop frees the file. */
	return (fdrop(fp, td));
}
|
|
|
|
|
2007-12-30 01:42:15 +00:00
|
|
|
/*
|
|
|
|
* Initialize the file pointer with the specified properties.
|
2011-11-15 01:48:53 +00:00
|
|
|
*
|
2007-12-30 01:42:15 +00:00
|
|
|
* The ops are set with release semantics to be certain that the flags, type,
|
|
|
|
* and data are visible when ops is. This is to prevent ops methods from being
|
|
|
|
* called with bad data.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
finit(struct file *fp, u_int flag, short type, void *data, struct fileops *ops)
|
|
|
|
{
|
|
|
|
fp->f_data = data;
|
|
|
|
fp->f_flag = flag;
|
|
|
|
fp->f_type = type;
|
|
|
|
atomic_store_rel_ptr((volatile uintptr_t *)&fp->f_ops, (uintptr_t)ops);
|
|
|
|
}
|
|
|
|
|
2009-05-14 03:24:22 +00:00
|
|
|
/*
 * Look up file descriptor 'fd' in 'fdp' and acquire a reference on the
 * file without taking the filedesc lock.  Returns NULL if the descriptor
 * is out of range or unused; otherwise returns the file with its refcount
 * incremented (caller must fdrop()).
 */
struct file *
fget_unlocked(struct filedesc *fdp, int fd)
{
	struct file *fp;
	u_int count;

	if (fd < 0 || fd >= fdp->fd_nfiles)
		return (NULL);
	/*
	 * Fetch the descriptor locklessly.  We avoid fdrop() races by
	 * never raising a refcount above 0.  To accomplish this we have
	 * to use a cmpset loop rather than an atomic_add.  The descriptor
	 * must be re-verified once we acquire a reference to be certain
	 * that the identity is still correct and we did not lose a race
	 * due to preemption.
	 */
	for (;;) {
		fp = fdp->fd_ofiles[fd];
		if (fp == NULL)
			break;
		count = fp->f_count;
		/* A zero count means the file is being torn down; retry. */
		if (count == 0)
			continue;
		/*
		 * Use an acquire barrier to prevent caching of fd_ofiles
		 * so it is refreshed for verification.
		 */
		if (atomic_cmpset_acq_int(&fp->f_count, count, count + 1) != 1)
			continue;
		/* Reference taken; confirm the slot still maps to this file. */
		if (fp == fdp->fd_ofiles[fd])
			break;
		/* Lost the race: slot was reassigned.  Drop and retry. */
		fdrop(fp, curthread);
	}

	return (fp);
}
|
2007-12-30 01:42:15 +00:00
|
|
|
|
2001-11-14 06:30:36 +00:00
|
|
|
/*
 * Extract the file pointer associated with the specified descriptor for the
 * current user process.
 *
 * If the descriptor doesn't exist or doesn't match 'flags', EBADF is
 * returned.
 *
 * If the FGET_GETCAP flag is set, the capability itself will be returned.
 * Calling _fget() with FGET_GETCAP on a non-capability will return EINVAL.
 * Otherwise, if the file is a capability, its rights will be checked against
 * the capability rights mask, and if successful, the object will be unwrapped.
 *
 * If an error occurred the non-zero error is returned and *fpp is set to
 * NULL.  Otherwise *fpp is held and set and zero is returned.  Caller is
 * responsible for fdrop().
 */
#define	FGET_GETCAP	0x00000001
static __inline int
_fget(struct thread *td, int fd, struct file **fpp, int flags,
    cap_rights_t needrights, cap_rights_t *haverightsp, u_char *maxprotp,
    int fget_flags)
{
	struct filedesc *fdp;
	struct file *fp;
#ifdef CAPABILITIES
	struct file *fp_fromcap;
#endif
	int error;

	*fpp = NULL;
	if (td == NULL || (fdp = td->td_proc->p_fd) == NULL)
		return (EBADF);
	if ((fp = fget_unlocked(fdp, fd)) == NULL)
		return (EBADF);
	/* Descriptor is being closed/replaced; treat as bad. */
	if (fp->f_ops == &badfileops) {
		fdrop(fp, td);
		return (EBADF);
	}

#ifdef CAPABILITIES
	/*
	 * If this is a capability, what rights does it have?
	 */
	if (haverightsp != NULL) {
		if (fp->f_type == DTYPE_CAPABILITY)
			*haverightsp = cap_rights(fp);
		else
			*haverightsp = CAP_MASK_VALID;
	}

	/*
	 * If a capability has been requested, return the capability directly.
	 * Otherwise, check capability rights, extract the underlying object,
	 * and check its access flags.
	 */
	if (fget_flags & FGET_GETCAP) {
		if (fp->f_type != DTYPE_CAPABILITY) {
			fdrop(fp, td);
			return (EINVAL);
		}
	} else {
		if (maxprotp == NULL)
			error = cap_funwrap(fp, needrights, &fp_fromcap);
		else
			error = cap_funwrap_mmap(fp, needrights, maxprotp,
			    &fp_fromcap);
		if (error != 0) {
			fdrop(fp, td);
			return (error);
		}

		/*
		 * If we've unwrapped a file, drop the original capability
		 * and hold the new descriptor.  fp after this point refers to
		 * the actual (unwrapped) object, not the capability.
		 */
		if (fp != fp_fromcap) {
			fhold(fp_fromcap);
			fdrop(fp, td);
			fp = fp_fromcap;
		}
	}
#else /* !CAPABILITIES */
	KASSERT(fp->f_type != DTYPE_CAPABILITY,
	    ("%s: saw capability", __func__));
	if (maxprotp != NULL)
		*maxprotp = VM_PROT_ALL;
#endif /* CAPABILITIES */

	/*
	 * FREAD and FWRITE failure return EBADF as per POSIX.
	 */
	error = 0;
	switch (flags) {
	case FREAD:
	case FWRITE:
		if ((fp->f_flag & flags) == 0)
			error = EBADF;
		break;
	case FEXEC:
		/* Executable images must be readable and must not be writable. */
		if ((fp->f_flag & (FREAD | FEXEC)) == 0 ||
		    ((fp->f_flag & FWRITE) != 0))
			error = EBADF;
		break;
	case 0:
		break;
	default:
		KASSERT(0, ("wrong flags"));
	}

	if (error != 0) {
		fdrop(fp, td);
		return (error);
	}

	*fpp = fp;
	return (0);
}
|
|
|
|
|
|
|
|
int
|
2011-08-11 12:30:23 +00:00
|
|
|
fget(struct thread *td, int fd, cap_rights_t rights, struct file **fpp)
|
|
|
|
{
|
|
|
|
|
|
|
|
return(_fget(td, fd, fpp, 0, rights, NULL, NULL, 0));
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
fget_mmap(struct thread *td, int fd, cap_rights_t rights, u_char *maxprotp,
|
|
|
|
struct file **fpp)
|
2001-11-14 06:30:36 +00:00
|
|
|
{
|
2002-10-16 15:45:37 +00:00
|
|
|
|
2011-08-11 12:30:23 +00:00
|
|
|
return (_fget(td, fd, fpp, 0, rights, NULL, maxprotp, 0));
|
2001-11-14 06:30:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2011-08-11 12:30:23 +00:00
|
|
|
fget_read(struct thread *td, int fd, cap_rights_t rights, struct file **fpp)
|
2001-11-14 06:30:36 +00:00
|
|
|
{
|
2002-10-16 15:45:37 +00:00
|
|
|
|
2011-08-11 12:30:23 +00:00
|
|
|
return(_fget(td, fd, fpp, FREAD, rights, NULL, NULL, 0));
|
2001-11-14 06:30:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2011-08-11 12:30:23 +00:00
|
|
|
fget_write(struct thread *td, int fd, cap_rights_t rights, struct file **fpp)
|
2001-11-14 06:30:36 +00:00
|
|
|
{
|
2002-10-16 15:45:37 +00:00
|
|
|
|
2011-08-11 12:30:23 +00:00
|
|
|
return (_fget(td, fd, fpp, FWRITE, rights, NULL, NULL, 0));
|
2011-07-05 13:45:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2011-08-11 12:30:23 +00:00
|
|
|
* Unlike the other fget() calls, which accept and check capability rights
|
2011-07-05 13:45:10 +00:00
|
|
|
* but never return capabilities, fgetcap() returns the capability but doesn't
|
|
|
|
* check capability rights.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
fgetcap(struct thread *td, int fd, struct file **fpp)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (_fget(td, fd, fpp, 0, 0, NULL, NULL, FGET_GETCAP));
|
2001-11-14 06:30:36 +00:00
|
|
|
}
|
|
|
|
|
2011-07-05 13:45:10 +00:00
|
|
|
|
2001-11-14 06:30:36 +00:00
|
|
|
/*
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
* Like fget() but loads the underlying vnode, or returns an error if the
|
|
|
|
* descriptor does not represent a vnode. Note that pipes use vnodes but
|
|
|
|
* never have VM objects. The returned vnode will be vref()'d.
|
2004-12-02 11:56:13 +00:00
|
|
|
*
|
|
|
|
* XXX: what about the unused flags ?
|
2001-11-14 06:30:36 +00:00
|
|
|
*/
|
2002-10-16 15:45:37 +00:00
|
|
|
static __inline int
|
2011-08-11 12:30:23 +00:00
|
|
|
_fgetvp(struct thread *td, int fd, int flags, cap_rights_t needrights,
|
|
|
|
cap_rights_t *haverightsp, struct vnode **vpp)
|
2001-11-14 06:30:36 +00:00
|
|
|
{
|
|
|
|
struct file *fp;
|
2002-01-14 00:13:45 +00:00
|
|
|
int error;
|
2001-11-14 06:30:36 +00:00
|
|
|
|
|
|
|
*vpp = NULL;
|
2011-08-11 12:30:23 +00:00
|
|
|
if ((error = _fget(td, fd, &fp, flags, needrights, haverightsp,
|
|
|
|
NULL, 0)) != 0)
|
2002-01-14 00:13:45 +00:00
|
|
|
return (error);
|
2003-07-04 12:20:27 +00:00
|
|
|
if (fp->f_vnode == NULL) {
|
2002-01-14 00:13:45 +00:00
|
|
|
error = EINVAL;
|
|
|
|
} else {
|
2003-06-22 08:41:43 +00:00
|
|
|
*vpp = fp->f_vnode;
|
2002-01-14 00:13:45 +00:00
|
|
|
vref(*vpp);
|
|
|
|
}
|
2009-05-14 03:24:22 +00:00
|
|
|
fdrop(fp, td);
|
|
|
|
|
2002-01-14 00:13:45 +00:00
|
|
|
return (error);
|
2001-11-14 06:30:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2011-08-11 12:30:23 +00:00
|
|
|
fgetvp(struct thread *td, int fd, cap_rights_t rights, struct vnode **vpp)
|
2001-11-14 06:30:36 +00:00
|
|
|
{
|
2002-10-16 15:45:37 +00:00
|
|
|
|
2011-08-11 12:30:23 +00:00
|
|
|
return (_fgetvp(td, fd, 0, rights, NULL, vpp));
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
fgetvp_rights(struct thread *td, int fd, cap_rights_t need, cap_rights_t *have,
|
|
|
|
struct vnode **vpp)
|
|
|
|
{
|
|
|
|
return (_fgetvp(td, fd, 0, need, have, vpp));
|
2001-11-14 06:30:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2011-08-11 12:30:23 +00:00
|
|
|
fgetvp_read(struct thread *td, int fd, cap_rights_t rights, struct vnode **vpp)
|
2001-11-14 06:30:36 +00:00
|
|
|
{
|
2002-10-16 15:45:37 +00:00
|
|
|
|
2011-08-11 12:30:23 +00:00
|
|
|
return (_fgetvp(td, fd, FREAD, rights, NULL, vpp));
|
2001-11-14 06:30:36 +00:00
|
|
|
}
|
|
|
|
|
2012-07-08 00:51:38 +00:00
|
|
|
int
|
|
|
|
fgetvp_exec(struct thread *td, int fd, cap_rights_t rights, struct vnode **vpp)
|
|
|
|
{
|
|
|
|
|
2012-07-09 05:39:31 +00:00
|
|
|
return (_fgetvp(td, fd, FEXEC, rights, NULL, vpp));
|
2012-07-08 00:51:38 +00:00
|
|
|
}
|
|
|
|
|
2004-12-02 11:56:13 +00:00
|
|
|
#ifdef notyet
/*
 * As fgetvp(), but requires the descriptor to be open for writing.
 * Currently compiled out (no in-tree callers).
 */
int
fgetvp_write(struct thread *td, int fd, cap_rights_t rights,
    struct vnode **vpp)
{

	return (_fgetvp(td, fd, FWRITE, rights, NULL, vpp));
}
#endif
|
2001-11-14 06:30:36 +00:00
|
|
|
|
2001-11-17 03:07:11 +00:00
|
|
|
/*
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
* Like fget() but loads the underlying socket, or returns an error if the
|
|
|
|
* descriptor does not represent a socket.
|
2001-11-17 03:07:11 +00:00
|
|
|
*
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
* We bump the ref count on the returned socket. XXX Also obtain the SX lock
|
|
|
|
* in the future.
|
2006-04-01 11:09:54 +00:00
|
|
|
*
|
2008-10-12 20:03:17 +00:00
|
|
|
* Note: fgetsock() and fputsock() are deprecated, as consumers should rely
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
* on their file descriptor reference to prevent the socket from being free'd
|
|
|
|
* during use.
|
2001-11-17 03:07:11 +00:00
|
|
|
*/
|
|
|
|
int
|
2011-08-11 12:30:23 +00:00
|
|
|
fgetsock(struct thread *td, int fd, cap_rights_t rights, struct socket **spp,
|
|
|
|
u_int *fflagp)
|
2001-11-17 03:07:11 +00:00
|
|
|
{
|
|
|
|
struct file *fp;
|
2002-01-14 00:13:45 +00:00
|
|
|
int error;
|
2001-11-17 03:07:11 +00:00
|
|
|
|
|
|
|
*spp = NULL;
|
2002-10-16 15:45:37 +00:00
|
|
|
if (fflagp != NULL)
|
2001-11-17 03:07:11 +00:00
|
|
|
*fflagp = 0;
|
2011-08-11 12:30:23 +00:00
|
|
|
if ((error = _fget(td, fd, &fp, 0, rights, NULL, NULL, 0)) != 0)
|
2002-01-14 00:13:45 +00:00
|
|
|
return (error);
|
|
|
|
if (fp->f_type != DTYPE_SOCKET) {
|
|
|
|
error = ENOTSOCK;
|
|
|
|
} else {
|
2003-01-13 00:33:17 +00:00
|
|
|
*spp = fp->f_data;
|
2002-01-14 00:13:45 +00:00
|
|
|
if (fflagp)
|
|
|
|
*fflagp = fp->f_flag;
|
2004-06-12 20:47:32 +00:00
|
|
|
SOCK_LOCK(*spp);
|
2002-01-14 00:13:45 +00:00
|
|
|
soref(*spp);
|
2004-06-12 20:47:32 +00:00
|
|
|
SOCK_UNLOCK(*spp);
|
2002-01-14 00:13:45 +00:00
|
|
|
}
|
2009-05-14 03:24:22 +00:00
|
|
|
fdrop(fp, td);
|
|
|
|
|
2002-10-16 15:45:37 +00:00
|
|
|
return (error);
|
2001-11-17 03:07:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Drop the reference count on the socket and XXX release the SX lock in the
 * future.  The last reference closes the socket.
 *
 * Note: fputsock() is deprecated, see comment for fgetsock().
 */
void
fputsock(struct socket *so)
{

	/*
	 * Lock order: accept lock before socket lock; sorele() is
	 * entered with both held (it consumes them).
	 */
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	/* Run the release in the socket's vnet context. */
	CURVNET_SET(so->so_vnet);
	sorele(so);
	CURVNET_RESTORE();
}
|
|
|
|
|
2002-01-13 12:58:14 +00:00
|
|
|
/*
 * Handle the last reference to a file being closed.
 *
 * No special capability handling here, as the capability's fo_close will run
 * instead of the object here, and perform any necessary drop on the object.
 */
int
_fdrop(struct file *fp, struct thread *td)
{
	int error;

	error = 0;
	/* The caller must have already dropped the final reference. */
	if (fp->f_count != 0)
		panic("fdrop: count %d", fp->f_count);
	/* badfileops means the file was never fully initialized; skip close. */
	if (fp->f_ops != &badfileops)
		error = fo_close(fp, td);
	atomic_subtract_int(&openfiles, 1);
	/* Release resources attached to the file before freeing it. */
	crfree(fp->f_cred);
	free(fp->f_advice, M_FADVISE);
	uma_zfree(file_zone, fp);

	return (error);
}
|
|
|
|
|
|
|
|
/*
 * Apply an advisory lock on a file descriptor.
 *
 * Just attempt to get a record lock of the requested type on the entire file
 * (l_whence = SEEK_SET, l_start = 0, l_len = 0).
 */
#ifndef _SYS_SYSPROTO_H_
struct flock_args {
	int	fd;
	int	how;
};
#endif
/* ARGSUSED */
int
sys_flock(struct thread *td, struct flock_args *uap)
{
	struct file *fp;
	struct vnode *vp;
	struct flock lf;
	int vfslocked;
	int error;

	if ((error = fget(td, uap->fd, CAP_FLOCK, &fp)) != 0)
		return (error);
	/* flock(2) only operates on vnode-backed descriptors. */
	if (fp->f_type != DTYPE_VNODE) {
		fdrop(fp, td);
		return (EOPNOTSUPP);
	}

	vp = fp->f_vnode;
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	/* Lock the whole file, per the flock(2) contract. */
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	if (uap->how & LOCK_UN) {
		lf.l_type = F_UNLCK;
		atomic_clear_int(&fp->f_flag, FHASLOCK);
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
		goto done2;
	}
	if (uap->how & LOCK_EX)
		lf.l_type = F_WRLCK;
	else if (uap->how & LOCK_SH)
		lf.l_type = F_RDLCK;
	else {
		error = EBADF;
		goto done2;
	}
	/*
	 * NOTE(review): FHASLOCK is set before VOP_ADVLOCK() and is not
	 * cleared if the lock request fails — longstanding behavior here.
	 */
	atomic_set_int(&fp->f_flag, FHASLOCK);
	error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf,
	    (uap->how & LOCK_NB) ? F_FLOCK : F_FLOCK | F_WAIT);
done2:
	fdrop(fp, td);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}
|
|
|
|
/*
 * Duplicate the specified descriptor to a free descriptor.
 *
 * Called from the open path when a device open returned ENODEV or ENXIO
 * to request duplication of descriptor 'dfd' (e.g. /dev/fd).  On success
 * the new descriptor index is stored in *indxp.
 */
int
dupfdopen(struct thread *td, struct filedesc *fdp, int dfd, int mode, int openerror, int *indxp)
{
	struct file *fp;
	int error, indx;

	/* Only the two sentinel errors from the device open are legal here. */
	KASSERT(openerror == ENODEV || openerror == ENXIO,
	    ("unexpected error %d in %s", openerror, __func__));

	/*
	 * If the to-be-dup'd fd number is greater than the allowed number
	 * of file descriptors, or the fd to be dup'd has already been
	 * closed, then reject.
	 */
	FILEDESC_XLOCK(fdp);
	if ((fp = fget_locked(fdp, dfd)) == NULL) {
		FILEDESC_XUNLOCK(fdp);
		return (EBADF);
	}

	/* Reserve a free slot for the duplicate while the lock is held. */
	error = fdalloc(td, 0, &indx);
	if (error != 0) {
		FILEDESC_XUNLOCK(fdp);
		return (error);
	}

	/*
	 * There are two cases of interest here.
	 *
	 * For ENODEV simply dup (dfd) to file descriptor (indx) and return.
	 *
	 * For ENXIO steal away the file structure from (dfd) and store it in
	 * (indx).  (dfd) is effectively closed by this operation.
	 */
	switch (openerror) {
	case ENODEV:
		/*
		 * Check that the mode the file is being opened for is a
		 * subset of the mode of the existing descriptor.
		 */
		if (((mode & (FREAD|FWRITE)) | fp->f_flag) != fp->f_flag) {
			/* Give back the slot reserved by fdalloc(). */
			fdunused(fdp, indx);
			FILEDESC_XUNLOCK(fdp);
			return (EACCES);
		}
		fdp->fd_ofiles[indx] = fp;
		fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
		/* New slot holds an additional reference on fp. */
		fhold(fp);
		break;
	case ENXIO:
		/*
		 * Steal away the file pointer from dfd and stuff it into indx.
		 */
		fdp->fd_ofiles[indx] = fp;
		fdp->fd_ofiles[dfd] = NULL;
		fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
		fdp->fd_ofileflags[dfd] = 0;
		/* dfd is now closed; mark its slot free. */
		fdunused(fdp, dfd);
		break;
	}
	FILEDESC_XUNLOCK(fdp);
	*indxp = indx;
	return (0);
}
|
1995-11-14 08:58:35 +00:00
|
|
|
|
2004-12-14 08:23:18 +00:00
|
|
|
/*
 * Scan all active processes and prisons to see if any of them have a current
 * or root directory of `olddp'. If so, replace them with the new mount point.
 */
void
mountcheckdirs(struct vnode *olddp, struct vnode *newdp)
{
	struct filedesc *fdp;
	struct prison *pr;
	struct proc *p;
	int nrele;

	/* Only our caller holds a reference; nothing else to fix up. */
	if (vrefcnt(olddp) == 1)
		return;
	nrele = 0;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		/* fdhold() keeps the filedesc alive after we drop its proc. */
		fdp = fdhold(p);
		if (fdp == NULL)
			continue;
		FILEDESC_XLOCK(fdp);
		/*
		 * Swap each matching directory pointer; count releases of
		 * olddp to perform after all locks are dropped.
		 */
		if (fdp->fd_cdir == olddp) {
			vref(newdp);
			fdp->fd_cdir = newdp;
			nrele++;
		}
		if (fdp->fd_rdir == olddp) {
			vref(newdp);
			fdp->fd_rdir = newdp;
			nrele++;
		}
		if (fdp->fd_jdir == olddp) {
			vref(newdp);
			fdp->fd_jdir = newdp;
			nrele++;
		}
		FILEDESC_XUNLOCK(fdp);
		fddrop(fdp);
	}
	sx_sunlock(&allproc_lock);
	if (rootvnode == olddp) {
		vref(newdp);
		rootvnode = newdp;
		nrele++;
	}
	/* prison0 is not on the allprison list; handle it separately. */
	mtx_lock(&prison0.pr_mtx);
	if (prison0.pr_root == olddp) {
		vref(newdp);
		prison0.pr_root = newdp;
		nrele++;
	}
	mtx_unlock(&prison0.pr_mtx);
	sx_slock(&allprison_lock);
	TAILQ_FOREACH(pr, &allprison, pr_list) {
		mtx_lock(&pr->pr_mtx);
		if (pr->pr_root == olddp) {
			vref(newdp);
			pr->pr_root = newdp;
			nrele++;
		}
		mtx_unlock(&pr->pr_mtx);
	}
	sx_sunlock(&allprison_lock);
	/* Deferred releases: vrele() may sleep, so do them lock-free. */
	while (nrele--)
		vrele(olddp);
}
|
|
|
|
|
2003-06-02 16:05:32 +00:00
|
|
|
struct filedesc_to_leader *
|
2004-12-02 11:56:13 +00:00
|
|
|
filedesc_to_leader_alloc(struct filedesc_to_leader *old, struct filedesc *fdp, struct proc *leader)
|
2003-06-02 16:05:32 +00:00
|
|
|
{
|
|
|
|
struct filedesc_to_leader *fdtol;
|
2004-01-11 19:39:14 +00:00
|
|
|
|
2008-10-23 20:26:15 +00:00
|
|
|
fdtol = malloc(sizeof(struct filedesc_to_leader),
|
2003-06-02 16:05:32 +00:00
|
|
|
M_FILEDESC_TO_LEADER,
|
|
|
|
M_WAITOK);
|
|
|
|
fdtol->fdl_refcount = 1;
|
|
|
|
fdtol->fdl_holdcount = 0;
|
|
|
|
fdtol->fdl_wakeup = 0;
|
|
|
|
fdtol->fdl_leader = leader;
|
|
|
|
if (old != NULL) {
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XLOCK(fdp);
|
2003-06-02 16:05:32 +00:00
|
|
|
fdtol->fdl_next = old->fdl_next;
|
|
|
|
fdtol->fdl_prev = old;
|
|
|
|
old->fdl_next = fdtol;
|
|
|
|
fdtol->fdl_next->fdl_prev = fdtol;
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_XUNLOCK(fdp);
|
2003-06-02 16:05:32 +00:00
|
|
|
} else {
|
|
|
|
fdtol->fdl_next = fdtol;
|
|
|
|
fdtol->fdl_prev = fdtol;
|
|
|
|
}
|
2004-01-15 10:15:04 +00:00
|
|
|
return (fdtol);
|
2003-06-02 16:05:32 +00:00
|
|
|
}
|
|
|
|
|
1995-11-14 08:58:35 +00:00
|
|
|
/*
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
* Get file structures globally.
|
1995-11-14 08:58:35 +00:00
|
|
|
*/
|
|
|
|
static int
|
2000-07-04 11:25:35 +00:00
|
|
|
sysctl_kern_file(SYSCTL_HANDLER_ARGS)
|
1995-11-14 08:58:35 +00:00
|
|
|
{
|
2002-10-16 15:45:37 +00:00
|
|
|
struct xfile xf;
|
2002-07-31 12:26:52 +00:00
|
|
|
struct filedesc *fdp;
|
1995-11-14 08:58:35 +00:00
|
|
|
struct file *fp;
|
2002-10-16 15:45:37 +00:00
|
|
|
struct proc *p;
|
2002-07-31 12:26:52 +00:00
|
|
|
int error, n;
|
1995-11-14 08:58:35 +00:00
|
|
|
|
2004-02-26 00:27:04 +00:00
|
|
|
error = sysctl_wire_old_buffer(req, 0);
|
|
|
|
if (error != 0)
|
|
|
|
return (error);
|
2002-10-16 15:45:37 +00:00
|
|
|
if (req->oldptr == NULL) {
|
2007-12-30 01:42:15 +00:00
|
|
|
n = 0;
|
|
|
|
sx_slock(&allproc_lock);
|
|
|
|
FOREACH_PROC_IN_SYSTEM(p) {
|
|
|
|
if (p->p_state == PRS_NEW)
|
|
|
|
continue;
|
|
|
|
fdp = fdhold(p);
|
|
|
|
if (fdp == NULL)
|
|
|
|
continue;
|
|
|
|
/* overestimates sparse tables. */
|
2008-01-03 01:26:59 +00:00
|
|
|
if (fdp->fd_lastfile > 0)
|
|
|
|
n += fdp->fd_lastfile;
|
2007-12-30 01:42:15 +00:00
|
|
|
fddrop(fdp);
|
2002-07-31 12:26:52 +00:00
|
|
|
}
|
2007-12-30 01:42:15 +00:00
|
|
|
sx_sunlock(&allproc_lock);
|
2002-10-16 15:45:37 +00:00
|
|
|
return (SYSCTL_OUT(req, 0, n * sizeof(xf)));
|
2002-01-13 11:58:06 +00:00
|
|
|
}
|
2002-07-31 12:26:52 +00:00
|
|
|
error = 0;
|
2002-10-16 15:45:37 +00:00
|
|
|
bzero(&xf, sizeof(xf));
|
|
|
|
xf.xf_size = sizeof(xf);
|
2002-07-31 12:26:52 +00:00
|
|
|
sx_slock(&allproc_lock);
|
2007-01-17 14:58:53 +00:00
|
|
|
FOREACH_PROC_IN_SYSTEM(p) {
|
2002-07-31 12:26:52 +00:00
|
|
|
PROC_LOCK(p);
|
2011-03-24 18:40:11 +00:00
|
|
|
if (p->p_state == PRS_NEW) {
|
|
|
|
PROC_UNLOCK(p);
|
|
|
|
continue;
|
|
|
|
}
|
2003-07-28 16:03:53 +00:00
|
|
|
if (p_cansee(req->td, p) != 0) {
|
|
|
|
PROC_UNLOCK(p);
|
|
|
|
continue;
|
|
|
|
}
|
2002-07-31 12:26:52 +00:00
|
|
|
xf.xf_pid = p->p_pid;
|
|
|
|
xf.xf_uid = p->p_ucred->cr_uid;
|
2003-02-11 07:20:52 +00:00
|
|
|
PROC_UNLOCK(p);
|
2004-12-14 09:09:51 +00:00
|
|
|
fdp = fdhold(p);
|
|
|
|
if (fdp == NULL)
|
2002-07-31 12:26:52 +00:00
|
|
|
continue;
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_SLOCK(fdp);
|
2004-12-14 20:48:40 +00:00
|
|
|
for (n = 0; fdp->fd_refcnt > 0 && n < fdp->fd_nfiles; ++n) {
|
2002-07-31 12:26:52 +00:00
|
|
|
if ((fp = fdp->fd_ofiles[n]) == NULL)
|
|
|
|
continue;
|
|
|
|
xf.xf_fd = n;
|
|
|
|
xf.xf_file = fp;
|
2003-01-13 00:33:17 +00:00
|
|
|
xf.xf_data = fp->f_data;
|
2004-06-19 11:40:08 +00:00
|
|
|
xf.xf_vnode = fp->f_vnode;
|
2003-01-13 00:33:17 +00:00
|
|
|
xf.xf_type = fp->f_type;
|
|
|
|
xf.xf_count = fp->f_count;
|
2007-12-30 01:42:15 +00:00
|
|
|
xf.xf_msgcount = 0;
|
2012-07-02 21:01:03 +00:00
|
|
|
xf.xf_offset = foffset_get(fp);
|
2003-01-13 00:33:17 +00:00
|
|
|
xf.xf_flag = fp->f_flag;
|
2002-10-16 15:45:37 +00:00
|
|
|
error = SYSCTL_OUT(req, &xf, sizeof(xf));
|
2002-07-31 12:26:52 +00:00
|
|
|
if (error)
|
|
|
|
break;
|
2002-01-13 11:58:06 +00:00
|
|
|
}
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2004-12-14 09:09:51 +00:00
|
|
|
fddrop(fdp);
|
2002-07-31 12:26:52 +00:00
|
|
|
if (error)
|
|
|
|
break;
|
1995-11-14 08:58:35 +00:00
|
|
|
}
|
2002-07-31 12:26:52 +00:00
|
|
|
sx_sunlock(&allproc_lock);
|
|
|
|
return (error);
|
1995-11-14 08:58:35 +00:00
|
|
|
}
|
1995-12-04 16:48:58 +00:00
|
|
|
|
|
|
|
/* Register the read-only kern.file sysctl backed by sysctl_kern_file(). */
SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, 0, sysctl_kern_file, "S,xfile", "Entire file table");
|
1995-12-04 16:48:58 +00:00
|
|
|
|
2008-12-02 06:50:26 +00:00
|
|
|
/* Catch ABI breakage: struct kinfo_ofile must keep its published size. */
#ifdef KINFO_OFILE_SIZE
CTASSERT(sizeof(struct kinfo_ofile) == KINFO_OFILE_SIZE);
#endif
|
|
|
|
|
|
|
|
#ifdef COMPAT_FREEBSD7
|
|
|
|
static int
|
|
|
|
export_vnode_for_osysctl(struct vnode *vp, int type,
|
|
|
|
struct kinfo_ofile *kif, struct filedesc *fdp, struct sysctl_req *req)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
char *fullpath, *freepath;
|
|
|
|
int vfslocked;
|
|
|
|
|
|
|
|
bzero(kif, sizeof(*kif));
|
|
|
|
kif->kf_structsize = sizeof(*kif);
|
|
|
|
|
|
|
|
vref(vp);
|
|
|
|
kif->kf_fd = type;
|
|
|
|
kif->kf_type = KF_TYPE_VNODE;
|
|
|
|
/* This function only handles directories. */
|
2009-02-14 21:12:24 +00:00
|
|
|
if (vp->v_type != VDIR) {
|
|
|
|
vrele(vp);
|
|
|
|
return (ENOTDIR);
|
|
|
|
}
|
2008-12-02 06:50:26 +00:00
|
|
|
kif->kf_vnode_type = KF_VTYPE_VDIR;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is not a true file descriptor, so we set a bogus refcount
|
|
|
|
* and offset to indicate these fields should be ignored.
|
|
|
|
*/
|
|
|
|
kif->kf_ref_count = -1;
|
|
|
|
kif->kf_offset = -1;
|
|
|
|
|
|
|
|
freepath = NULL;
|
|
|
|
fullpath = "-";
|
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
vn_fullpath(curthread, vp, &fullpath, &freepath);
|
|
|
|
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
|
|
|
|
vrele(vp);
|
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
|
|
|
strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path));
|
|
|
|
if (freepath != NULL)
|
|
|
|
free(freepath, M_TEMP);
|
|
|
|
error = SYSCTL_OUT(req, kif, sizeof(*kif));
|
|
|
|
FILEDESC_SLOCK(fdp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get per-process file descriptors for use by procstat(1), et al.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
sysctl_kern_proc_ofiledesc(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
char *fullpath, *freepath;
|
|
|
|
struct kinfo_ofile *kif;
|
|
|
|
struct filedesc *fdp;
|
|
|
|
int error, i, *name;
|
2012-04-01 18:22:48 +00:00
|
|
|
struct shmfd *shmfd;
|
2008-12-02 06:50:26 +00:00
|
|
|
struct socket *so;
|
|
|
|
struct vnode *vp;
|
|
|
|
struct file *fp;
|
|
|
|
struct proc *p;
|
|
|
|
struct tty *tp;
|
|
|
|
int vfslocked;
|
|
|
|
|
|
|
|
name = (int *)arg1;
|
|
|
|
if ((p = pfind((pid_t)name[0])) == NULL)
|
|
|
|
return (ESRCH);
|
|
|
|
if ((error = p_candebug(curthread, p))) {
|
|
|
|
PROC_UNLOCK(p);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
fdp = fdhold(p);
|
|
|
|
PROC_UNLOCK(p);
|
|
|
|
if (fdp == NULL)
|
|
|
|
return (ENOENT);
|
|
|
|
kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK);
|
|
|
|
FILEDESC_SLOCK(fdp);
|
|
|
|
if (fdp->fd_cdir != NULL)
|
|
|
|
export_vnode_for_osysctl(fdp->fd_cdir, KF_FD_TYPE_CWD, kif,
|
|
|
|
fdp, req);
|
|
|
|
if (fdp->fd_rdir != NULL)
|
|
|
|
export_vnode_for_osysctl(fdp->fd_rdir, KF_FD_TYPE_ROOT, kif,
|
|
|
|
fdp, req);
|
|
|
|
if (fdp->fd_jdir != NULL)
|
|
|
|
export_vnode_for_osysctl(fdp->fd_jdir, KF_FD_TYPE_JAIL, kif,
|
|
|
|
fdp, req);
|
|
|
|
for (i = 0; i < fdp->fd_nfiles; i++) {
|
|
|
|
if ((fp = fdp->fd_ofiles[i]) == NULL)
|
|
|
|
continue;
|
|
|
|
bzero(kif, sizeof(*kif));
|
|
|
|
kif->kf_structsize = sizeof(*kif);
|
|
|
|
vp = NULL;
|
|
|
|
so = NULL;
|
|
|
|
tp = NULL;
|
2012-04-01 18:22:48 +00:00
|
|
|
shmfd = NULL;
|
2008-12-02 06:50:26 +00:00
|
|
|
kif->kf_fd = i;
|
2011-07-20 09:53:35 +00:00
|
|
|
|
|
|
|
#ifdef CAPABILITIES
|
|
|
|
/*
|
|
|
|
* When reporting a capability, most fields will be from the
|
|
|
|
* underlying object, but do mark as a capability. With
|
|
|
|
* ofiledesc, we don't have a field to export the cap_rights_t,
|
|
|
|
* but we do with the new filedesc.
|
|
|
|
*/
|
|
|
|
if (fp->f_type == DTYPE_CAPABILITY) {
|
|
|
|
kif->kf_flags |= KF_FLAG_CAPABILITY;
|
|
|
|
(void)cap_funwrap(fp, 0, &fp);
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
KASSERT(fp->f_type != DTYPE_CAPABILITY,
|
|
|
|
("sysctl_kern_proc_ofiledesc: saw capability"));
|
|
|
|
#endif
|
2008-12-02 06:50:26 +00:00
|
|
|
switch (fp->f_type) {
|
|
|
|
case DTYPE_VNODE:
|
|
|
|
kif->kf_type = KF_TYPE_VNODE;
|
|
|
|
vp = fp->f_vnode;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DTYPE_SOCKET:
|
|
|
|
kif->kf_type = KF_TYPE_SOCKET;
|
|
|
|
so = fp->f_data;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DTYPE_PIPE:
|
|
|
|
kif->kf_type = KF_TYPE_PIPE;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DTYPE_FIFO:
|
|
|
|
kif->kf_type = KF_TYPE_FIFO;
|
|
|
|
vp = fp->f_vnode;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DTYPE_KQUEUE:
|
|
|
|
kif->kf_type = KF_TYPE_KQUEUE;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DTYPE_CRYPTO:
|
|
|
|
kif->kf_type = KF_TYPE_CRYPTO;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DTYPE_MQUEUE:
|
|
|
|
kif->kf_type = KF_TYPE_MQUEUE;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DTYPE_SHM:
|
|
|
|
kif->kf_type = KF_TYPE_SHM;
|
2012-04-01 18:22:48 +00:00
|
|
|
shmfd = fp->f_data;
|
2008-12-02 06:50:26 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DTYPE_SEM:
|
|
|
|
kif->kf_type = KF_TYPE_SEM;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DTYPE_PTS:
|
|
|
|
kif->kf_type = KF_TYPE_PTS;
|
|
|
|
tp = fp->f_data;
|
|
|
|
break;
|
|
|
|
|
Add experimental support for process descriptors
A "process descriptor" file descriptor is used to manage processes
without using the PID namespace. This is required for Capsicum's
Capability Mode, where the PID namespace is unavailable.
New system calls pdfork(2) and pdkill(2) offer the functional equivalents
of fork(2) and kill(2). pdgetpid(2) allows querying the PID of the remote
process for debugging purposes. The currently-unimplemented pdwait(2) will,
in the future, allow querying rusage/exit status. In the interim, poll(2)
may be used to check (and wait for) process termination.
When a process is referenced by a process descriptor, it does not issue
SIGCHLD to the parent, making it suitable for use in libraries---a common
scenario when using library compartmentalisation from within large
applications (such as web browsers). Some observers may note a similarity
to Mach task ports; process descriptors provide a subset of this behaviour,
but in a UNIX style.
This feature is enabled by "options PROCDESC", but as with several other
Capsicum kernel features, is not enabled by default in GENERIC 9.0.
Reviewed by: jhb, kib
Approved by: re (kib), mentor (rwatson)
Sponsored by: Google Inc
2011-08-18 22:51:30 +00:00
|
|
|
#ifdef PROCDESC
|
|
|
|
case DTYPE_PROCDESC:
|
|
|
|
kif->kf_type = KF_TYPE_PROCDESC;
|
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
|
2008-12-02 06:50:26 +00:00
|
|
|
default:
|
|
|
|
kif->kf_type = KF_TYPE_UNKNOWN;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
kif->kf_ref_count = fp->f_count;
|
|
|
|
if (fp->f_flag & FREAD)
|
|
|
|
kif->kf_flags |= KF_FLAG_READ;
|
|
|
|
if (fp->f_flag & FWRITE)
|
|
|
|
kif->kf_flags |= KF_FLAG_WRITE;
|
|
|
|
if (fp->f_flag & FAPPEND)
|
|
|
|
kif->kf_flags |= KF_FLAG_APPEND;
|
|
|
|
if (fp->f_flag & FASYNC)
|
|
|
|
kif->kf_flags |= KF_FLAG_ASYNC;
|
|
|
|
if (fp->f_flag & FFSYNC)
|
|
|
|
kif->kf_flags |= KF_FLAG_FSYNC;
|
|
|
|
if (fp->f_flag & FNONBLOCK)
|
|
|
|
kif->kf_flags |= KF_FLAG_NONBLOCK;
|
|
|
|
if (fp->f_flag & O_DIRECT)
|
|
|
|
kif->kf_flags |= KF_FLAG_DIRECT;
|
|
|
|
if (fp->f_flag & FHASLOCK)
|
|
|
|
kif->kf_flags |= KF_FLAG_HASLOCK;
|
2012-07-02 21:01:03 +00:00
|
|
|
kif->kf_offset = foffset_get(fp);
|
2008-12-02 06:50:26 +00:00
|
|
|
if (vp != NULL) {
|
|
|
|
vref(vp);
|
|
|
|
switch (vp->v_type) {
|
|
|
|
case VNON:
|
|
|
|
kif->kf_vnode_type = KF_VTYPE_VNON;
|
|
|
|
break;
|
|
|
|
case VREG:
|
|
|
|
kif->kf_vnode_type = KF_VTYPE_VREG;
|
|
|
|
break;
|
|
|
|
case VDIR:
|
|
|
|
kif->kf_vnode_type = KF_VTYPE_VDIR;
|
|
|
|
break;
|
|
|
|
case VBLK:
|
|
|
|
kif->kf_vnode_type = KF_VTYPE_VBLK;
|
|
|
|
break;
|
|
|
|
case VCHR:
|
|
|
|
kif->kf_vnode_type = KF_VTYPE_VCHR;
|
|
|
|
break;
|
|
|
|
case VLNK:
|
|
|
|
kif->kf_vnode_type = KF_VTYPE_VLNK;
|
|
|
|
break;
|
|
|
|
case VSOCK:
|
|
|
|
kif->kf_vnode_type = KF_VTYPE_VSOCK;
|
|
|
|
break;
|
|
|
|
case VFIFO:
|
|
|
|
kif->kf_vnode_type = KF_VTYPE_VFIFO;
|
|
|
|
break;
|
|
|
|
case VBAD:
|
|
|
|
kif->kf_vnode_type = KF_VTYPE_VBAD;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
kif->kf_vnode_type = KF_VTYPE_UNKNOWN;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* It is OK to drop the filedesc lock here as we will
|
|
|
|
* re-validate and re-evaluate its properties when
|
|
|
|
* the loop continues.
|
|
|
|
*/
|
|
|
|
freepath = NULL;
|
|
|
|
fullpath = "-";
|
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
vn_fullpath(curthread, vp, &fullpath, &freepath);
|
|
|
|
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
|
|
|
|
vrele(vp);
|
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
|
|
|
strlcpy(kif->kf_path, fullpath,
|
|
|
|
sizeof(kif->kf_path));
|
|
|
|
if (freepath != NULL)
|
|
|
|
free(freepath, M_TEMP);
|
|
|
|
FILEDESC_SLOCK(fdp);
|
|
|
|
}
|
|
|
|
if (so != NULL) {
|
|
|
|
struct sockaddr *sa;
|
|
|
|
|
|
|
|
if (so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa)
|
|
|
|
== 0 && sa->sa_len <= sizeof(kif->kf_sa_local)) {
|
|
|
|
bcopy(sa, &kif->kf_sa_local, sa->sa_len);
|
|
|
|
free(sa, M_SONAME);
|
|
|
|
}
|
|
|
|
if (so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa)
|
2010-04-23 14:32:58 +00:00
|
|
|
== 0 && sa->sa_len <= sizeof(kif->kf_sa_peer)) {
|
2008-12-02 06:50:26 +00:00
|
|
|
bcopy(sa, &kif->kf_sa_peer, sa->sa_len);
|
|
|
|
free(sa, M_SONAME);
|
|
|
|
}
|
|
|
|
kif->kf_sock_domain =
|
|
|
|
so->so_proto->pr_domain->dom_family;
|
|
|
|
kif->kf_sock_type = so->so_type;
|
|
|
|
kif->kf_sock_protocol = so->so_proto->pr_protocol;
|
|
|
|
}
|
|
|
|
if (tp != NULL) {
|
|
|
|
strlcpy(kif->kf_path, tty_devname(tp),
|
|
|
|
sizeof(kif->kf_path));
|
|
|
|
}
|
2012-04-01 18:22:48 +00:00
|
|
|
if (shmfd != NULL)
|
|
|
|
shm_path(shmfd, kif->kf_path, sizeof(kif->kf_path));
|
2008-12-02 06:50:26 +00:00
|
|
|
error = SYSCTL_OUT(req, kif, sizeof(*kif));
|
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
fddrop(fdp);
|
|
|
|
free(kif, M_TEMP);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static SYSCTL_NODE(_kern_proc, KERN_PROC_OFILEDESC, ofiledesc, CTLFLAG_RD,
|
|
|
|
sysctl_kern_proc_ofiledesc, "Process ofiledesc entries");
|
|
|
|
#endif /* COMPAT_FREEBSD7 */
|
|
|
|
|
|
|
|
/* Catch ABI breakage: struct kinfo_file must keep its published size. */
#ifdef KINFO_FILE_SIZE
CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
#endif
|
|
|
|
|
2008-02-09 05:16:26 +00:00
|
|
|
static int
|
2011-05-12 10:11:39 +00:00
|
|
|
export_fd_for_sysctl(void *data, int type, int fd, int fflags, int refcnt,
|
2011-10-12 12:08:03 +00:00
|
|
|
int64_t offset, int fd_is_cap, cap_rights_t fd_cap_rights,
|
|
|
|
struct kinfo_file *kif, struct sysctl_req *req)
|
2011-05-12 10:11:39 +00:00
|
|
|
{
|
|
|
|
struct {
|
|
|
|
int fflag;
|
|
|
|
int kf_fflag;
|
|
|
|
} fflags_table[] = {
|
|
|
|
{ FAPPEND, KF_FLAG_APPEND },
|
|
|
|
{ FASYNC, KF_FLAG_ASYNC },
|
|
|
|
{ FFSYNC, KF_FLAG_FSYNC },
|
|
|
|
{ FHASLOCK, KF_FLAG_HASLOCK },
|
|
|
|
{ FNONBLOCK, KF_FLAG_NONBLOCK },
|
|
|
|
{ FREAD, KF_FLAG_READ },
|
|
|
|
{ FWRITE, KF_FLAG_WRITE },
|
|
|
|
{ O_CREAT, KF_FLAG_CREAT },
|
|
|
|
{ O_DIRECT, KF_FLAG_DIRECT },
|
|
|
|
{ O_EXCL, KF_FLAG_EXCL },
|
|
|
|
{ O_EXEC, KF_FLAG_EXEC },
|
|
|
|
{ O_EXLOCK, KF_FLAG_EXLOCK },
|
|
|
|
{ O_NOFOLLOW, KF_FLAG_NOFOLLOW },
|
|
|
|
{ O_SHLOCK, KF_FLAG_SHLOCK },
|
|
|
|
{ O_TRUNC, KF_FLAG_TRUNC }
|
|
|
|
};
|
|
|
|
#define NFFLAGS (sizeof(fflags_table) / sizeof(*fflags_table))
|
|
|
|
struct vnode *vp;
|
|
|
|
int error, vfslocked;
|
|
|
|
unsigned int i;
|
2008-02-09 05:16:26 +00:00
|
|
|
|
|
|
|
bzero(kif, sizeof(*kif));
|
2011-05-12 10:11:39 +00:00
|
|
|
switch (type) {
|
|
|
|
case KF_TYPE_FIFO:
|
|
|
|
case KF_TYPE_VNODE:
|
|
|
|
vp = (struct vnode *)data;
|
|
|
|
error = fill_vnode_info(vp, kif);
|
|
|
|
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
|
2009-02-14 21:12:24 +00:00
|
|
|
vrele(vp);
|
2011-05-12 10:11:39 +00:00
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
|
|
|
break;
|
|
|
|
case KF_TYPE_SOCKET:
|
|
|
|
error = fill_socket_info((struct socket *)data, kif);
|
|
|
|
break;
|
|
|
|
case KF_TYPE_PIPE:
|
|
|
|
error = fill_pipe_info((struct pipe *)data, kif);
|
|
|
|
break;
|
|
|
|
case KF_TYPE_PTS:
|
|
|
|
error = fill_pts_info((struct tty *)data, kif);
|
|
|
|
break;
|
Add experimental support for process descriptors
A "process descriptor" file descriptor is used to manage processes
without using the PID namespace. This is required for Capsicum's
Capability Mode, where the PID namespace is unavailable.
New system calls pdfork(2) and pdkill(2) offer the functional equivalents
of fork(2) and kill(2). pdgetpid(2) allows querying the PID of the remote
process for debugging purposes. The currently-unimplemented pdwait(2) will,
in the future, allow querying rusage/exit status. In the interim, poll(2)
may be used to check (and wait for) process termination.
When a process is referenced by a process descriptor, it does not issue
SIGCHLD to the parent, making it suitable for use in libraries---a common
scenario when using library compartmentalisation from within large
applications (such as web browsers). Some observers may note a similarity
to Mach task ports; process descriptors provide a subset of this behaviour,
but in a UNIX style.
This feature is enabled by "options PROCDESC", but as with several other
Capsicum kernel features, is not enabled by default in GENERIC 9.0.
Reviewed by: jhb, kib
Approved by: re (kib), mentor (rwatson)
Sponsored by: Google Inc
2011-08-18 22:51:30 +00:00
|
|
|
case KF_TYPE_PROCDESC:
|
|
|
|
error = fill_procdesc_info((struct procdesc *)data, kif);
|
|
|
|
break;
|
2012-04-01 18:22:48 +00:00
|
|
|
case KF_TYPE_SHM:
|
|
|
|
error = fill_shm_info((struct file *)data, kif);
|
|
|
|
break;
|
2011-05-12 10:11:39 +00:00
|
|
|
default:
|
|
|
|
error = 0;
|
2009-02-14 21:12:24 +00:00
|
|
|
}
|
2011-05-12 10:11:39 +00:00
|
|
|
if (error == 0)
|
|
|
|
kif->kf_status |= KF_ATTR_VALID;
|
2008-02-09 05:16:26 +00:00
|
|
|
|
|
|
|
/*
|
2011-05-12 10:11:39 +00:00
|
|
|
* Translate file access flags.
|
2008-02-09 05:16:26 +00:00
|
|
|
*/
|
2011-05-12 10:11:39 +00:00
|
|
|
for (i = 0; i < NFFLAGS; i++)
|
|
|
|
if (fflags & fflags_table[i].fflag)
|
|
|
|
kif->kf_flags |= fflags_table[i].kf_fflag;
|
2011-10-12 12:08:03 +00:00
|
|
|
if (fd_is_cap)
|
|
|
|
kif->kf_flags |= KF_FLAG_CAPABILITY;
|
|
|
|
if (fd_is_cap)
|
|
|
|
kif->kf_cap_rights = fd_cap_rights;
|
2011-05-12 10:11:39 +00:00
|
|
|
kif->kf_fd = fd;
|
|
|
|
kif->kf_type = type;
|
|
|
|
kif->kf_ref_count = refcnt;
|
|
|
|
kif->kf_offset = offset;
|
2008-12-02 06:50:26 +00:00
|
|
|
/* Pack record size down */
|
|
|
|
kif->kf_structsize = offsetof(struct kinfo_file, kf_path) +
|
|
|
|
strlen(kif->kf_path) + 1;
|
|
|
|
kif->kf_structsize = roundup(kif->kf_structsize, sizeof(uint64_t));
|
|
|
|
error = SYSCTL_OUT(req, kif, kif->kf_structsize);
|
2008-02-09 05:16:26 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
/*
|
|
|
|
* Get per-process file descriptors for use by procstat(1), et al.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
sysctl_kern_proc_filedesc(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct file *fp;
|
2011-05-12 10:11:39 +00:00
|
|
|
struct filedesc *fdp;
|
|
|
|
struct kinfo_file *kif;
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
struct proc *p;
|
2011-05-12 10:11:39 +00:00
|
|
|
struct vnode *cttyvp, *textvp, *tracevp;
|
2009-05-15 14:41:44 +00:00
|
|
|
size_t oldidx;
|
2011-05-12 10:11:39 +00:00
|
|
|
int64_t offset;
|
|
|
|
void *data;
|
|
|
|
int error, i, *name;
|
2011-10-12 12:08:03 +00:00
|
|
|
int fd_is_cap, type, refcnt, fflags;
|
|
|
|
cap_rights_t fd_cap_rights;
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
|
|
|
|
name = (int *)arg1;
|
|
|
|
if ((p = pfind((pid_t)name[0])) == NULL)
|
|
|
|
return (ESRCH);
|
|
|
|
if ((error = p_candebug(curthread, p))) {
|
|
|
|
PROC_UNLOCK(p);
|
|
|
|
return (error);
|
|
|
|
}
|
2011-05-12 10:11:39 +00:00
|
|
|
/* ktrace vnode */
|
|
|
|
tracevp = p->p_tracevp;
|
|
|
|
if (tracevp != NULL)
|
|
|
|
vref(tracevp);
|
|
|
|
/* text vnode */
|
|
|
|
textvp = p->p_textvp;
|
|
|
|
if (textvp != NULL)
|
|
|
|
vref(textvp);
|
|
|
|
/* Controlling tty. */
|
|
|
|
cttyvp = NULL;
|
|
|
|
if (p->p_pgrp != NULL && p->p_pgrp->pg_session != NULL) {
|
|
|
|
cttyvp = p->p_pgrp->pg_session->s_ttyvp;
|
|
|
|
if (cttyvp != NULL)
|
|
|
|
vref(cttyvp);
|
|
|
|
}
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
fdp = fdhold(p);
|
|
|
|
PROC_UNLOCK(p);
|
|
|
|
kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK);
|
2011-05-12 10:11:39 +00:00
|
|
|
if (tracevp != NULL)
|
|
|
|
export_fd_for_sysctl(tracevp, KF_TYPE_VNODE, KF_FD_TYPE_TRACE,
|
2011-10-12 12:08:03 +00:00
|
|
|
FREAD | FWRITE, -1, -1, 0, 0, kif, req);
|
2011-05-12 10:11:39 +00:00
|
|
|
if (textvp != NULL)
|
|
|
|
export_fd_for_sysctl(textvp, KF_TYPE_VNODE, KF_FD_TYPE_TEXT,
|
2011-10-12 12:08:03 +00:00
|
|
|
FREAD, -1, -1, 0, 0, kif, req);
|
2011-05-12 10:11:39 +00:00
|
|
|
if (cttyvp != NULL)
|
|
|
|
export_fd_for_sysctl(cttyvp, KF_TYPE_VNODE, KF_FD_TYPE_CTTY,
|
2011-10-12 12:08:03 +00:00
|
|
|
FREAD | FWRITE, -1, -1, 0, 0, kif, req);
|
2011-05-12 10:11:39 +00:00
|
|
|
if (fdp == NULL)
|
|
|
|
goto fail;
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
FILEDESC_SLOCK(fdp);
|
2011-05-12 10:11:39 +00:00
|
|
|
/* working directory */
|
|
|
|
if (fdp->fd_cdir != NULL) {
|
|
|
|
vref(fdp->fd_cdir);
|
|
|
|
data = fdp->fd_cdir;
|
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
export_fd_for_sysctl(data, KF_TYPE_VNODE, KF_FD_TYPE_CWD,
|
2011-10-12 12:08:03 +00:00
|
|
|
FREAD, -1, -1, 0, 0, kif, req);
|
2011-05-12 10:11:39 +00:00
|
|
|
FILEDESC_SLOCK(fdp);
|
|
|
|
}
|
|
|
|
/* root directory */
|
|
|
|
if (fdp->fd_rdir != NULL) {
|
|
|
|
vref(fdp->fd_rdir);
|
|
|
|
data = fdp->fd_rdir;
|
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
export_fd_for_sysctl(data, KF_TYPE_VNODE, KF_FD_TYPE_ROOT,
|
2011-10-12 12:08:03 +00:00
|
|
|
FREAD, -1, -1, 0, 0, kif, req);
|
2011-05-12 10:11:39 +00:00
|
|
|
FILEDESC_SLOCK(fdp);
|
|
|
|
}
|
|
|
|
/* jail directory */
|
|
|
|
if (fdp->fd_jdir != NULL) {
|
|
|
|
vref(fdp->fd_jdir);
|
|
|
|
data = fdp->fd_jdir;
|
|
|
|
FILEDESC_SUNLOCK(fdp);
|
|
|
|
export_fd_for_sysctl(data, KF_TYPE_VNODE, KF_FD_TYPE_JAIL,
|
2011-10-12 12:08:03 +00:00
|
|
|
FREAD, -1, -1, 0, 0, kif, req);
|
2011-05-12 10:11:39 +00:00
|
|
|
FILEDESC_SLOCK(fdp);
|
|
|
|
}
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
for (i = 0; i < fdp->fd_nfiles; i++) {
|
|
|
|
if ((fp = fdp->fd_ofiles[i]) == NULL)
|
|
|
|
continue;
|
2011-05-12 10:11:39 +00:00
|
|
|
data = NULL;
|
2011-10-12 12:08:03 +00:00
|
|
|
fd_is_cap = 0;
|
|
|
|
fd_cap_rights = 0;
|
2011-07-20 09:53:35 +00:00
|
|
|
|
|
|
|
#ifdef CAPABILITIES
|
|
|
|
/*
|
|
|
|
* When reporting a capability, most fields will be from the
|
|
|
|
* underlying object, but do mark as a capability and export
|
|
|
|
* the capability rights mask.
|
|
|
|
*/
|
|
|
|
if (fp->f_type == DTYPE_CAPABILITY) {
|
2011-10-12 12:08:03 +00:00
|
|
|
fd_is_cap = 1;
|
|
|
|
fd_cap_rights = cap_rights(fp);
|
2011-07-20 09:53:35 +00:00
|
|
|
(void)cap_funwrap(fp, 0, &fp);
|
|
|
|
}
|
|
|
|
#else /* !CAPABILITIES */
|
|
|
|
KASSERT(fp->f_type != DTYPE_CAPABILITY,
|
|
|
|
("sysctl_kern_proc_filedesc: saw capability"));
|
|
|
|
#endif
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
switch (fp->f_type) {
|
|
|
|
case DTYPE_VNODE:
|
2011-05-12 10:11:39 +00:00
|
|
|
type = KF_TYPE_VNODE;
|
|
|
|
vref(fp->f_vnode);
|
|
|
|
data = fp->f_vnode;
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DTYPE_SOCKET:
|
2011-05-12 10:11:39 +00:00
|
|
|
type = KF_TYPE_SOCKET;
|
|
|
|
data = fp->f_data;
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DTYPE_PIPE:
|
2011-05-12 10:11:39 +00:00
|
|
|
type = KF_TYPE_PIPE;
|
|
|
|
data = fp->f_data;
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DTYPE_FIFO:
|
2011-05-12 10:11:39 +00:00
|
|
|
type = KF_TYPE_FIFO;
|
|
|
|
vref(fp->f_vnode);
|
|
|
|
data = fp->f_vnode;
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DTYPE_KQUEUE:
|
2011-05-12 10:11:39 +00:00
|
|
|
type = KF_TYPE_KQUEUE;
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DTYPE_CRYPTO:
|
2011-05-12 10:11:39 +00:00
|
|
|
type = KF_TYPE_CRYPTO;
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DTYPE_MQUEUE:
|
2011-05-12 10:11:39 +00:00
|
|
|
type = KF_TYPE_MQUEUE;
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
break;
|
|
|
|
|
2008-01-20 19:55:52 +00:00
|
|
|
case DTYPE_SHM:
|
2011-05-12 10:11:39 +00:00
|
|
|
type = KF_TYPE_SHM;
|
2012-04-01 18:22:48 +00:00
|
|
|
data = fp;
|
2008-01-20 19:55:52 +00:00
|
|
|
break;
|
|
|
|
|
Rework the lifetime management of the kernel implementation of POSIX
semaphores. Specifically, semaphores are now represented as new file
descriptor type that is set to close on exec. This removes the need for
all of the manual process reference counting (and fork, exec, and exit
event handlers) as the normal file descriptor operations handle all of
that for us nicely. It is also suggested as one possible implementation
in the spec and at least one other OS (OS X) uses this approach.
Some bugs that were fixed as a result include:
- References to a named semaphore whose name is removed still work after
the sem_unlink() operation. Prior to this patch, if a semaphore's name
was removed, valid handles from sem_open() would get EINVAL errors from
sem_getvalue(), sem_post(), etc. This fixes that.
- Unnamed semaphores created with sem_init() were not cleaned up when a
process exited or exec'd. They were only cleaned up if the process
did an explicit sem_destroy(). This could result in a leak of semaphore
objects that could never be cleaned up.
- On the other hand, if another process guessed the id (kernel pointer to
'struct ksem' of an unnamed semaphore (created via sem_init)) and had
write access to the semaphore based on UID/GID checks, then that other
process could manipulate the semaphore via sem_destroy(), sem_post(),
sem_wait(), etc.
- As part of the permission check (UID/GID), the umask of the process
creating the semaphore was not honored. Thus if your umask denied group
read/write access but the explicit mode in the sem_init() call allowed
it, the semaphore would be readable/writable by other users in the
same group, for example. This includes access via the previous bug.
- If the module refused to unload because there were active semaphores,
then it might have deregistered one or more of the semaphore system
calls before it noticed that there was a problem. I'm not sure if
this actually happened as the order that modules are discovered by the
kernel linker depends on how the actual .ko file is linked. One can
make the order deterministic by using a single module with a mod_event
handler that explicitly registers syscalls (and deregisters during
unload after any checks). This also fixes a race where even if the
sem_module unloaded first it would have destroyed locks that the
syscalls might be trying to access if they are still executing when
they are unloaded.
XXX: By the way, deregistering system calls doesn't do any blocking
to drain any threads from the calls.
- Some minor fixes to errno values on error. For example, sem_init()
isn't documented to return ENFILE or EMFILE if we run out of semaphores
the way that sem_open() can. Instead, it should return ENOSPC in that
case.
Other changes:
- Kernel semaphores now use a hash table to manage the namespace of
named semaphores nearly in a similar fashion to the POSIX shared memory
object file descriptors. Kernel semaphores can now also have names
longer than 14 chars (up to MAXPATHLEN) and can include subdirectories
in their pathname.
- The UID/GID permission checks for access to a named semaphore are now
done via vaccess() rather than a home-rolled set of checks.
- Now that kernel semaphores have an associated file object, the various
MAC checks for POSIX semaphores accept both a file credential and an
active credential. There is also a new posixsem_check_stat() since it
is possible to fstat() a semaphore file descriptor.
- A small set of regression tests (using the ksem API directly) is present
in src/tools/regression/posixsem.
Reported by: kris (1)
Tested by: kris
Reviewed by: rwatson (lightly)
MFC after: 1 month
2008-06-27 05:39:04 +00:00
|
|
|
case DTYPE_SEM:
|
2011-05-12 10:11:39 +00:00
|
|
|
type = KF_TYPE_SEM;
|
Rework the lifetime management of the kernel implementation of POSIX
semaphores. Specifically, semaphores are now represented as new file
descriptor type that is set to close on exec. This removes the need for
all of the manual process reference counting (and fork, exec, and exit
event handlers) as the normal file descriptor operations handle all of
that for us nicely. It is also suggested as one possible implementation
in the spec and at least one other OS (OS X) uses this approach.
Some bugs that were fixed as a result include:
- References to a named semaphore whose name is removed still work after
the sem_unlink() operation. Prior to this patch, if a semaphore's name
was removed, valid handles from sem_open() would get EINVAL errors from
sem_getvalue(), sem_post(), etc. This fixes that.
- Unnamed semaphores created with sem_init() were not cleaned up when a
process exited or exec'd. They were only cleaned up if the process
did an explicit sem_destroy(). This could result in a leak of semaphore
objects that could never be cleaned up.
- On the other hand, if another process guessed the id (kernel pointer to
'struct ksem' of an unnamed semaphore (created via sem_init)) and had
write access to the semaphore based on UID/GID checks, then that other
process could manipulate the semaphore via sem_destroy(), sem_post(),
sem_wait(), etc.
- As part of the permission check (UID/GID), the umask of the process
creating the semaphore was not honored. Thus if your umask denied group
read/write access but the explicit mode in the sem_init() call allowed
it, the semaphore would be readable/writable by other users in the
same group, for example. This includes access via the previous bug.
- If the module refused to unload because there were active semaphores,
then it might have deregistered one or more of the semaphore system
calls before it noticed that there was a problem. I'm not sure if
this actually happened as the order that modules are discovered by the
kernel linker depends on how the actual .ko file is linked. One can
make the order deterministic by using a single module with a mod_event
handler that explicitly registers syscalls (and deregisters during
unload after any checks). This also fixes a race where even if the
sem_module unloaded first it would have destroyed locks that the
syscalls might be trying to access if they are still executing when
they are unloaded.
XXX: By the way, deregistering system calls doesn't do any blocking
to drain any threads from the calls.
- Some minor fixes to errno values on error. For example, sem_init()
isn't documented to return ENFILE or EMFILE if we run out of semaphores
the way that sem_open() can. Instead, it should return ENOSPC in that
case.
Other changes:
- Kernel semaphores now use a hash table to manage the namespace of
named semaphores nearly in a similar fashion to the POSIX shared memory
object file descriptors. Kernel semaphores can now also have names
longer than 14 chars (up to MAXPATHLEN) and can include subdirectories
in their pathname.
- The UID/GID permission checks for access to a named semaphore are now
done via vaccess() rather than a home-rolled set of checks.
- Now that kernel semaphores have an associated file object, the various
MAC checks for POSIX semaphores accept both a file credential and an
active credential. There is also a new posixsem_check_stat() since it
is possible to fstat() a semaphore file descriptor.
- A small set of regression tests (using the ksem API directly) is present
in src/tools/regression/posixsem.
Reported by: kris (1)
Tested by: kris
Reviewed by: rwatson (lightly)
MFC after: 1 month
2008-06-27 05:39:04 +00:00
|
|
|
break;
|
|
|
|
|
Integrate the new MPSAFE TTY layer to the FreeBSD operating system.
The last half year I've been working on a replacement TTY layer for the
FreeBSD kernel. The new TTY layer was designed to improve the following:
- Improved driver model:
The old TTY layer has a driver model that is not abstract enough to
make it friendly to use. A good example is the output path, where the
device drivers directly access the output buffers. This means that an
in-kernel PPP implementation must always convert network buffers into
TTY buffers.
If a PPP implementation would be built on top of the new TTY layer
(still needs a hooks layer, though), it would allow the PPP
implementation to directly hand the data to the TTY driver.
- Improved hotplugging:
With the old TTY layer, it isn't entirely safe to destroy TTY's from
the system. This implementation has a two-step destructing design,
where the driver first abandons the TTY. After all threads have left
the TTY, the TTY layer calls a routine in the driver, which can be
used to free resources (unit numbers, etc).
The pts(4) driver also implements this feature, which means
posix_openpt() will now return PTY's that are created on the fly.
- Improved performance:
One of the major improvements is the per-TTY mutex, which is expected
to improve scalability when compared to the old Giant locking.
Another change is the unbuffered copying to userspace, which is both
used on TTY device nodes and PTY masters.
Upgrading should be quite straightforward. Unlike previous versions,
existing kernel configuration files do not need to be changed, except
when they reference device drivers that are listed in UPDATING.
Obtained from: //depot/projects/mpsafetty/...
Approved by: philip (ex-mentor)
Discussed: on the lists, at BSDCan, at the DevSummit
Sponsored by: Snow B.V., the Netherlands
dcons(4) fixed by: kan
2008-08-20 08:31:58 +00:00
|
|
|
case DTYPE_PTS:
|
2011-05-12 10:11:39 +00:00
|
|
|
type = KF_TYPE_PTS;
|
|
|
|
data = fp->f_data;
|
Integrate the new MPSAFE TTY layer to the FreeBSD operating system.
The last half year I've been working on a replacement TTY layer for the
FreeBSD kernel. The new TTY layer was designed to improve the following:
- Improved driver model:
The old TTY layer has a driver model that is not abstract enough to
make it friendly to use. A good example is the output path, where the
device drivers directly access the output buffers. This means that an
in-kernel PPP implementation must always convert network buffers into
TTY buffers.
If a PPP implementation would be built on top of the new TTY layer
(still needs a hooks layer, though), it would allow the PPP
implementation to directly hand the data to the TTY driver.
- Improved hotplugging:
With the old TTY layer, it isn't entirely safe to destroy TTY's from
the system. This implementation has a two-step destructing design,
where the driver first abandons the TTY. After all threads have left
the TTY, the TTY layer calls a routine in the driver, which can be
used to free resources (unit numbers, etc).
The pts(4) driver also implements this feature, which means
posix_openpt() will now return PTY's that are created on the fly.
- Improved performance:
One of the major improvements is the per-TTY mutex, which is expected
to improve scalability when compared to the old Giant locking.
Another change is the unbuffered copying to userspace, which is both
used on TTY device nodes and PTY masters.
Upgrading should be quite straightforward. Unlike previous versions,
existing kernel configuration files do not need to be changed, except
when they reference device drivers that are listed in UPDATING.
Obtained from: //depot/projects/mpsafetty/...
Approved by: philip (ex-mentor)
Discussed: on the lists, at BSDCan, at the DevSummit
Sponsored by: Snow B.V., the Netherlands
dcons(4) fixed by: kan
2008-08-20 08:31:58 +00:00
|
|
|
break;
|
|
|
|
|
Add experimental support for process descriptors
A "process descriptor" file descriptor is used to manage processes
without using the PID namespace. This is required for Capsicum's
Capability Mode, where the PID namespace is unavailable.
New system calls pdfork(2) and pdkill(2) offer the functional equivalents
of fork(2) and kill(2). pdgetpid(2) allows querying the PID of the remote
process for debugging purposes. The currently-unimplemented pdwait(2) will,
in the future, allow querying rusage/exit status. In the interim, poll(2)
may be used to check (and wait for) process termination.
When a process is referenced by a process descriptor, it does not issue
SIGCHLD to the parent, making it suitable for use in libraries---a common
scenario when using library compartmentalisation from within large
applications (such as web browsers). Some observers may note a similarity
to Mach task ports; process descriptors provide a subset of this behaviour,
but in a UNIX style.
This feature is enabled by "options PROCDESC", but as with several other
Capsicum kernel features, is not enabled by default in GENERIC 9.0.
Reviewed by: jhb, kib
Approved by: re (kib), mentor (rwatson)
Sponsored by: Google Inc
2011-08-18 22:51:30 +00:00
|
|
|
#ifdef PROCDESC
|
|
|
|
case DTYPE_PROCDESC:
|
|
|
|
type = KF_TYPE_PROCDESC;
|
|
|
|
data = fp->f_data;
|
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
default:
|
2011-05-12 10:11:39 +00:00
|
|
|
type = KF_TYPE_UNKNOWN;
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
break;
|
|
|
|
}
|
2011-05-12 10:11:39 +00:00
|
|
|
refcnt = fp->f_count;
|
|
|
|
fflags = fp->f_flag;
|
2012-07-02 21:01:03 +00:00
|
|
|
offset = foffset_get(fp);
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
|
2011-05-12 10:11:39 +00:00
|
|
|
/*
|
|
|
|
* Create sysctl entry.
|
|
|
|
* It is OK to drop the filedesc lock here as we will
|
|
|
|
* re-validate and re-evaluate its properties when
|
|
|
|
* the loop continues.
|
|
|
|
*/
|
2009-05-15 14:41:44 +00:00
|
|
|
oldidx = req->oldidx;
|
2011-05-12 10:11:39 +00:00
|
|
|
if (type == KF_TYPE_VNODE || type == KF_TYPE_FIFO)
|
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2011-10-12 12:08:03 +00:00
|
|
|
error = export_fd_for_sysctl(data, type, i, fflags, refcnt,
|
|
|
|
offset, fd_is_cap, fd_cap_rights, kif, req);
|
2011-05-12 10:11:39 +00:00
|
|
|
if (type == KF_TYPE_VNODE || type == KF_TYPE_FIFO)
|
|
|
|
FILEDESC_SLOCK(fdp);
|
2009-05-15 14:41:44 +00:00
|
|
|
if (error) {
|
|
|
|
if (error == ENOMEM) {
|
|
|
|
/*
|
|
|
|
* The hack to keep the ABI of sysctl
|
|
|
|
* kern.proc.filedesc intact, but not
|
|
|
|
* to account a partially copied
|
|
|
|
* kinfo_file into the oldidx.
|
|
|
|
*/
|
|
|
|
req->oldidx = oldidx;
|
|
|
|
error = 0;
|
|
|
|
}
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
break;
|
2009-05-15 14:41:44 +00:00
|
|
|
}
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
}
|
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2011-05-12 10:11:39 +00:00
|
|
|
fail:
|
2011-05-12 10:56:33 +00:00
|
|
|
if (fdp != NULL)
|
|
|
|
fddrop(fdp);
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
free(kif, M_TEMP);
|
2009-05-15 14:41:44 +00:00
|
|
|
return (error);
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
}
|
|
|
|
|
2011-05-12 10:11:39 +00:00
|
|
|
int
|
|
|
|
vntype_to_kinfo(int vtype)
|
|
|
|
{
|
|
|
|
struct {
|
|
|
|
int vtype;
|
|
|
|
int kf_vtype;
|
|
|
|
} vtypes_table[] = {
|
|
|
|
{ VBAD, KF_VTYPE_VBAD },
|
|
|
|
{ VBLK, KF_VTYPE_VBLK },
|
|
|
|
{ VCHR, KF_VTYPE_VCHR },
|
|
|
|
{ VDIR, KF_VTYPE_VDIR },
|
|
|
|
{ VFIFO, KF_VTYPE_VFIFO },
|
|
|
|
{ VLNK, KF_VTYPE_VLNK },
|
|
|
|
{ VNON, KF_VTYPE_VNON },
|
|
|
|
{ VREG, KF_VTYPE_VREG },
|
|
|
|
{ VSOCK, KF_VTYPE_VSOCK }
|
|
|
|
};
|
|
|
|
#define NVTYPES (sizeof(vtypes_table) / sizeof(*vtypes_table))
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Perform vtype translation.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < NVTYPES; i++)
|
|
|
|
if (vtypes_table[i].vtype == vtype)
|
|
|
|
break;
|
|
|
|
if (i < NVTYPES)
|
|
|
|
return (vtypes_table[i].kf_vtype);
|
|
|
|
|
|
|
|
return (KF_VTYPE_UNKNOWN);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Fill in the vnode-related fields of a kinfo_file record: the vnode
 * type, a best-effort path, and basic attributes (fsid, fileid, mode,
 * size, rdev).
 *
 * Returns 0 on success, 1 if vp is NULL, or the VOP_GETATTR() error.
 * NOTE(review): the vnode reference held by the caller is not released
 * here — presumably the caller vrele()s it; confirm at the call sites.
 */
static int
fill_vnode_info(struct vnode *vp, struct kinfo_file *kif)
{
	struct vattr va;
	char *fullpath, *freepath;
	int error, vfslocked;

	if (vp == NULL)
		return (1);
	kif->kf_vnode_type = vntype_to_kinfo(vp->v_type);
	freepath = NULL;
	fullpath = "-";
	/* Path lookup is best-effort; "-" is reported when it fails. */
	error = vn_fullpath(curthread, vp, &fullpath, &freepath);
	if (error == 0) {
		strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path));
	}
	if (freepath != NULL)
		free(freepath, M_TEMP);

	/*
	 * Retrieve vnode attributes.
	 */
	/* Sentinels let us detect fields the filesystem did not fill in. */
	va.va_fsid = VNOVAL;
	va.va_rdev = NODEV;
	/* Getattr requires the vnode locked; take Giant if the fs needs it. */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &va, curthread->td_ucred);
	VOP_UNLOCK(vp, 0);
	VFS_UNLOCK_GIANT(vfslocked);
	if (error != 0)
		return (error);
	/* Fall back to the mount's fsid when the fs did not set va_fsid. */
	if (va.va_fsid != VNOVAL)
		kif->kf_un.kf_file.kf_file_fsid = va.va_fsid;
	else
		kif->kf_un.kf_file.kf_file_fsid =
		    vp->v_mount->mnt_stat.f_fsid.val[0];
	kif->kf_un.kf_file.kf_file_fileid = va.va_fileid;
	kif->kf_un.kf_file.kf_file_mode = MAKEIMODE(va.va_type, va.va_mode);
	kif->kf_un.kf_file.kf_file_size = va.va_size;
	kif->kf_un.kf_file.kf_file_rdev = va.va_rdev;
	return (0);
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
fill_socket_info(struct socket *so, struct kinfo_file *kif)
|
|
|
|
{
|
|
|
|
struct sockaddr *sa;
|
|
|
|
struct inpcb *inpcb;
|
|
|
|
struct unpcb *unpcb;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
if (so == NULL)
|
|
|
|
return (1);
|
|
|
|
kif->kf_sock_domain = so->so_proto->pr_domain->dom_family;
|
|
|
|
kif->kf_sock_type = so->so_type;
|
|
|
|
kif->kf_sock_protocol = so->so_proto->pr_protocol;
|
|
|
|
kif->kf_un.kf_sock.kf_sock_pcb = (uintptr_t)so->so_pcb;
|
|
|
|
switch(kif->kf_sock_domain) {
|
|
|
|
case AF_INET:
|
|
|
|
case AF_INET6:
|
|
|
|
if (kif->kf_sock_protocol == IPPROTO_TCP) {
|
|
|
|
if (so->so_pcb != NULL) {
|
|
|
|
inpcb = (struct inpcb *)(so->so_pcb);
|
|
|
|
kif->kf_un.kf_sock.kf_sock_inpcb =
|
|
|
|
(uintptr_t)inpcb->inp_ppcb;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case AF_UNIX:
|
|
|
|
if (so->so_pcb != NULL) {
|
|
|
|
unpcb = (struct unpcb *)(so->so_pcb);
|
|
|
|
if (unpcb->unp_conn) {
|
|
|
|
kif->kf_un.kf_sock.kf_sock_unpconn =
|
|
|
|
(uintptr_t)unpcb->unp_conn;
|
|
|
|
kif->kf_un.kf_sock.kf_sock_rcv_sb_state =
|
|
|
|
so->so_rcv.sb_state;
|
|
|
|
kif->kf_un.kf_sock.kf_sock_snd_sb_state =
|
|
|
|
so->so_snd.sb_state;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
|
|
|
|
if (error == 0 && sa->sa_len <= sizeof(kif->kf_sa_local)) {
|
|
|
|
bcopy(sa, &kif->kf_sa_local, sa->sa_len);
|
|
|
|
free(sa, M_SONAME);
|
|
|
|
}
|
|
|
|
error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
|
|
|
|
if (error == 0 && sa->sa_len <= sizeof(kif->kf_sa_peer)) {
|
|
|
|
bcopy(sa, &kif->kf_sa_peer, sa->sa_len);
|
|
|
|
free(sa, M_SONAME);
|
|
|
|
}
|
|
|
|
strncpy(kif->kf_path, so->so_proto->pr_domain->dom_name,
|
|
|
|
sizeof(kif->kf_path));
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Fill in the pseudo-terminal fields of a kinfo_file record: the
 * device number and device name of the tty.
 *
 * Returns 0 on success, 1 if tp is NULL.
 */
static int
fill_pts_info(struct tty *tp, struct kinfo_file *kif)
{

	if (tp == NULL)
		return (1);
	kif->kf_un.kf_pts.kf_pts_dev = tty_udev(tp);
	strlcpy(kif->kf_path, tty_devname(tp), sizeof(kif->kf_path));
	return (0);
}
|
|
|
|
|
|
|
|
/*
 * Fill in the pipe fields of a kinfo_file record: the kernel addresses
 * of this pipe end and its peer, and the byte count currently buffered.
 *
 * Returns 0 on success, 1 if pi is NULL.
 */
static int
fill_pipe_info(struct pipe *pi, struct kinfo_file *kif)
{

	if (pi == NULL)
		return (1);
	kif->kf_un.kf_pipe.kf_pipe_addr = (uintptr_t)pi;
	kif->kf_un.kf_pipe.kf_pipe_peer = (uintptr_t)pi->pipe_peer;
	kif->kf_un.kf_pipe.kf_pipe_buffer_cnt = pi->pipe_buffer.cnt;
	return (0);
}
|
|
|
|
|
Add experimental support for process descriptors
A "process descriptor" file descriptor is used to manage processes
without using the PID namespace. This is required for Capsicum's
Capability Mode, where the PID namespace is unavailable.
New system calls pdfork(2) and pdkill(2) offer the functional equivalents
of fork(2) and kill(2). pdgetpid(2) allows querying the PID of the remote
process for debugging purposes. The currently-unimplemented pdwait(2) will,
in the future, allow querying rusage/exit status. In the interim, poll(2)
may be used to check (and wait for) process termination.
When a process is referenced by a process descriptor, it does not issue
SIGCHLD to the parent, making it suitable for use in libraries---a common
scenario when using library compartmentalisation from within large
applications (such as web browsers). Some observers may note a similarity
to Mach task ports; process descriptors provide a subset of this behaviour,
but in a UNIX style.
This feature is enabled by "options PROCDESC", but as with several other
Capsicum kernel features, is not enabled by default in GENERIC 9.0.
Reviewed by: jhb, kib
Approved by: re (kib), mentor (rwatson)
Sponsored by: Google Inc
2011-08-18 22:51:30 +00:00
|
|
|
/*
 * Fill in the process-descriptor fields of a kinfo_file record: the
 * PID of the process the descriptor refers to.
 *
 * Returns 0 on success, 1 if pdp is NULL.
 */
static int
fill_procdesc_info(struct procdesc *pdp, struct kinfo_file *kif)
{

	if (pdp == NULL)
		return (1);
	kif->kf_un.kf_proc.kf_pid = pdp->pd_pid;
	return (0);
}
|
|
|
|
|
2012-04-01 18:22:48 +00:00
|
|
|
static int
|
|
|
|
fill_shm_info(struct file *fp, struct kinfo_file *kif)
|
|
|
|
{
|
|
|
|
struct thread *td;
|
|
|
|
struct stat sb;
|
|
|
|
|
|
|
|
td = curthread;
|
|
|
|
if (fp->f_data == NULL)
|
|
|
|
return (1);
|
|
|
|
if (fo_stat(fp, &sb, td->td_ucred, td) != 0)
|
|
|
|
return (1);
|
|
|
|
shm_path(fp->f_data, kif->kf_path, sizeof(kif->kf_path));
|
|
|
|
kif->kf_un.kf_file.kf_file_mode = sb.st_mode;
|
|
|
|
kif->kf_un.kf_file.kf_file_size = sb.st_size;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
Add two new sysctls in support of the forthcoming procstat(1) to support
its -f and -v arguments:
kern.proc.filedesc - dump file descriptor information for a process, if
debugging is permitted, including socket addresses, open flags, file
offsets, file paths, etc.
kern.proc.vmmap - dump virtual memory mapping information for a process,
if debugging is permitted, including layout and information on
underlying objects, such as the type of object and path.
These provide a superset of the information historically available
through the now-deprecated procfs(4), and are intended to be exported
in an ABI-robust form.
2007-12-02 10:10:27 +00:00
|
|
|
/* kern.proc.filedesc: per-process file descriptor dump (see procstat -f). */
static SYSCTL_NODE(_kern_proc, KERN_PROC_FILEDESC, filedesc, CTLFLAG_RD,
    sysctl_kern_proc_filedesc, "Process filedesc entries");
|
|
|
|
|
2005-11-10 10:42:50 +00:00
|
|
|
#ifdef DDB
|
|
|
|
/*
|
|
|
|
* For the purposes of debugging, generate a human-readable string for the
|
|
|
|
* file type.
|
|
|
|
*/
|
|
|
|
static const char *
|
|
|
|
file_type_to_name(short type)
|
|
|
|
{
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case 0:
|
|
|
|
return ("zero");
|
|
|
|
case DTYPE_VNODE:
|
|
|
|
return ("vnod");
|
|
|
|
case DTYPE_SOCKET:
|
|
|
|
return ("sock");
|
|
|
|
case DTYPE_PIPE:
|
|
|
|
return ("pipe");
|
|
|
|
case DTYPE_FIFO:
|
|
|
|
return ("fifo");
|
2007-02-15 10:55:43 +00:00
|
|
|
case DTYPE_KQUEUE:
|
|
|
|
return ("kque");
|
2005-11-10 10:42:50 +00:00
|
|
|
case DTYPE_CRYPTO:
|
|
|
|
return ("crpt");
|
2007-02-15 10:55:43 +00:00
|
|
|
case DTYPE_MQUEUE:
|
|
|
|
return ("mque");
|
Add a new file descriptor type for IPC shared memory objects and use it to
implement shm_open(2) and shm_unlink(2) in the kernel:
- Each shared memory file descriptor is associated with a swap-backed vm
object which provides the backing store. Each descriptor starts off with
a size of zero, but the size can be altered via ftruncate(2). The shared
memory file descriptors also support fstat(2). read(2), write(2),
ioctl(2), select(2), poll(2), and kevent(2) are not supported on shared
memory file descriptors.
- shm_open(2) and shm_unlink(2) are now implemented as system calls that
manage shared memory file descriptors. The virtual namespace that maps
pathnames to shared memory file descriptors is implemented as a hash
table where the hash key is generated via the 32-bit Fowler/Noll/Vo hash
of the pathname.
- As an extension, the constant 'SHM_ANON' may be specified in place of the
path argument to shm_open(2). In this case, an unnamed shared memory
file descriptor will be created similar to the IPC_PRIVATE key for
shmget(2). Note that the shared memory object can still be shared among
processes by sharing the file descriptor via fork(2) or sendmsg(2), but
it is unnamed. This effectively serves to implement the getmemfd() idea
bandied about the lists several times over the years.
- The backing store for shared memory file descriptors are garbage
collected when they are not referenced by any open file descriptors or
the shm_open(2) virtual namespace.
Submitted by: dillon, peter (previous versions)
Submitted by: rwatson (I based this on his version)
Reviewed by: alc (suggested converting getmemfd() to shm_open())
2008-01-08 21:58:16 +00:00
|
|
|
case DTYPE_SHM:
|
|
|
|
return ("shm");
|
Rework the lifetime management of the kernel implementation of POSIX
semaphores. Specifically, semaphores are now represented as new file
descriptor type that is set to close on exec. This removes the need for
all of the manual process reference counting (and fork, exec, and exit
event handlers) as the normal file descriptor operations handle all of
that for us nicely. It is also suggested as one possible implementation
in the spec and at least one other OS (OS X) uses this approach.
Some bugs that were fixed as a result include:
- References to a named semaphore whose name is removed still work after
the sem_unlink() operation. Prior to this patch, if a semaphore's name
was removed, valid handles from sem_open() would get EINVAL errors from
sem_getvalue(), sem_post(), etc. This fixes that.
- Unnamed semaphores created with sem_init() were not cleaned up when a
process exited or exec'd. They were only cleaned up if the process
did an explicit sem_destroy(). This could result in a leak of semaphore
objects that could never be cleaned up.
- On the other hand, if another process guessed the id (kernel pointer to
'struct ksem' of an unnamed semaphore (created via sem_init)) and had
write access to the semaphore based on UID/GID checks, then that other
process could manipulate the semaphore via sem_destroy(), sem_post(),
sem_wait(), etc.
- As part of the permission check (UID/GID), the umask of the proces
creating the semaphore was not honored. Thus if your umask denied group
read/write access but the explicit mode in the sem_init() call allowed
it, the semaphore would be readable/writable by other users in the
same group, for example. This includes access via the previous bug.
- If the module refused to unload because there were active semaphores,
then it might have deregistered one or more of the semaphore system
calls before it noticed that there was a problem. I'm not sure if
this actually happened as the order that modules are discovered by the
kernel linker depends on how the actual .ko file is linked. One can
make the order deterministic by using a single module with a mod_event
handler that explicitly registers syscalls (and deregisters during
unload after any checks). This also fixes a race where even if the
sem_module unloaded first it would have destroyed locks that the
syscalls might be trying to access if they are still executing when
they are unloaded.
XXX: By the way, deregistering system calls doesn't do any blocking
to drain any threads from the calls.
- Some minor fixes to errno values on error. For example, sem_init()
isn't documented to return ENFILE or EMFILE if we run out of semaphores
the way that sem_open() can. Instead, it should return ENOSPC in that
case.
Other changes:
- Kernel semaphores now use a hash table to manage the namespace of
named semaphores nearly in a similar fashion to the POSIX shared memory
object file descriptors. Kernel semaphores can now also have names
longer than 14 chars (up to MAXPATHLEN) and can include subdirectories
in their pathname.
- The UID/GID permission checks for access to a named semaphore are now
done via vaccess() rather than a home-rolled set of checks.
- Now that kernel semaphores have an associated file object, the various
MAC checks for POSIX semaphores accept both a file credential and an
active credential. There is also a new posixsem_check_stat() since it
is possible to fstat() a semaphore file descriptor.
- A small set of regression tests (using the ksem API directly) is present
in src/tools/regression/posixsem.
Reported by: kris (1)
Tested by: kris
Reviewed by: rwatson (lightly)
MFC after: 1 month
2008-06-27 05:39:04 +00:00
|
|
|
case DTYPE_SEM:
|
|
|
|
return ("ksem");
|
2005-11-10 10:42:50 +00:00
|
|
|
default:
|
|
|
|
return ("unkn");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For the purposes of debugging, identify a process (if any, perhaps one of
|
|
|
|
* many) that references the passed file in its file descriptor array. Return
|
|
|
|
* NULL if none.
|
|
|
|
*/
|
|
|
|
static struct proc *
|
|
|
|
file_to_first_proc(struct file *fp)
|
|
|
|
{
|
|
|
|
struct filedesc *fdp;
|
|
|
|
struct proc *p;
|
|
|
|
int n;
|
|
|
|
|
2007-01-17 14:58:53 +00:00
|
|
|
FOREACH_PROC_IN_SYSTEM(p) {
|
2005-11-10 10:42:50 +00:00
|
|
|
if (p->p_state == PRS_NEW)
|
|
|
|
continue;
|
|
|
|
fdp = p->p_fd;
|
|
|
|
if (fdp == NULL)
|
|
|
|
continue;
|
|
|
|
for (n = 0; n < fdp->fd_nfiles; n++) {
|
|
|
|
if (fp == fdp->fd_ofiles[n])
|
|
|
|
return (p);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
2007-02-15 10:50:48 +00:00
|
|
|
/*
 * Print one DDB table row describing a struct file, optionally preceded
 * by a column-header row.  The FPID/FCmd columns show one (arbitrary)
 * process referencing the file, or -1/"-" if none was found.
 */
static void
db_print_file(struct file *fp, int header)
{
	struct proc *p;

	if (header)
		db_printf("%8s %4s %8s %8s %4s %5s %6s %8s %5s %12s\n",
		    "File", "Type", "Data", "Flag", "GCFl", "Count",
		    "MCount", "Vnode", "FPID", "FCmd");
	p = file_to_first_proc(fp);
	/* GCFl and MCount columns are printed as constant 0 here. */
	db_printf("%8p %4s %8p %08x %04x %5d %6d %8p %5d %12s\n", fp,
	    file_type_to_name(fp->f_type), fp->f_data, fp->f_flag,
	    0, fp->f_count, 0, fp->f_vnode,
	    p != NULL ? p->p_pid : -1, p != NULL ? p->p_comm : "-");
}
|
|
|
|
|
|
|
|
DB_SHOW_COMMAND(file, db_show_file)
|
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
|
|
|
|
if (!have_addr) {
|
|
|
|
db_printf("usage: show file <addr>\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
fp = (struct file *)addr;
|
|
|
|
db_print_file(fp, 1);
|
|
|
|
}
|
|
|
|
|
2005-11-10 10:42:50 +00:00
|
|
|
DB_SHOW_COMMAND(files, db_show_files)
|
|
|
|
{
|
2007-12-30 01:42:15 +00:00
|
|
|
struct filedesc *fdp;
|
2005-11-10 10:42:50 +00:00
|
|
|
struct file *fp;
|
2007-12-30 01:42:15 +00:00
|
|
|
struct proc *p;
|
2007-02-15 10:50:48 +00:00
|
|
|
int header;
|
2007-12-30 01:42:15 +00:00
|
|
|
int n;
|
2005-11-10 10:42:50 +00:00
|
|
|
|
2007-02-15 10:50:48 +00:00
|
|
|
header = 1;
|
2007-12-30 01:42:15 +00:00
|
|
|
FOREACH_PROC_IN_SYSTEM(p) {
|
|
|
|
if (p->p_state == PRS_NEW)
|
|
|
|
continue;
|
|
|
|
if ((fdp = p->p_fd) == NULL)
|
|
|
|
continue;
|
|
|
|
for (n = 0; n < fdp->fd_nfiles; ++n) {
|
|
|
|
if ((fp = fdp->fd_ofiles[n]) == NULL)
|
|
|
|
continue;
|
|
|
|
db_print_file(fp, header);
|
|
|
|
header = 0;
|
|
|
|
}
|
2005-11-10 10:42:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2004-01-11 19:39:14 +00:00
|
|
|
/* Per-process open file limit (tunable at runtime). */
SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
    &maxfilesperproc, 0, "Maximum files allowed open per process");

/* System-wide open file limit (tunable at runtime). */
SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
    &maxfiles, 0, "Maximum number of files");

/* Read-only count of currently open files; __DEVOLATILE strips the
 * volatile qualifier for the sysctl pointer argument. */
SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
    __DEVOLATILE(int *, &openfiles), 0, "System-wide number of open files");
|
2000-08-26 23:49:44 +00:00
|
|
|
|
2004-12-01 09:42:35 +00:00
|
|
|
/* ARGSUSED*/
/*
 * One-time initialization of the file subsystem: create the UMA zone
 * backing struct file allocations (NOFREE: slabs are never returned to
 * the VM) and set up the sigio and fdesc mutexes.  Run via SYSINIT at
 * SI_SUB_LOCK time, before any files can be allocated.
 */
static void
filelistinit(void *dummy)
{

	file_zone = uma_zcreate("Files", sizeof(struct file), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	mtx_init(&sigio_lock, "sigio lock", NULL, MTX_DEF);
	mtx_init(&fdesc_mtx, "fdesc", NULL, MTX_DEF);
}
SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, filelistinit, NULL);
|
2004-12-01 09:42:35 +00:00
|
|
|
|
|
|
|
/*-------------------------------------------------------------------*/
|
1999-08-04 18:53:50 +00:00
|
|
|
|
|
|
|
/*
 * Stub fileops methods for badfileops.  Almost all fail with EBADF;
 * the exceptions are truncate (EINVAL, matching ftruncate(2) on an
 * object that cannot be truncated) and poll (reports no events).
 */
static int
badfo_readwrite(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{

	return (EBADF);
}

static int
badfo_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{

	return (EINVAL);
}

static int
badfo_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{

	return (EBADF);
}

/* Poll on a bad descriptor reports no ready events rather than an error. */
static int
badfo_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{

	return (0);
}

static int
badfo_kqfilter(struct file *fp, struct knote *kn)
{

	return (EBADF);
}

static int
badfo_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{

	return (EBADF);
}

static int
badfo_close(struct file *fp, struct thread *td)
{

	return (EBADF);
}

static int
badfo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{

	return (EBADF);
}

static int
badfo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{

	return (EBADF);
}
|
|
|
|
|
2004-12-01 09:42:35 +00:00
|
|
|
struct fileops badfileops = {
|
|
|
|
.fo_read = badfo_readwrite,
|
|
|
|
.fo_write = badfo_readwrite,
|
2008-01-07 20:05:19 +00:00
|
|
|
.fo_truncate = badfo_truncate,
|
2004-12-01 09:42:35 +00:00
|
|
|
.fo_ioctl = badfo_ioctl,
|
|
|
|
.fo_poll = badfo_poll,
|
|
|
|
.fo_kqfilter = badfo_kqfilter,
|
|
|
|
.fo_stat = badfo_stat,
|
|
|
|
.fo_close = badfo_close,
|
2011-08-16 20:07:47 +00:00
|
|
|
.fo_chmod = badfo_chmod,
|
|
|
|
.fo_chown = badfo_chown,
|
2004-12-01 09:42:35 +00:00
|
|
|
};
|
2002-10-16 15:45:37 +00:00
|
|
|
|
2011-08-16 20:07:47 +00:00
|
|
|
/*
 * Shared fileops helpers for file types that do not support chmod:
 * always fail with EINVAL.  Exported (non-static) for use in other
 * fileops vectors.
 */
int
invfo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{

	return (EINVAL);
}

/* As above, for file types that do not support chown. */
int
invfo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{

	return (EINVAL);
}
|
2004-12-01 09:29:31 +00:00
|
|
|
|
|
|
|
/*-------------------------------------------------------------------*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* File Descriptor pseudo-device driver (/dev/fd/).
|
|
|
|
*
|
|
|
|
* Opening minor device N dup()s the file (if any) connected to file
|
|
|
|
* descriptor N belonging to the calling process. Note that this driver
|
|
|
|
* consists of only the ``open()'' routine, because all subsequent
|
|
|
|
* references to this file will be direct to the other driver.
|
|
|
|
*
|
|
|
|
* XXX: we could give this one a cloning event handler if necessary.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* ARGSUSED */
/*
 * Open routine for the /dev/fd/N pseudo-devices: stash the requested
 * descriptor number (the device's unit) in td_dupfd and return ENODEV,
 * which vn_open/open recognize as the cue to perform the dup in
 * dupfdopen.  See the kludge description below.
 */
static int
fdopen(struct cdev *dev, int mode, int type, struct thread *td)
{

	/*
	 * XXX Kludge: set curthread->td_dupfd to contain the value of the
	 * the file descriptor being sought for duplication. The error
	 * return ensures that the vnode for this device will be released
	 * by vn_open. Open will detect this special error and take the
	 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
	 * will simply report the error.
	 */
	td->td_dupfd = dev2unit(dev);
	return (ENODEV);
}
|
|
|
|
|
|
|
|
/*
 * Character device switch for /dev/fd/N.  Only open is provided; once
 * opened, all further operations go through the dup'd descriptor's own
 * driver (see the driver comment above).
 */
static struct cdevsw fildesc_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	fdopen,
	.d_name =	"FD",
};
|
|
|
|
|
|
|
|
/*
 * Create the /dev/fd/0, /dev/fd/1 and /dev/fd/2 device nodes and their
 * conventional stdin/stdout/stderr aliases.  MAKEDEV_ETERNAL: these
 * devices are never destroyed.  Run via SYSINIT at driver-init time.
 */
static void
fildesc_drvinit(void *unused)
{
	struct cdev *dev;

	dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 0, NULL,
	    UID_ROOT, GID_WHEEL, 0666, "fd/0");
	make_dev_alias(dev, "stdin");
	dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 1, NULL,
	    UID_ROOT, GID_WHEEL, 0666, "fd/1");
	make_dev_alias(dev, "stdout");
	dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 2, NULL,
	    UID_ROOT, GID_WHEEL, 0666, "fd/2");
	make_dev_alias(dev, "stderr");
}

SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, fildesc_drvinit, NULL);
|