/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fdesc_vnops.c	8.9 (Berkeley) 1/21/94
 *
 * $FreeBSD$
 */
/*
* /dev/fd Filesystem
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>	/* boottime */
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/file.h>	/* Must come after sys/malloc.h */
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/vnode.h>

#include <fs/fdescfs/fdesc.h>
#define	NFDCACHE 4
#define	FD_NHASH(ix) \
	(&fdhashtbl[(ix) & fdhash])
static LIST_HEAD(fdhashhead, fdescnode) *fdhashtbl;
static u_long fdhash;

struct mtx fdesc_hashmtx;

static vop_getattr_t	fdesc_getattr;
static vop_lookup_t	fdesc_lookup;
static vop_open_t	fdesc_open;
static vop_readdir_t	fdesc_readdir;
static vop_reclaim_t	fdesc_reclaim;
static vop_setattr_t	fdesc_setattr;

static struct vop_vector fdesc_vnodeops = {
	.vop_default =		&default_vnodeops,
	.vop_access =		VOP_NULL,
	.vop_getattr =		fdesc_getattr,
	.vop_lookup =		fdesc_lookup,
	.vop_open =		fdesc_open,
	.vop_pathconf =		vop_stdpathconf,
	.vop_readdir =		fdesc_readdir,
	.vop_reclaim =		fdesc_reclaim,
	.vop_setattr =		fdesc_setattr,
};

static void fdesc_insmntque_dtr(struct vnode *, void *);
static void fdesc_remove_entry(struct fdescnode *);
/*
* Initialise cache headers
*/
int
fdesc_init(vfsp)
struct vfsconf *vfsp;
1994-05-24 10:09:53 +00:00
{
mtx_init(&fdesc_hashmtx, "fdescfs_hash", NULL, MTX_DEF);
fdhashtbl = hashinit(NFDCACHE, M_CACHE, &fdhash);
return (0);
1994-05-24 10:09:53 +00:00
}
/*
* Uninit ready for unload.
*/
int
fdesc_uninit(struct vfsconf *vfsp)
{

	/*
	 * Tear down in reverse order of fdesc_init(): free the hash table
	 * first, then the mutex protecting it.  vfsp is unused.
	 */
	hashdestroy(fdhashtbl, M_CACHE, fdhash);
	mtx_destroy(&fdesc_hashmtx);
	return (0);
}
/*
* If allocating vnode fails, call this.
*/
static void
fdesc_insmntque_dtr(struct vnode *vp, void *arg)
{

	/*
	 * Destructor passed to insmntque1(): on failure to insert the vnode
	 * into the mount's list, disassociate it and drop the caller's
	 * locked reference.  arg is unused.
	 */
	vgone(vp);
	vput(vp);
}
/*
* Remove an entry from the hash if it exists.
*/
static void
fdesc_remove_entry(struct fdescnode *fd)
{
	struct fdhashhead *fc;
	struct fdescnode *fd2;

	fc = FD_NHASH(fd->fd_ix);
	mtx_lock(&fdesc_hashmtx);
	/*
	 * Walk the hash chain and compare by pointer identity: the node may
	 * already have been removed by a racing reclaim, in which case we
	 * must not LIST_REMOVE() it a second time.
	 */
	LIST_FOREACH(fd2, fc, fd_hash) {
		if (fd == fd2) {
			LIST_REMOVE(fd, fd_hash);
			break;
		}
	}
	mtx_unlock(&fdesc_hashmtx);
}
1994-05-24 10:09:53 +00:00
int
fdesc_allocvp(ftype, fd_fd, ix, mp, vpp, td)
1994-05-24 10:09:53 +00:00
fdntype ftype;
unsigned fd_fd;
1994-05-24 10:09:53 +00:00
int ix;
struct mount *mp;
struct vnode **vpp;
struct thread *td;
1994-05-24 10:09:53 +00:00
{
struct fdescmount *fmp;
struct fdhashhead *fc;
struct fdescnode *fd, *fd2;
struct vnode *vp, *vp2;
1994-05-24 10:09:53 +00:00
int error = 0;
fc = FD_NHASH(ix);
1994-05-24 10:09:53 +00:00
loop:
mtx_lock(&fdesc_hashmtx);
/*
* If a forced unmount is progressing, we need to drop it. The flags are
* protected by the hashmtx.
*/
fmp = (struct fdescmount *)mp->mnt_data;
if (fmp == NULL || fmp->flags & FMNT_UNMOUNTF) {
mtx_unlock(&fdesc_hashmtx);
return (-1);
}
LIST_FOREACH(fd, fc, fd_hash) {
1994-05-24 10:09:53 +00:00
if (fd->fd_ix == ix && fd->fd_vnode->v_mount == mp) {
/* Get reference to vnode in case it's being free'd */
vp = fd->fd_vnode;
VI_LOCK(vp);
mtx_unlock(&fdesc_hashmtx);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td))
1994-05-24 10:09:53 +00:00
goto loop;
*vpp = vp;
return (0);
1994-05-24 10:09:53 +00:00
}
}
mtx_unlock(&fdesc_hashmtx);
1994-05-24 10:09:53 +00:00
MALLOC(fd, struct fdescnode *, sizeof(struct fdescnode), M_TEMP, M_WAITOK);
error = getnewvnode("fdescfs", mp, &fdesc_vnodeops, &vp);
if (error) {
FREE(fd, M_TEMP);
return (error);
}
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
vp->v_data = fd;
fd->fd_vnode = vp;
1994-05-24 10:09:53 +00:00
fd->fd_type = ftype;
fd->fd_fd = fd_fd;
1994-05-24 10:09:53 +00:00
fd->fd_ix = ix;
error = insmntque1(vp, mp, fdesc_insmntque_dtr, NULL);
if (error != 0) {
*vpp = NULLVP;
return (error);
}
1994-05-24 10:09:53 +00:00
/* Make sure that someone didn't beat us when inserting the vnode. */
mtx_lock(&fdesc_hashmtx);
/*
* If a forced unmount is progressing, we need to drop it. The flags are
* protected by the hashmtx.
*/
fmp = (struct fdescmount *)mp->mnt_data;
if (fmp == NULL || fmp->flags & FMNT_UNMOUNTF) {
mtx_unlock(&fdesc_hashmtx);
vgone(vp);
vput(vp);
*vpp = NULLVP;
return (-1);
}
1994-05-24 10:09:53 +00:00
LIST_FOREACH(fd2, fc, fd_hash) {
if (fd2->fd_ix == ix && fd2->fd_vnode->v_mount == mp) {
/* Get reference to vnode in case it's being free'd */
vp2 = fd2->fd_vnode;
VI_LOCK(vp2);
mtx_unlock(&fdesc_hashmtx);
error = vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK, td);
/* Someone beat us, dec use count and wait for reclaim */
vgone(vp);
vput(vp);
/* If we didn't get it, return no vnode. */
if (error)
vp2 = NULLVP;
*vpp = vp2;
return (error);
}
1994-05-24 10:09:53 +00:00
}
/* If we came here, we can insert it safely. */
LIST_INSERT_HEAD(fc, fd, fd_hash);
mtx_unlock(&fdesc_hashmtx);
*vpp = vp;
return (0);
1994-05-24 10:09:53 +00:00
}
/*
* vp is the current namei directory
* ndp is the name to locate in that directory...
*/
static int
1994-05-24 10:09:53 +00:00
fdesc_lookup(ap)
struct vop_lookup_args /* {
struct vnode * a_dvp;
struct vnode ** a_vpp;
struct componentname * a_cnp;
} */ *ap;
{
struct vnode **vpp = ap->a_vpp;
struct vnode *dvp = ap->a_dvp;
struct componentname *cnp = ap->a_cnp;
char *pname = cnp->cn_nameptr;
struct thread *td = cnp->cn_thread;
struct file *fp;
int nlen = cnp->cn_namelen;
u_int fd;
1994-05-24 10:09:53 +00:00
int error;
struct vnode *fvp;
if ((cnp->cn_flags & ISLASTCN) &&
(cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
error = EROFS;
goto bad;
}
if (cnp->cn_namelen == 1 && *pname == '.') {
1994-05-24 10:09:53 +00:00
*vpp = dvp;
VREF(dvp);
1994-05-24 10:09:53 +00:00
return (0);
}
if (VTOFDESC(dvp)->fd_type != Froot) {
1994-05-24 10:09:53 +00:00
error = ENOTDIR;
goto bad;
}
1994-05-24 10:09:53 +00:00
fd = 0;
/* the only time a leading 0 is acceptable is if it's "0" */
if (*pname == '0' && nlen != 1) {
error = ENOENT;
goto bad;
}
while (nlen--) {
if (*pname < '0' || *pname > '9') {
1994-05-24 10:09:53 +00:00
error = ENOENT;
goto bad;
}
fd = 10 * fd + *pname++ - '0';
}
1994-05-24 10:09:53 +00:00
if ((error = fget(td, fd, &fp)) != 0)
goto bad;
1994-05-24 10:09:53 +00:00
/* Check if we're looking up ourselves. */
if (VTOFDESC(dvp)->fd_ix == FD_DESC + fd) {
/*
* In case we're holding the last reference to the file, the dvp
* will be re-acquired.
*/
vhold(dvp);
VOP_UNLOCK(dvp, 0);
fdrop(fp, td);
/* Re-aquire the lock afterwards. */
vn_lock(dvp, LK_RETRY | LK_EXCLUSIVE);
vdrop(dvp);
fvp = dvp;
} else {
/*
* Unlock our root node (dvp) when doing this, since we might
* deadlock since the vnode might be locked by another thread
* and the root vnode lock will be obtained afterwards (in case
* we're looking up the fd of the root vnode), which will be the
* opposite lock order. Vhold the root vnode first so we don't
* loose it.
*/
vhold(dvp);
VOP_UNLOCK(dvp, 0);
error = fdesc_allocvp(Fdesc, fd, FD_DESC + fd, dvp->v_mount,
&fvp, td);
fdrop(fp, td);
/*
* The root vnode must be locked last to prevent deadlock condition.
*/
vn_lock(dvp, LK_RETRY | LK_EXCLUSIVE);
vdrop(dvp);
}
if (error)
goto bad;
*vpp = fvp;
return (0);
bad:
1994-05-24 10:09:53 +00:00
*vpp = NULL;
return (error);
}
static int
1994-05-24 10:09:53 +00:00
fdesc_open(ap)
struct vop_open_args /* {
struct vnode *a_vp;
int a_mode;
struct ucred *a_cred;
struct thread *a_td;
1994-05-24 10:09:53 +00:00
} */ *ap;
{
struct vnode *vp = ap->a_vp;
if (VTOFDESC(vp)->fd_type == Froot)
return (0);
1994-05-24 10:09:53 +00:00
/*
* XXX Kludge: set td->td_proc->p_dupfd to contain the value of the the file
* descriptor being sought for duplication. The error return ensures
* that the vnode for this device will be released by vn_open. Open
* will detect this special error and take the actions in dupfdopen.
* Other callers of vn_open or VOP_OPEN will simply report the
* error.
*/
ap->a_td->td_dupfd = VTOFDESC(vp)->fd_fd; /* XXX */
return (ENODEV);
1994-05-24 10:09:53 +00:00
}
static int
1994-05-24 10:09:53 +00:00
fdesc_getattr(ap)
struct vop_getattr_args /* {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
struct thread *a_td;
1994-05-24 10:09:53 +00:00
} */ *ap;
{
struct vnode *vp = ap->a_vp;
struct vattr *vap = ap->a_vap;
struct file *fp;
struct stat stb;
u_int fd;
1994-05-24 10:09:53 +00:00
int error = 0;
switch (VTOFDESC(vp)->fd_type) {
case Froot:
VATTR_NULL(vap);
1994-05-24 10:09:53 +00:00
vap->va_mode = S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH;
vap->va_type = VDIR;
vap->va_nlink = 2;
vap->va_size = DEV_BSIZE;
vap->va_fileid = VTOFDESC(vp)->fd_ix;
1994-05-24 10:09:53 +00:00
vap->va_uid = 0;
vap->va_gid = 0;
vap->va_blocksize = DEV_BSIZE;
vap->va_atime.tv_sec = boottime.tv_sec;
vap->va_atime.tv_nsec = 0;
1994-05-24 10:09:53 +00:00
vap->va_mtime = vap->va_atime;
vap->va_ctime = vap->va_mtime;
vap->va_gen = 0;
vap->va_flags = 0;
vap->va_rdev = 0;
vap->va_bytes = 0;
break;
case Fdesc:
fd = VTOFDESC(vp)->fd_fd;
if ((error = fget(ap->a_td, fd, &fp)) != 0)
return (error);
bzero(&stb, sizeof(stb));
Make similar changes to fo_stat() and fo_poll() as made earlier to fo_read() and fo_write(): explicitly use the cred argument to fo_poll() as "active_cred" using the passed file descriptor's f_cred reference to provide access to the file credential. Add an active_cred argument to fo_stat() so that implementers have access to the active credential as well as the file credential. Generally modify callers of fo_stat() to pass in td->td_ucred rather than fp->f_cred, which was redundantly provided via the fp argument. This set of modifications also permits threads to perform these operations on behalf of another thread without modifying their credential. Trickle this change down into fo_stat/poll() implementations: - badfo_poll(), badfo_stat(): modify/add arguments. - kqueue_poll(), kqueue_stat(): modify arguments. - pipe_poll(), pipe_stat(): modify/add arguments, pass active_cred to MAC checks rather than td->td_ucred. - soo_poll(), soo_stat(): modify/add arguments, pass fp->f_cred rather than cred to pru_sopoll() to maintain current semantics. - sopoll(): moidfy arguments. - vn_poll(), vn_statfile(): modify/add arguments, pass new arguments to vn_stat(). Pass active_cred to MAC and fp->f_cred to VOP_POLL() to maintian current semantics. - vn_close(): rename cred to file_cred to reflect reality while I'm here. - vn_stat(): Add active_cred and file_cred arguments to vn_stat() and consumers so that this distinction is maintained at the VFS as well as 'struct file' layer. Pass active_cred instead of td->td_ucred to MAC and to VOP_GETATTR() to maintain current semantics. - fifofs: modify the creation of a "filetemp" so that the file credential is properly initialized and can be used in the socket code if desired. Pass ap->a_td->td_ucred as the active credential to soo_poll(). If we teach the vnop interface about the distinction between file and active credentials, we would use the active credential here. Note that current inconsistent passing of active_cred vs. 
file_cred to VOP's is maintained. It's not clear why GETATTR would be authorized using active_cred while POLL would be authorized using file_cred at the file system level. Obtained from: TrustedBSD Project Sponsored by: DARPA, NAI Labs
2002-08-16 12:52:03 +00:00
error = fo_stat(fp, &stb, ap->a_td->td_ucred, ap->a_td);
fdrop(fp, ap->a_td);
if (error == 0) {
VATTR_NULL(vap);
vap->va_type = IFTOVT(stb.st_mode);
vap->va_mode = stb.st_mode;
#define FDRX (VREAD|VEXEC)
if (vap->va_type == VDIR)
vap->va_mode &= ~((FDRX)|(FDRX>>3)|(FDRX>>6));
#undef FDRX
vap->va_nlink = 1;
vap->va_flags = 0;
vap->va_bytes = stb.st_blocks * stb.st_blksize;
vap->va_fileid = VTOFDESC(vp)->fd_ix;
vap->va_size = stb.st_size;
vap->va_blocksize = stb.st_blksize;
vap->va_rdev = stb.st_rdev;
/*
* If no time data is provided, use the current time.
*/
if (stb.st_atimespec.tv_sec == 0 &&
stb.st_atimespec.tv_nsec == 0)
nanotime(&stb.st_atimespec);
if (stb.st_ctimespec.tv_sec == 0 &&
stb.st_ctimespec.tv_nsec == 0)
nanotime(&stb.st_ctimespec);
if (stb.st_mtimespec.tv_sec == 0 &&
stb.st_mtimespec.tv_nsec == 0)
nanotime(&stb.st_mtimespec);
vap->va_atime = stb.st_atimespec;
vap->va_mtime = stb.st_mtimespec;
vap->va_ctime = stb.st_ctimespec;
vap->va_uid = stb.st_uid;
vap->va_gid = stb.st_gid;
}
1994-05-24 10:09:53 +00:00
break;
default:
panic("fdesc_getattr");
1995-05-30 08:16:23 +00:00
break;
1994-05-24 10:09:53 +00:00
}
if (error == 0)
vp->v_type = vap->va_type;
return (error);
}
static int
1994-05-24 10:09:53 +00:00
fdesc_setattr(ap)
struct vop_setattr_args /* {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
struct thread *a_td;
1994-05-24 10:09:53 +00:00
} */ *ap;
{
1998-06-10 21:21:31 +00:00
struct vattr *vap = ap->a_vap;
struct vnode *vp;
struct mount *mp;
1994-05-24 10:09:53 +00:00
struct file *fp;
unsigned fd;
int error;
/*
* Can't mess with the root vnode
*/
if (VTOFDESC(ap->a_vp)->fd_type == Froot)
1994-05-24 10:09:53 +00:00
return (EACCES);
fd = VTOFDESC(ap->a_vp)->fd_fd;
/*
* Allow setattr where there is an underlying vnode.
1994-05-24 10:09:53 +00:00
*/
error = getvnode(ap->a_td->td_proc->p_fd, fd, &fp);
if (error) {
/*
* getvnode() returns EINVAL if the file descriptor is not
* backed by a vnode. Silently drop all changes except
* chflags(2) in this case.
*/
if (error == EINVAL) {
if (vap->va_flags != VNOVAL)
error = EOPNOTSUPP;
else
error = 0;
}
return (error);
1994-05-24 10:09:53 +00:00
}
vp = fp->f_vnode;
if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) == 0) {
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_SETATTR(vp, ap->a_vap, ap->a_cred, ap->a_td);
VOP_UNLOCK(vp, 0);
vn_finished_write(mp);
}
fdrop(fp, ap->a_td);
1994-05-24 10:09:53 +00:00
return (error);
}
#define UIO_MX 16
static int
1994-05-24 10:09:53 +00:00
fdesc_readdir(ap)
struct vop_readdir_args /* {
struct vnode *a_vp;
struct uio *a_uio;
struct ucred *a_cred;
int *a_eofflag;
u_long *a_cookies;
int a_ncookies;
1994-05-24 10:09:53 +00:00
} */ *ap;
{
struct uio *uio = ap->a_uio;
struct filedesc *fdp;
struct dirent d;
struct dirent *dp = &d;
int error, i, off, fcnt;
1994-05-24 10:09:53 +00:00
/*
* We don't allow exporting fdesc mounts, and currently local
* requests do not need cookies.
*/
if (ap->a_ncookies)
panic("fdesc_readdir: not hungry");
if (VTOFDESC(ap->a_vp)->fd_type != Froot)
panic("fdesc_readdir: not dir");
1994-05-24 10:09:53 +00:00
off = (int)uio->uio_offset;
if (off != uio->uio_offset || off < 0 || (u_int)off % UIO_MX != 0 ||
uio->uio_resid < UIO_MX)
return (EINVAL);
i = (u_int)off / UIO_MX;
fdp = uio->uio_td->td_proc->p_fd;
error = 0;
1994-05-24 10:09:53 +00:00
fcnt = i - 2; /* The first two nodes are `.' and `..' */
1994-05-24 10:09:53 +00:00
Replace custom file descriptor array sleep lock constructed using a mutex and flags with an sxlock. This leads to a significant and measurable performance improvement as a result of access to shared locking for frequent lookup operations, reduced general overhead, and reduced overhead in the event of contention. All of these are imported for threaded applications where simultaneous access to a shared file descriptor array occurs frequently. Kris has reported 2x-4x transaction rate improvements on 8-core MySQL benchmarks; smaller improvements can be expected for many workloads as a result of reduced overhead. - Generally eliminate the distinction between "fast" and regular acquisisition of the filedesc lock; the plan is that they will now all be fast. Change all locking instances to either shared or exclusive locks. - Correct a bug (pointed out by kib) in fdfree() where previously msleep() was called without the mutex held; sx_sleep() is now always called with the sxlock held exclusively. - Universally hold the struct file lock over changes to struct file, rather than the filedesc lock or no lock. Always update the f_ops field last. A further memory barrier is required here in the future (discussed with jhb). - Improve locking and reference management in linux_at(), which fails to properly acquire vnode references before using vnode pointers. Annotate improper use of vn_fullpath(), which will be replaced at a future date. In fcntl(), we conservatively acquire an exclusive lock, even though in some cases a shared lock may be sufficient, which should be revisited. The dropping of the filedesc lock in fdgrowtable() is no longer required as the sxlock can be held over the sleep operation; we should consider removing that (pointed out by attilio). Tested by: kris Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
FILEDESC_SLOCK(fdp);
while (i < fdp->fd_nfiles + 2 && uio->uio_resid >= UIO_MX) {
switch (i) {
case 0: /* `.' */
case 1: /* `..' */
bzero((caddr_t)dp, UIO_MX);
1994-05-24 10:09:53 +00:00
dp->d_fileno = i + FD_ROOT;
dp->d_namlen = i + 1;
dp->d_reclen = UIO_MX;
bcopy("..", dp->d_name, dp->d_namlen);
dp->d_name[i + 1] = '\0';
dp->d_type = DT_DIR;
break;
default:
if (fdp->fd_ofiles[fcnt] == NULL) {
Replace custom file descriptor array sleep lock constructed using a mutex and flags with an sxlock. This leads to a significant and measurable performance improvement as a result of access to shared locking for frequent lookup operations, reduced general overhead, and reduced overhead in the event of contention. All of these are imported for threaded applications where simultaneous access to a shared file descriptor array occurs frequently. Kris has reported 2x-4x transaction rate improvements on 8-core MySQL benchmarks; smaller improvements can be expected for many workloads as a result of reduced overhead. - Generally eliminate the distinction between "fast" and regular acquisisition of the filedesc lock; the plan is that they will now all be fast. Change all locking instances to either shared or exclusive locks. - Correct a bug (pointed out by kib) in fdfree() where previously msleep() was called without the mutex held; sx_sleep() is now always called with the sxlock held exclusively. - Universally hold the struct file lock over changes to struct file, rather than the filedesc lock or no lock. Always update the f_ops field last. A further memory barrier is required here in the future (discussed with jhb). - Improve locking and reference management in linux_at(), which fails to properly acquire vnode references before using vnode pointers. Annotate improper use of vn_fullpath(), which will be replaced at a future date. In fcntl(), we conservatively acquire an exclusive lock, even though in some cases a shared lock may be sufficient, which should be revisited. The dropping of the filedesc lock in fdgrowtable() is no longer required as the sxlock can be held over the sleep operation; we should consider removing that (pointed out by attilio). Tested by: kris Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
FILEDESC_SUNLOCK(fdp);
goto done;
}
1994-05-24 10:09:53 +00:00
bzero((caddr_t) dp, UIO_MX);
dp->d_namlen = sprintf(dp->d_name, "%d", fcnt);
1994-05-24 10:09:53 +00:00
dp->d_reclen = UIO_MX;
dp->d_type = DT_UNKNOWN;
dp->d_fileno = i + FD_DESC;
break;
1994-05-24 10:09:53 +00:00
}
/*
* And ship to userland
*/
Replace custom file descriptor array sleep lock constructed using a mutex and flags with an sxlock. This leads to a significant and measurable performance improvement as a result of access to shared locking for frequent lookup operations, reduced general overhead, and reduced overhead in the event of contention. All of these are imported for threaded applications where simultaneous access to a shared file descriptor array occurs frequently. Kris has reported 2x-4x transaction rate improvements on 8-core MySQL benchmarks; smaller improvements can be expected for many workloads as a result of reduced overhead. - Generally eliminate the distinction between "fast" and regular acquisisition of the filedesc lock; the plan is that they will now all be fast. Change all locking instances to either shared or exclusive locks. - Correct a bug (pointed out by kib) in fdfree() where previously msleep() was called without the mutex held; sx_sleep() is now always called with the sxlock held exclusively. - Universally hold the struct file lock over changes to struct file, rather than the filedesc lock or no lock. Always update the f_ops field last. A further memory barrier is required here in the future (discussed with jhb). - Improve locking and reference management in linux_at(), which fails to properly acquire vnode references before using vnode pointers. Annotate improper use of vn_fullpath(), which will be replaced at a future date. In fcntl(), we conservatively acquire an exclusive lock, even though in some cases a shared lock may be sufficient, which should be revisited. The dropping of the filedesc lock in fdgrowtable() is no longer required as the sxlock can be held over the sleep operation; we should consider removing that (pointed out by attilio). Tested by: kris Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
FILEDESC_SUNLOCK(fdp);
error = uiomove(dp, UIO_MX, uio);
if (error)
goto done;
Replace custom file descriptor array sleep lock constructed using a mutex and flags with an sxlock. This leads to a significant and measurable performance improvement as a result of access to shared locking for frequent lookup operations, reduced general overhead, and reduced overhead in the event of contention. All of these are imported for threaded applications where simultaneous access to a shared file descriptor array occurs frequently. Kris has reported 2x-4x transaction rate improvements on 8-core MySQL benchmarks; smaller improvements can be expected for many workloads as a result of reduced overhead. - Generally eliminate the distinction between "fast" and regular acquisisition of the filedesc lock; the plan is that they will now all be fast. Change all locking instances to either shared or exclusive locks. - Correct a bug (pointed out by kib) in fdfree() where previously msleep() was called without the mutex held; sx_sleep() is now always called with the sxlock held exclusively. - Universally hold the struct file lock over changes to struct file, rather than the filedesc lock or no lock. Always update the f_ops field last. A further memory barrier is required here in the future (discussed with jhb). - Improve locking and reference management in linux_at(), which fails to properly acquire vnode references before using vnode pointers. Annotate improper use of vn_fullpath(), which will be replaced at a future date. In fcntl(), we conservatively acquire an exclusive lock, even though in some cases a shared lock may be sufficient, which should be revisited. The dropping of the filedesc lock in fdgrowtable() is no longer required as the sxlock can be held over the sleep operation; we should consider removing that (pointed out by attilio). Tested by: kris Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
FILEDESC_SLOCK(fdp);
1994-05-24 10:09:53 +00:00
i++;
fcnt++;
1994-05-24 10:09:53 +00:00
}
Replace custom file descriptor array sleep lock constructed using a mutex and flags with an sxlock. This leads to a significant and measurable performance improvement as a result of access to shared locking for frequent lookup operations, reduced general overhead, and reduced overhead in the event of contention. All of these are imported for threaded applications where simultaneous access to a shared file descriptor array occurs frequently. Kris has reported 2x-4x transaction rate improvements on 8-core MySQL benchmarks; smaller improvements can be expected for many workloads as a result of reduced overhead. - Generally eliminate the distinction between "fast" and regular acquisisition of the filedesc lock; the plan is that they will now all be fast. Change all locking instances to either shared or exclusive locks. - Correct a bug (pointed out by kib) in fdfree() where previously msleep() was called without the mutex held; sx_sleep() is now always called with the sxlock held exclusively. - Universally hold the struct file lock over changes to struct file, rather than the filedesc lock or no lock. Always update the f_ops field last. A further memory barrier is required here in the future (discussed with jhb). - Improve locking and reference management in linux_at(), which fails to properly acquire vnode references before using vnode pointers. Annotate improper use of vn_fullpath(), which will be replaced at a future date. In fcntl(), we conservatively acquire an exclusive lock, even though in some cases a shared lock may be sufficient, which should be revisited. The dropping of the filedesc lock in fdgrowtable() is no longer required as the sxlock can be held over the sleep operation; we should consider removing that (pointed out by attilio). Tested by: kris Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
FILEDESC_SUNLOCK(fdp);
1994-05-24 10:09:53 +00:00
done:
1994-05-24 10:09:53 +00:00
uio->uio_offset = i * UIO_MX;
return (error);
}
static int
1994-05-24 10:09:53 +00:00
fdesc_reclaim(ap)
struct vop_reclaim_args /* {
struct vnode *a_vp;
} */ *ap;
{
struct vnode *vp;
struct fdescnode *fd;
1994-05-24 10:09:53 +00:00
vp = ap->a_vp;
fd = VTOFDESC(vp);
fdesc_remove_entry(fd);
1994-05-24 10:09:53 +00:00
FREE(vp->v_data, M_TEMP);
vp->v_data = NULL;
1994-05-24 10:09:53 +00:00
return (0);
}