2005-01-06 23:35:40 +00:00
|
|
|
/*-
|
1994-05-24 10:09:53 +00:00
|
|
|
* Copyright (c) 1982, 1986, 1989, 1993
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
|
|
|
* (c) UNIX System Laboratories, Inc.
|
|
|
|
* All or some portions of this file are derived from material licensed
|
|
|
|
* to the University of California by American Telephone and Telegraph
|
|
|
|
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
|
|
|
|
* the permission of UNIX System Laboratories, Inc.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 4. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94
|
|
|
|
*/
|
|
|
|
|
2003-06-11 00:56:59 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
1997-03-23 03:37:54 +00:00
|
|
|
#include <sys/fcntl.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/file.h>
|
2004-07-10 21:47:53 +00:00
|
|
|
#include <sys/kdb.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/stat.h>
|
2006-11-06 13:42:10 +00:00
|
|
|
#include <sys/priv.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/proc.h>
|
2003-04-29 13:36:06 +00:00
|
|
|
#include <sys/limits.h>
|
2001-10-11 17:52:20 +00:00
|
|
|
#include <sys/lock.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/mount.h>
|
2000-10-20 07:58:15 +00:00
|
|
|
#include <sys/mutex.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/namei.h>
|
|
|
|
#include <sys/vnode.h>
|
2000-05-05 09:59:14 +00:00
|
|
|
#include <sys/bio.h>
|
1999-08-04 18:53:50 +00:00
|
|
|
#include <sys/buf.h>
|
1997-03-24 11:52:29 +00:00
|
|
|
#include <sys/filio.h>
|
2010-05-05 16:44:25 +00:00
|
|
|
#include <sys/resourcevar.h>
|
2002-03-26 01:09:51 +00:00
|
|
|
#include <sys/sx.h>
|
2012-06-03 16:06:56 +00:00
|
|
|
#include <sys/sysctl.h>
|
1997-03-24 11:52:29 +00:00
|
|
|
#include <sys/ttycom.h>
|
1999-08-13 11:22:48 +00:00
|
|
|
#include <sys/conf.h>
|
2001-12-18 20:48:54 +00:00
|
|
|
#include <sys/syslog.h>
|
2004-06-01 18:03:20 +00:00
|
|
|
#include <sys/unistd.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2011-08-16 20:07:47 +00:00
|
|
|
#include <security/audit/audit.h>
|
2006-10-22 11:52:19 +00:00
|
|
|
#include <security/mac/mac_framework.h>
|
|
|
|
|
2011-08-25 08:17:39 +00:00
|
|
|
#include <vm/vm.h>
|
vn_io_fault() is a facility to prevent page faults while filesystems
perform copyin/copyout of the file data into the usermode
buffer. Typical filesystem hold vnode lock and some buffer locks over
the VOP_READ() and VOP_WRITE() operations, and since page fault
handler may need to recurse into VFS to get the page content, a
deadlock is possible.
The facility works by disabling page faults handling for the current
thread and attempting to execute i/o while allowing uiomove() to
access the usermode mapping of the i/o buffer. If all buffer pages are
resident, uiomove() is successfull and request is finished. If EFAULT
is returned from uiomove(), the pages backing i/o buffer are faulted
in and held, and the copyin/out is performed using uiomove_fromphys()
over the held pages for the second attempt of VOP call.
Since pages are hold in chunks to prevent large i/o requests from
starving free pages pool, and since vnode lock is only taken for
i/o over the current chunk, the vnode lock no longer protect atomicity
of the whole i/o request. Use newly added rangelocks to provide the
required atomicity of i/o regardind other i/o and truncations.
Filesystems need to explicitely opt-in into the scheme, by setting the
MNTK_NO_IOPF struct mount flag, and optionally by using
vn_io_fault_uiomove(9) helper which takes care of calling uiomove() or
converting uio into request for uiomove_fromphys().
Reviewed by: bf (comments), mdf, pjd (previous version)
Tested by: pho
Tested by: flo, Gustau P?rez <gperez entel upc edu> (previous version)
MFC after: 2 months
2012-05-30 16:42:08 +00:00
|
|
|
#include <vm/vm_extern.h>
|
|
|
|
#include <vm/pmap.h>
|
|
|
|
#include <vm/vm_map.h>
|
2011-08-25 08:17:39 +00:00
|
|
|
#include <vm/vm_object.h>
|
vn_io_fault() is a facility to prevent page faults while filesystems
perform copyin/copyout of the file data into the usermode
buffer. Typical filesystem hold vnode lock and some buffer locks over
the VOP_READ() and VOP_WRITE() operations, and since page fault
handler may need to recurse into VFS to get the page content, a
deadlock is possible.
The facility works by disabling page faults handling for the current
thread and attempting to execute i/o while allowing uiomove() to
access the usermode mapping of the i/o buffer. If all buffer pages are
resident, uiomove() is successfull and request is finished. If EFAULT
is returned from uiomove(), the pages backing i/o buffer are faulted
in and held, and the copyin/out is performed using uiomove_fromphys()
over the held pages for the second attempt of VOP call.
Since pages are hold in chunks to prevent large i/o requests from
starving free pages pool, and since vnode lock is only taken for
i/o over the current chunk, the vnode lock no longer protect atomicity
of the whole i/o request. Use newly added rangelocks to provide the
required atomicity of i/o regardind other i/o and truncations.
Filesystems need to explicitely opt-in into the scheme, by setting the
MNTK_NO_IOPF struct mount flag, and optionally by using
vn_io_fault_uiomove(9) helper which takes care of calling uiomove() or
converting uio into request for uiomove_fromphys().
Reviewed by: bf (comments), mdf, pjd (previous version)
Tested by: pho
Tested by: flo, Gustau P?rez <gperez entel upc edu> (previous version)
MFC after: 2 months
2012-05-30 16:42:08 +00:00
|
|
|
#include <vm/vm_page.h>
|
2011-08-25 08:17:39 +00:00
|
|
|
|
2002-12-24 09:44:51 +00:00
|
|
|
static fo_rdwr_t vn_read;
|
|
|
|
static fo_rdwr_t vn_write;
|
vn_io_fault() is a facility to prevent page faults while filesystems
perform copyin/copyout of the file data into the usermode
buffer. Typical filesystem hold vnode lock and some buffer locks over
the VOP_READ() and VOP_WRITE() operations, and since page fault
handler may need to recurse into VFS to get the page content, a
deadlock is possible.
The facility works by disabling page faults handling for the current
thread and attempting to execute i/o while allowing uiomove() to
access the usermode mapping of the i/o buffer. If all buffer pages are
resident, uiomove() is successfull and request is finished. If EFAULT
is returned from uiomove(), the pages backing i/o buffer are faulted
in and held, and the copyin/out is performed using uiomove_fromphys()
over the held pages for the second attempt of VOP call.
Since pages are hold in chunks to prevent large i/o requests from
starving free pages pool, and since vnode lock is only taken for
i/o over the current chunk, the vnode lock no longer protect atomicity
of the whole i/o request. Use newly added rangelocks to provide the
required atomicity of i/o regardind other i/o and truncations.
Filesystems need to explicitely opt-in into the scheme, by setting the
MNTK_NO_IOPF struct mount flag, and optionally by using
vn_io_fault_uiomove(9) helper which takes care of calling uiomove() or
converting uio into request for uiomove_fromphys().
Reviewed by: bf (comments), mdf, pjd (previous version)
Tested by: pho
Tested by: flo, Gustau P?rez <gperez entel upc edu> (previous version)
MFC after: 2 months
2012-05-30 16:42:08 +00:00
|
|
|
static fo_rdwr_t vn_io_fault;
|
2008-01-07 20:05:19 +00:00
|
|
|
static fo_truncate_t vn_truncate;
|
2002-12-24 09:44:51 +00:00
|
|
|
static fo_ioctl_t vn_ioctl;
|
|
|
|
static fo_poll_t vn_poll;
|
|
|
|
static fo_kqfilter_t vn_kqfilter;
|
|
|
|
static fo_stat_t vn_statfile;
|
|
|
|
static fo_close_t vn_closefile;
|
1995-12-17 21:23:44 +00:00
|
|
|
|
2001-02-15 16:34:11 +00:00
|
|
|
struct fileops vnops = {
|
vn_io_fault() is a facility to prevent page faults while filesystems
perform copyin/copyout of the file data into the usermode
buffer. Typical filesystem hold vnode lock and some buffer locks over
the VOP_READ() and VOP_WRITE() operations, and since page fault
handler may need to recurse into VFS to get the page content, a
deadlock is possible.
The facility works by disabling page faults handling for the current
thread and attempting to execute i/o while allowing uiomove() to
access the usermode mapping of the i/o buffer. If all buffer pages are
resident, uiomove() is successfull and request is finished. If EFAULT
is returned from uiomove(), the pages backing i/o buffer are faulted
in and held, and the copyin/out is performed using uiomove_fromphys()
over the held pages for the second attempt of VOP call.
Since pages are hold in chunks to prevent large i/o requests from
starving free pages pool, and since vnode lock is only taken for
i/o over the current chunk, the vnode lock no longer protect atomicity
of the whole i/o request. Use newly added rangelocks to provide the
required atomicity of i/o regardind other i/o and truncations.
Filesystems need to explicitely opt-in into the scheme, by setting the
MNTK_NO_IOPF struct mount flag, and optionally by using
vn_io_fault_uiomove(9) helper which takes care of calling uiomove() or
converting uio into request for uiomove_fromphys().
Reviewed by: bf (comments), mdf, pjd (previous version)
Tested by: pho
Tested by: flo, Gustau P?rez <gperez entel upc edu> (previous version)
MFC after: 2 months
2012-05-30 16:42:08 +00:00
|
|
|
.fo_read = vn_io_fault,
|
|
|
|
.fo_write = vn_io_fault,
|
2008-01-07 20:05:19 +00:00
|
|
|
.fo_truncate = vn_truncate,
|
2003-06-18 18:16:40 +00:00
|
|
|
.fo_ioctl = vn_ioctl,
|
|
|
|
.fo_poll = vn_poll,
|
|
|
|
.fo_kqfilter = vn_kqfilter,
|
|
|
|
.fo_stat = vn_statfile,
|
|
|
|
.fo_close = vn_closefile,
|
2011-08-16 20:07:47 +00:00
|
|
|
.fo_chmod = vn_chmod,
|
|
|
|
.fo_chown = vn_chown,
|
2003-06-18 19:53:59 +00:00
|
|
|
.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
|
2000-04-16 18:53:38 +00:00
|
|
|
};
|
|
|
|
|
2001-11-11 22:39:07 +00:00
|
|
|
int
|
2007-05-31 11:51:53 +00:00
|
|
|
vn_open(ndp, flagp, cmode, fp)
|
2003-07-27 20:05:36 +00:00
|
|
|
struct nameidata *ndp;
|
2007-05-31 11:51:53 +00:00
|
|
|
int *flagp, cmode;
|
|
|
|
struct file *fp;
|
2001-11-11 22:39:07 +00:00
|
|
|
{
|
|
|
|
struct thread *td = ndp->ni_cnd.cn_thread;
|
|
|
|
|
2009-06-21 13:41:32 +00:00
|
|
|
return (vn_open_cred(ndp, flagp, cmode, 0, td->td_ucred, fp));
|
2001-11-11 22:39:07 +00:00
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
2012-06-08 18:32:09 +00:00
|
|
|
* Common code for vnode open operations via a name lookup.
|
|
|
|
* Lookup the vnode and invoke VOP_CREATE if needed.
|
1994-05-24 10:09:53 +00:00
|
|
|
* Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
|
1999-12-15 23:02:35 +00:00
|
|
|
*
|
2000-05-12 16:06:49 +00:00
|
|
|
* Note that this does NOT free nameidata for the successful case,
|
1999-12-15 23:02:35 +00:00
|
|
|
* due to the NDINIT being done elsewhere.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2009-06-21 13:41:32 +00:00
|
|
|
vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags,
|
|
|
|
struct ucred *cred, struct file *fp)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-07-04 03:34:11 +00:00
|
|
|
struct vnode *vp;
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td = ndp->ni_cnd.cn_thread;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vattr vat;
|
|
|
|
struct vattr *vap = &vat;
|
2008-10-28 13:44:11 +00:00
|
|
|
int fmode, error;
|
2006-03-28 21:22:08 +00:00
|
|
|
int vfslocked, mpsafe;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2006-03-28 21:22:08 +00:00
|
|
|
mpsafe = ndp->ni_cnd.cn_flags & MPSAFE;
|
2000-07-11 22:07:57 +00:00
|
|
|
restart:
|
2005-01-24 10:31:42 +00:00
|
|
|
vfslocked = 0;
|
2000-07-04 03:34:11 +00:00
|
|
|
fmode = *flagp;
|
1994-05-24 10:09:53 +00:00
|
|
|
if (fmode & O_CREAT) {
|
|
|
|
ndp->ni_cnd.cn_nameiop = CREATE;
|
2006-03-11 17:14:05 +00:00
|
|
|
ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF |
|
2009-06-21 13:41:32 +00:00
|
|
|
MPSAFE;
|
1998-04-06 18:25:21 +00:00
|
|
|
if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
ndp->ni_cnd.cn_flags |= FOLLOW;
|
2009-06-21 13:41:32 +00:00
|
|
|
if (!(vn_open_flags & VN_OPEN_NOAUDIT))
|
|
|
|
ndp->ni_cnd.cn_flags |= AUDITVNODE1;
|
2000-01-10 00:08:53 +00:00
|
|
|
bwillwrite();
|
2000-07-04 03:34:11 +00:00
|
|
|
if ((error = namei(ndp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
2006-03-28 21:22:08 +00:00
|
|
|
vfslocked = NDHASGIANT(ndp);
|
|
|
|
if (!mpsafe)
|
|
|
|
ndp->ni_cnd.cn_flags &= ~MPSAFE;
|
1994-05-24 10:09:53 +00:00
|
|
|
if (ndp->ni_vp == NULL) {
|
|
|
|
VATTR_NULL(vap);
|
|
|
|
vap->va_type = VREG;
|
|
|
|
vap->va_mode = cmode;
|
1997-02-10 02:22:35 +00:00
|
|
|
if (fmode & O_EXCL)
|
|
|
|
vap->va_vaflags |= VA_EXCLUSIVE;
|
2000-07-11 22:07:57 +00:00
|
|
|
if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
|
|
|
|
NDFREE(ndp, NDF_ONLY_PNBUF);
|
|
|
|
vput(ndp->ni_dvp);
|
2005-01-24 10:31:42 +00:00
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
2000-07-11 22:07:57 +00:00
|
|
|
if ((error = vn_start_write(NULL, &mp,
|
|
|
|
V_XSLEEP | PCATCH)) != 0)
|
|
|
|
return (error);
|
|
|
|
goto restart;
|
|
|
|
}
|
2002-10-19 20:56:44 +00:00
|
|
|
#ifdef MAC
|
2007-10-24 19:04:04 +00:00
|
|
|
error = mac_vnode_check_create(cred, ndp->ni_dvp,
|
2002-10-19 20:56:44 +00:00
|
|
|
&ndp->ni_cnd, vap);
|
2009-04-10 10:52:19 +00:00
|
|
|
if (error == 0)
|
2002-10-19 20:56:44 +00:00
|
|
|
#endif
|
|
|
|
error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
|
|
|
|
&ndp->ni_cnd, vap);
|
2000-07-04 03:34:11 +00:00
|
|
|
vput(ndp->ni_dvp);
|
2000-07-11 22:07:57 +00:00
|
|
|
vn_finished_write(mp);
|
1999-12-15 23:02:35 +00:00
|
|
|
if (error) {
|
2005-01-24 10:31:42 +00:00
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
1999-12-15 23:02:35 +00:00
|
|
|
NDFREE(ndp, NDF_ONLY_PNBUF);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1999-12-15 23:02:35 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
fmode &= ~O_TRUNC;
|
|
|
|
vp = ndp->ni_vp;
|
|
|
|
} else {
|
|
|
|
if (ndp->ni_dvp == ndp->ni_vp)
|
|
|
|
vrele(ndp->ni_dvp);
|
|
|
|
else
|
|
|
|
vput(ndp->ni_dvp);
|
|
|
|
ndp->ni_dvp = NULL;
|
|
|
|
vp = ndp->ni_vp;
|
|
|
|
if (fmode & O_EXCL) {
|
|
|
|
error = EEXIST;
|
|
|
|
goto bad;
|
|
|
|
}
|
|
|
|
fmode &= ~O_CREAT;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
ndp->ni_cnd.cn_nameiop = LOOKUP;
|
2005-04-27 09:05:19 +00:00
|
|
|
ndp->ni_cnd.cn_flags = ISOPEN |
|
2002-03-12 04:00:11 +00:00
|
|
|
((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
|
2009-06-21 13:41:32 +00:00
|
|
|
LOCKLEAF | MPSAFE;
|
2009-03-11 14:13:47 +00:00
|
|
|
if (!(fmode & FWRITE))
|
|
|
|
ndp->ni_cnd.cn_flags |= LOCKSHARED;
|
2009-06-21 13:41:32 +00:00
|
|
|
if (!(vn_open_flags & VN_OPEN_NOAUDIT))
|
|
|
|
ndp->ni_cnd.cn_flags |= AUDITVNODE1;
|
2000-07-04 03:34:11 +00:00
|
|
|
if ((error = namei(ndp)) != 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
2006-03-28 21:22:08 +00:00
|
|
|
if (!mpsafe)
|
|
|
|
ndp->ni_cnd.cn_flags &= ~MPSAFE;
|
|
|
|
vfslocked = NDHASGIANT(ndp);
|
1994-05-24 10:09:53 +00:00
|
|
|
vp = ndp->ni_vp;
|
|
|
|
}
|
2012-06-08 18:32:09 +00:00
|
|
|
error = vn_open_vnode(vp, fmode, cred, td, fp);
|
|
|
|
if (error)
|
2010-03-21 20:43:23 +00:00
|
|
|
goto bad;
|
2012-06-08 18:32:09 +00:00
|
|
|
*flagp = fmode;
|
|
|
|
if (!mpsafe)
|
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
|
|
|
return (0);
|
|
|
|
bad:
|
|
|
|
NDFREE(ndp, NDF_ONLY_PNBUF);
|
|
|
|
vput(vp);
|
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
|
|
|
*flagp = fmode;
|
|
|
|
ndp->ni_vp = NULL;
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Common code for vnode open operations once a vnode is located.
|
|
|
|
* Check permissions, and call the VOP_OPEN routine.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred,
|
|
|
|
struct thread *td, struct file *fp)
|
|
|
|
{
|
2012-07-31 18:25:00 +00:00
|
|
|
struct mount *mp;
|
2012-06-08 18:32:09 +00:00
|
|
|
accmode_t accmode;
|
2012-07-31 18:25:00 +00:00
|
|
|
struct flock lf;
|
|
|
|
int error, have_flock, lock_flags, type;
|
2012-06-08 18:32:09 +00:00
|
|
|
|
|
|
|
VFS_ASSERT_GIANT(vp->v_mount);
|
|
|
|
if (vp->v_type == VLNK)
|
|
|
|
return (EMLINK);
|
|
|
|
if (vp->v_type == VSOCK)
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
if (vp->v_type != VDIR && fmode & O_DIRECTORY)
|
|
|
|
return (ENOTDIR);
|
2008-10-28 13:44:11 +00:00
|
|
|
accmode = 0;
|
2002-08-01 17:14:28 +00:00
|
|
|
if (fmode & (FWRITE | O_TRUNC)) {
|
2012-06-08 18:32:09 +00:00
|
|
|
if (vp->v_type == VDIR)
|
|
|
|
return (EISDIR);
|
2008-10-28 13:44:11 +00:00
|
|
|
accmode |= VWRITE;
|
2002-08-01 17:14:28 +00:00
|
|
|
}
|
|
|
|
if (fmode & FREAD)
|
2008-10-28 13:44:11 +00:00
|
|
|
accmode |= VREAD;
|
2008-03-31 11:57:18 +00:00
|
|
|
if (fmode & FEXEC)
|
2008-10-28 13:44:11 +00:00
|
|
|
accmode |= VEXEC;
|
2009-12-08 20:47:10 +00:00
|
|
|
if ((fmode & O_APPEND) && (fmode & FWRITE))
|
2009-11-04 07:14:16 +00:00
|
|
|
accmode |= VAPPEND;
|
2002-08-01 17:14:28 +00:00
|
|
|
#ifdef MAC
|
2008-10-28 13:44:11 +00:00
|
|
|
error = mac_vnode_check_open(cred, vp, accmode);
|
2002-08-01 17:14:28 +00:00
|
|
|
if (error)
|
2012-06-08 18:32:09 +00:00
|
|
|
return (error);
|
2002-08-01 17:14:28 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
if ((fmode & O_CREAT) == 0) {
|
2008-10-28 13:44:11 +00:00
|
|
|
if (accmode & VWRITE) {
|
1994-10-02 17:35:40 +00:00
|
|
|
error = vn_writechk(vp);
|
1995-05-10 18:59:11 +00:00
|
|
|
if (error)
|
2012-06-08 18:32:09 +00:00
|
|
|
return (error);
|
1998-11-02 02:36:16 +00:00
|
|
|
}
|
2008-10-28 13:44:11 +00:00
|
|
|
if (accmode) {
|
|
|
|
error = VOP_ACCESS(vp, accmode, cred, td);
|
1995-05-10 18:59:11 +00:00
|
|
|
if (error)
|
2012-06-08 18:32:09 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
}
|
2007-05-31 11:51:53 +00:00
|
|
|
if ((error = VOP_OPEN(vp, fmode, cred, td, fp)) != 0)
|
2012-06-08 18:32:09 +00:00
|
|
|
return (error);
|
1996-08-21 21:56:23 +00:00
|
|
|
|
2012-07-31 18:25:00 +00:00
|
|
|
if (fmode & (O_EXLOCK | O_SHLOCK)) {
|
|
|
|
KASSERT(fp != NULL, ("open with flock requires fp"));
|
|
|
|
lock_flags = VOP_ISLOCKED(vp);
|
|
|
|
VOP_UNLOCK(vp, 0);
|
|
|
|
lf.l_whence = SEEK_SET;
|
|
|
|
lf.l_start = 0;
|
|
|
|
lf.l_len = 0;
|
|
|
|
if (fmode & O_EXLOCK)
|
|
|
|
lf.l_type = F_WRLCK;
|
|
|
|
else
|
|
|
|
lf.l_type = F_RDLCK;
|
|
|
|
type = F_FLOCK;
|
|
|
|
if ((fmode & FNONBLOCK) == 0)
|
|
|
|
type |= F_WAIT;
|
|
|
|
error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type);
|
|
|
|
have_flock = (error == 0);
|
|
|
|
vn_lock(vp, lock_flags | LK_RETRY);
|
|
|
|
if (error == 0 && vp->v_iflag & VI_DOOMED)
|
|
|
|
error = ENOENT;
|
|
|
|
/*
|
|
|
|
* Another thread might have used this vnode as an
|
|
|
|
* executable while the vnode lock was dropped.
|
|
|
|
* Ensure the vnode is still able to be opened for
|
|
|
|
* writing after the lock has been obtained.
|
|
|
|
*/
|
|
|
|
if (error == 0 && accmode & VWRITE)
|
|
|
|
error = vn_writechk(vp);
|
|
|
|
if (error) {
|
|
|
|
VOP_UNLOCK(vp, 0);
|
|
|
|
if (have_flock) {
|
|
|
|
lf.l_whence = SEEK_SET;
|
|
|
|
lf.l_start = 0;
|
|
|
|
lf.l_len = 0;
|
|
|
|
lf.l_type = F_UNLCK;
|
|
|
|
(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf,
|
|
|
|
F_FLOCK);
|
|
|
|
}
|
|
|
|
vn_start_write(vp, &mp, V_WAIT);
|
|
|
|
vn_lock(vp, lock_flags | LK_RETRY);
|
|
|
|
(void)VOP_CLOSE(vp, fmode, cred, td);
|
|
|
|
vn_finished_write(mp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
fp->f_flag |= FHASLOCK;
|
|
|
|
}
|
2012-03-08 20:27:20 +00:00
|
|
|
if (fmode & FWRITE) {
|
NOTE: libkvm, w, ps, 'top', and any other utility which depends on struct
proc or any VM system structure will have to be rebuilt!!!
Much needed overhaul of the VM system. Included in this first round of
changes:
1) Improved pager interfaces: init, alloc, dealloc, getpages, putpages,
haspage, and sync operations are supported. The haspage interface now
provides information about clusterability. All pager routines now take
struct vm_object's instead of "pagers".
2) Improved data structures. In the previous paradigm, there is constant
confusion caused by pagers being both a data structure ("allocate a
pager") and a collection of routines. The idea of a pager structure has
escentially been eliminated. Objects now have types, and this type is
used to index the appropriate pager. In most cases, items in the pager
structure were duplicated in the object data structure and thus were
unnecessary. In the few cases that remained, a un_pager structure union
was created in the object to contain these items.
3) Because of the cleanup of #1 & #2, a lot of unnecessary layering can now
be removed. For instance, vm_object_enter(), vm_object_lookup(),
vm_object_remove(), and the associated object hash list were some of the
things that were removed.
4) simple_lock's removed. Discussion with several people reveals that the
SMP locking primitives used in the VM system aren't likely the mechanism
that we'll be adopting. Even if it were, the locking that was in the code
was very inadequate and would have to be mostly re-done anyway. The
locking in a uni-processor kernel was a no-op but went a long way toward
making the code difficult to read and debug.
5) Places that attempted to kludge-up the fact that we don't have kernel
thread support have been fixed to reflect the reality that we are really
dealing with processes, not threads. The VM system didn't have complete
thread support, so the comments and mis-named routines were just wrong.
We now use tsleep and wakeup directly in the lock routines, for instance.
6) Where appropriate, the pagers have been improved, especially in the
pager_alloc routines. Most of the pager_allocs have been rewritten and
are now faster and easier to maintain.
7) The pagedaemon pageout clustering algorithm has been rewritten and
now tries harder to output an even number of pages before and after
the requested page. This is sort of the reverse of the ideal pagein
algorithm and should provide better overall performance.
8) Unnecessary (incorrect) casts to caddr_t in calls to tsleep & wakeup
have been removed. Some other unnecessary casts have also been removed.
9) Some almost useless debugging code removed.
10) Terminology of shadow objects vs. backing objects straightened out.
The fact that the vm_object data structure escentially had this
backwards really confused things. The use of "shadow" and "backing
object" throughout the code is now internally consistent and correct
in the Mach terminology.
11) Several minor bug fixes, including one in the vm daemon that caused
0 RSS objects to not get purged as intended.
12) A "default pager" has now been created which cleans up the transition
of objects to the "swap" type. The previous checks throughout the code
for swp->pg_data != NULL were really ugly. This change also provides
the rudiments for future backing of "anonymous" memory by something
other than the swap pager (via the vnode pager, for example), and it
allows the decision about which of these pagers to use to be made
dynamically (although will need some additional decision code to do
this, of course).
13) (dyson) MAP_COPY has been deprecated and the corresponding "copy
object" code has been removed. MAP_COPY was undocumented and non-
standard. It was furthermore broken in several ways which caused its
behavior to degrade to MAP_PRIVATE. Binaries that use MAP_COPY will
continue to work correctly, but via the slightly different semantics
of MAP_PRIVATE.
14) (dyson) Sharing maps have been removed. It's marginal usefulness in a
threads design can be worked around in other ways. Both #12 and #13
were done to simplify the code and improve readability and maintain-
ability. (As were most all of these changes)
TODO:
1) Rewrite most of the vnode pager to use VOP_GETPAGES/PUTPAGES. Doing
this will reduce the vnode pager to a mere fraction of its current size.
2) Rewrite vm_fault and the swap/vnode pagers to use the clustering
information provided by the new haspage pager interface. This will
substantially reduce the overhead by eliminating a large number of
VOP_BMAP() calls. The VOP_BMAP() filesystem interface should be
improved to provide both a "behind" and "ahead" indication of
contiguousness.
3) Implement the extended features of pager_haspage in swap_pager_haspage().
It currently just says 0 pages ahead/behind.
4) Re-implement the swap device (swstrategy) in a more elegant way, perhaps
via a much more general mechanism that could also be used for disk
striping of regular filesystems.
5) Do something to improve the architecture of vm_object_collapse(). The
fact that it makes calls into the swap pager and knows too much about
how the swap pager operates really bothers me. It also doesn't allow
for collapsing of non-swap pager objects ("unnamed" objects backed by
other pagers).
1995-07-13 08:48:48 +00:00
|
|
|
vp->v_writecount++;
|
2012-03-08 20:27:20 +00:00
|
|
|
CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
|
|
|
|
__func__, vp, vp->v_writecount);
|
|
|
|
}
|
2012-06-08 18:32:09 +00:00
|
|
|
ASSERT_VOP_LOCKED(vp, "vn_open_vnode");
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check for write permissions on the specified vnode.
|
1997-02-10 02:22:35 +00:00
|
|
|
* Prototype text segments cannot be written.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1994-05-24 10:09:53 +00:00
|
|
|
vn_writechk(vp)
|
|
|
|
register struct vnode *vp;
|
|
|
|
{
|
|
|
|
|
2002-08-04 10:29:36 +00:00
|
|
|
ASSERT_VOP_LOCKED(vp, "vn_writechk");
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* If there's shared text associated with
|
|
|
|
* the vnode, try to free it up once. If
|
|
|
|
* we fail, we can't allow writing.
|
|
|
|
*/
|
2012-09-28 11:25:02 +00:00
|
|
|
if (VOP_IS_TEXT(vp))
|
1994-05-24 10:09:53 +00:00
|
|
|
return (ETXTBSY);
|
2002-08-04 10:29:36 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Vnode close call
|
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
Make similar changes to fo_stat() and fo_poll() as made earlier to
fo_read() and fo_write(): explicitly use the cred argument to fo_poll()
as "active_cred" using the passed file descriptor's f_cred reference
to provide access to the file credential. Add an active_cred
argument to fo_stat() so that implementers have access to the active
credential as well as the file credential. Generally modify callers
of fo_stat() to pass in td->td_ucred rather than fp->f_cred, which
was redundantly provided via the fp argument. This set of modifications
also permits threads to perform these operations on behalf of another
thread without modifying their credential.
Trickle this change down into fo_stat/poll() implementations:
- badfo_poll(), badfo_stat(): modify/add arguments.
- kqueue_poll(), kqueue_stat(): modify arguments.
- pipe_poll(), pipe_stat(): modify/add arguments, pass active_cred to
MAC checks rather than td->td_ucred.
- soo_poll(), soo_stat(): modify/add arguments, pass fp->f_cred rather
than cred to pru_sopoll() to maintain current semantics.
- sopoll(): moidfy arguments.
- vn_poll(), vn_statfile(): modify/add arguments, pass new arguments
to vn_stat(). Pass active_cred to MAC and fp->f_cred to VOP_POLL()
to maintian current semantics.
- vn_close(): rename cred to file_cred to reflect reality while I'm here.
- vn_stat(): Add active_cred and file_cred arguments to vn_stat()
and consumers so that this distinction is maintained at the VFS
as well as 'struct file' layer. Pass active_cred instead of
td->td_ucred to MAC and to VOP_GETATTR() to maintain current semantics.
- fifofs: modify the creation of a "filetemp" so that the file
credential is properly initialized and can be used in the socket
code if desired. Pass ap->a_td->td_ucred as the active
credential to soo_poll(). If we teach the vnop interface about
the distinction between file and active credentials, we would use
the active credential here.
Note that current inconsistent passing of active_cred vs. file_cred to
VOP's is maintained. It's not clear why GETATTR would be authorized
using active_cred while POLL would be authorized using file_cred at
the file system level.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-08-16 12:52:03 +00:00
|
|
|
vn_close(vp, flags, file_cred, td)
|
1994-05-24 10:09:53 +00:00
|
|
|
register struct vnode *vp;
|
|
|
|
int flags;
|
Make similar changes to fo_stat() and fo_poll() as made earlier to
fo_read() and fo_write(): explicitly use the cred argument to fo_poll()
as "active_cred" using the passed file descriptor's f_cred reference
to provide access to the file credential. Add an active_cred
argument to fo_stat() so that implementers have access to the active
credential as well as the file credential. Generally modify callers
of fo_stat() to pass in td->td_ucred rather than fp->f_cred, which
was redundantly provided via the fp argument. This set of modifications
also permits threads to perform these operations on behalf of another
thread without modifying their credential.
Trickle this change down into fo_stat/poll() implementations:
- badfo_poll(), badfo_stat(): modify/add arguments.
- kqueue_poll(), kqueue_stat(): modify arguments.
- pipe_poll(), pipe_stat(): modify/add arguments, pass active_cred to
MAC checks rather than td->td_ucred.
- soo_poll(), soo_stat(): modify/add arguments, pass fp->f_cred rather
than cred to pru_sopoll() to maintain current semantics.
- sopoll(): moidfy arguments.
- vn_poll(), vn_statfile(): modify/add arguments, pass new arguments
to vn_stat(). Pass active_cred to MAC and fp->f_cred to VOP_POLL()
to maintian current semantics.
- vn_close(): rename cred to file_cred to reflect reality while I'm here.
- vn_stat(): Add active_cred and file_cred arguments to vn_stat()
and consumers so that this distinction is maintained at the VFS
as well as 'struct file' layer. Pass active_cred instead of
td->td_ucred to MAC and to VOP_GETATTR() to maintain current semantics.
- fifofs: modify the creation of a "filetemp" so that the file
credential is properly initialized and can be used in the socket
code if desired. Pass ap->a_td->td_ucred as the active
credential to soo_poll(). If we teach the vnop interface about
the distinction between file and active credentials, we would use
the active credential here.
Note that current inconsistent passing of active_cred vs. file_cred to
VOP's is maintained. It's not clear why GETATTR would be authorized
using active_cred while POLL would be authorized using file_cred at
the file system level.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-08-16 12:52:03 +00:00
|
|
|
struct ucred *file_cred;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2005-03-13 11:56:28 +00:00
|
|
|
struct mount *mp;
|
2009-03-11 14:13:47 +00:00
|
|
|
int error, lock_flags;
|
|
|
|
|
|
|
|
if (!(flags & FWRITE) && vp->v_mount != NULL &&
|
|
|
|
vp->v_mount->mnt_kern_flag & MNTK_EXTENDED_SHARED)
|
|
|
|
lock_flags = LK_SHARED;
|
|
|
|
else
|
|
|
|
lock_flags = LK_EXCLUSIVE;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2005-01-24 10:31:42 +00:00
|
|
|
VFS_ASSERT_GIANT(vp->v_mount);
|
2004-08-06 22:25:35 +00:00
|
|
|
|
2005-03-13 11:56:28 +00:00
|
|
|
vn_start_write(vp, &mp, V_WAIT);
|
2009-03-11 14:13:47 +00:00
|
|
|
vn_lock(vp, lock_flags | LK_RETRY);
|
2007-02-12 22:53:01 +00:00
|
|
|
if (flags & FWRITE) {
|
|
|
|
VNASSERT(vp->v_writecount > 0, vp,
|
|
|
|
("vn_close: negative writecount"));
|
1994-05-24 10:09:53 +00:00
|
|
|
vp->v_writecount--;
|
2012-03-08 20:27:20 +00:00
|
|
|
CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
|
|
|
|
__func__, vp, vp->v_writecount);
|
2007-02-12 22:53:01 +00:00
|
|
|
}
|
Make similar changes to fo_stat() and fo_poll() as made earlier to
fo_read() and fo_write(): explicitly use the cred argument to fo_poll()
as "active_cred" using the passed file descriptor's f_cred reference
to provide access to the file credential. Add an active_cred
argument to fo_stat() so that implementers have access to the active
credential as well as the file credential. Generally modify callers
of fo_stat() to pass in td->td_ucred rather than fp->f_cred, which
was redundantly provided via the fp argument. This set of modifications
also permits threads to perform these operations on behalf of another
thread without modifying their credential.
Trickle this change down into fo_stat/poll() implementations:
- badfo_poll(), badfo_stat(): modify/add arguments.
- kqueue_poll(), kqueue_stat(): modify arguments.
- pipe_poll(), pipe_stat(): modify/add arguments, pass active_cred to
MAC checks rather than td->td_ucred.
- soo_poll(), soo_stat(): modify/add arguments, pass fp->f_cred rather
than cred to pru_sopoll() to maintain current semantics.
- sopoll(): modify arguments.
- vn_poll(), vn_statfile(): modify/add arguments, pass new arguments
to vn_stat(). Pass active_cred to MAC and fp->f_cred to VOP_POLL()
to maintain current semantics.
- vn_close(): rename cred to file_cred to reflect reality while I'm here.
- vn_stat(): Add active_cred and file_cred arguments to vn_stat()
and consumers so that this distinction is maintained at the VFS
as well as 'struct file' layer. Pass active_cred instead of
td->td_ucred to MAC and to VOP_GETATTR() to maintain current semantics.
- fifofs: modify the creation of a "filetemp" so that the file
credential is properly initialized and can be used in the socket
code if desired. Pass ap->a_td->td_ucred as the active
credential to soo_poll(). If we teach the vnop interface about
the distinction between file and active credentials, we would use
the active credential here.
Note that current inconsistent passing of active_cred vs. file_cred to
VOP's is maintained. It's not clear why GETATTR would be authorized
using active_cred while POLL would be authorized using file_cred at
the file system level.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-08-16 12:52:03 +00:00
|
|
|
error = VOP_CLOSE(vp, flags, file_cred, td);
|
2005-03-13 11:56:28 +00:00
|
|
|
vput(vp);
|
|
|
|
vn_finished_write(mp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/*
|
2008-01-05 08:54:51 +00:00
|
|
|
* Heuristic to detect sequential operation.
|
2002-03-05 15:38:49 +00:00
|
|
|
*/
|
2008-01-05 08:54:51 +00:00
|
|
|
static int
|
2000-04-02 00:55:28 +00:00
|
|
|
sequential_heuristic(struct uio *uio, struct file *fp)
|
|
|
|
{
|
2002-01-13 11:58:06 +00:00
|
|
|
|
2009-09-28 16:59:47 +00:00
|
|
|
if (atomic_load_acq_int(&(fp->f_flag)) & FRDAHEAD)
|
|
|
|
return (fp->f_seqcount << IO_SEQSHIFT);
|
|
|
|
|
2008-01-05 08:54:51 +00:00
|
|
|
/*
|
|
|
|
* Offset 0 is handled specially. open() sets f_seqcount to 1 so
|
|
|
|
* that the first I/O is normally considered to be slightly
|
|
|
|
* sequential. Seeking to offset 0 doesn't change sequentiality
|
|
|
|
* unless previous seeks have reduced f_seqcount to 0, in which
|
|
|
|
* case offset 0 is not special.
|
|
|
|
*/
|
2000-04-02 00:55:28 +00:00
|
|
|
if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
|
|
|
|
uio->uio_offset == fp->f_nextoff) {
|
|
|
|
/*
|
2008-01-05 08:54:51 +00:00
|
|
|
* f_seqcount is in units of fixed-size blocks so that it
|
|
|
|
* depends mainly on the amount of sequential I/O and not
|
|
|
|
* much on the number of sequential I/O's. The fixed size
|
|
|
|
* of 16384 is hard-coded here since it is (not quite) just
|
|
|
|
* a magic size that works well here. This size is more
|
|
|
|
* closely related to the best I/O size for real disks than
|
|
|
|
* to any block size used by software.
|
2000-04-02 00:55:28 +00:00
|
|
|
*/
|
2008-01-05 08:54:51 +00:00
|
|
|
fp->f_seqcount += howmany(uio->uio_resid, 16384);
|
2002-12-28 20:28:10 +00:00
|
|
|
if (fp->f_seqcount > IO_SEQMAX)
|
|
|
|
fp->f_seqcount = IO_SEQMAX;
|
2008-01-05 08:54:51 +00:00
|
|
|
return (fp->f_seqcount << IO_SEQSHIFT);
|
2000-04-02 00:55:28 +00:00
|
|
|
}
|
|
|
|
|
2008-01-05 08:54:51 +00:00
|
|
|
/* Not sequential. Quickly draw-down sequentiality. */
|
2000-04-02 00:55:28 +00:00
|
|
|
if (fp->f_seqcount > 1)
|
|
|
|
fp->f_seqcount = 1;
|
|
|
|
else
|
|
|
|
fp->f_seqcount = 0;
|
2008-01-05 08:54:51 +00:00
|
|
|
return (0);
|
2000-04-02 00:55:28 +00:00
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
 * Package up an I/O request on a vnode into a uio and do it.
 *
 * "active_cred" is the credential of the thread requesting the I/O and
 * is what the MAC read/write checks authorize against; "file_cred" is
 * the credential cached in the file descriptor, and when non-NULL it is
 * the credential passed to VOP_READ()/VOP_WRITE() (otherwise
 * active_cred is used).  If "aresid" is non-NULL the residual byte
 * count is returned through it; otherwise a short transfer with no
 * other error is reported as EIO.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, struct ucred *active_cred,
    struct ucred *file_cred, ssize_t *aresid, struct thread *td)
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct ucred *cred;
	void *rl_cookie;
	int error, lock_flags;

	VFS_ASSERT_GIANT(vp->v_mount);

	/* Build a single-segment uio describing the caller's buffer. */
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	error = 0;

	if ((ioflg & IO_NODELOCKED) == 0) {
		/*
		 * Range-lock the i/o region first to keep the whole
		 * request atomic with respect to other i/o and
		 * truncations, then (for writes) enter the write
		 * suspension protocol before taking the vnode lock.
		 */
		if (rw == UIO_READ) {
			rl_cookie = vn_rangelock_rlock(vp, offset,
			    offset + len);
		} else {
			rl_cookie = vn_rangelock_wlock(vp, offset,
			    offset + len);
		}
		mp = NULL;
		if (rw == UIO_WRITE) {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
			    != 0)
				goto out;
			/*
			 * A shared vnode lock suffices for the write
			 * when the mount advertises shared-write
			 * support (MNT_SHARED_WRITES).
			 */
			if (MNT_SHARED_WRITES(mp) ||
			    ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount)))
				lock_flags = LK_SHARED;
			else
				lock_flags = LK_EXCLUSIVE;
		} else
			lock_flags = LK_SHARED;
		vn_lock(vp, lock_flags | LK_RETRY);
	} else
		rl_cookie = NULL;

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
#ifdef MAC
	/* MAC authorizes with the active credential, not the file's. */
	if ((ioflg & IO_NOMACCHECK) == 0) {
		if (rw == UIO_READ)
			error = mac_vnode_check_read(active_cred, file_cred,
			    vp);
		else
			error = mac_vnode_check_write(active_cred, file_cred,
			    vp);
	}
#endif
	if (error == 0) {
		/* Prefer the file credential for the VOP when provided. */
		if (file_cred != NULL)
			cred = file_cred;
		else
			cred = active_cred;
		if (rw == UIO_READ)
			error = VOP_READ(vp, &auio, ioflg, cred);
		else
			error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		/* Caller wants no residual: a short i/o is an error. */
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp, 0);
		if (mp != NULL)
			vn_finished_write(mp);
	}
out:
	if (rl_cookie != NULL)
		vn_rangelock_unlock(vp, rl_cookie);
	return (error);
}
|
|
|
|
|
2001-09-08 20:02:33 +00:00
|
|
|
/*
|
|
|
|
* Package up an I/O request on a vnode into a uio and do it. The I/O
|
|
|
|
* request is split up into smaller chunks and we try to avoid saturating
|
|
|
|
* the buffer cache while potentially holding a vnode locked, so we
|
2011-02-08 00:16:36 +00:00
|
|
|
* check bwillwrite() before calling vn_rdwr(). We also call kern_yield()
|
2001-09-26 06:54:32 +00:00
|
|
|
* to give other processes a chance to lock the vnode (either other processes
|
|
|
|
* core'ing the same binary, or unrelated processes scanning the directory).
|
2001-09-08 20:02:33 +00:00
|
|
|
*/
|
|
|
|
int
|
In order to better support flexible and extensible access control,
make a series of modifications to the credential arguments relating
to file read and write operations to cliarfy which credential is
used for what:
- Change fo_read() and fo_write() to accept "active_cred" instead of
"cred", and change the semantics of consumers of fo_read() and
fo_write() to pass the active credential of the thread requesting
an operation rather than the cached file cred. The cached file
cred is still available in fo_read() and fo_write() consumers
via fp->f_cred. These changes largely in sys_generic.c.
For each implementation of fo_read() and fo_write(), update cred
usage to reflect this change and maintain current semantics:
- badfo_readwrite() unchanged
- kqueue_read/write() unchanged
pipe_read/write() now authorize MAC using active_cred rather
than td->td_ucred
- soo_read/write() unchanged
- vn_read/write() now authorize MAC using active_cred but
VOP_READ/WRITE() with fp->f_cred
Modify vn_rdwr() to accept two credential arguments instead of a
single credential: active_cred and file_cred. Use active_cred
for MAC authorization, and select a credential for use in
VOP_READ/WRITE() based on whether file_cred is NULL or not. If
file_cred is provided, authorize the VOP using that cred,
otherwise the active credential, matching current semantics.
Modify current vn_rdwr() consumers to pass a file_cred if used
in the context of a struct file, and to always pass active_cred.
When vn_rdwr() is used without a file_cred, pass NOCRED.
These changes should maintain current semantics for read/write,
but avoid a redundant passing of fp->f_cred, as well as making
it more clear what the origin of each credential is in file
descriptor read/write operations.
Follow-up commits will make similar changes to other file descriptor
operations, and modify the MAC framework to pass both credentials
to MAC policy modules so they can implement either semantic for
revocation.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-08-15 20:55:08 +00:00
|
|
|
/*
 * Package up an I/O request on a vnode into a series of vn_rdwr() calls,
 * splitting the transfer into chunks aligned to MAXBSIZE boundaries so
 * that filesystems see full-block writes except possibly for the first
 * and last chunks.  The CPU is yielded between chunks so that very large
 * transfers do not hog the processor.
 *
 * active_cred is used for MAC authorization and file_cred (or NOCRED)
 * for the VOP itself, exactly as in vn_rdwr().  On return *aresid (if
 * non-NULL) holds the number of bytes not transferred.
 */
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
    file_cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	void *base;
	size_t len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	size_t *aresid;
	struct thread *td;
{
	int error = 0;
	ssize_t iaresid;	/* residual of the most recent chunk */

	VFS_ASSERT_GIANT(vp->v_mount);

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		/* Writes to regular files may need buffer space; throttle. */
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		iaresid = 0;
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, active_cred, file_cred, &iaresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base = (char *)base + chunk;
		kern_yield(PRI_USER);
	} while (len);
	/*
	 * Total residual is the untransferred remainder plus whatever the
	 * last (possibly failed) chunk left behind.
	 */
	if (aresid)
		*aresid = len + iaresid;
	return (error);
}
|
|
|
|
|
2012-07-02 21:01:03 +00:00
|
|
|
/*
 * Acquire the FOFFSET_LOCKED sleep "lock" on fp and return the current
 * f_offset.  This serializes updates to f_offset among shared vnode lock
 * holders; it is ordered before the rangelocks in the lock order.
 *
 * With FOF_NOLOCK only the current value is returned, without taking the
 * lock (and without even the pool mutex when an off_t read is atomic).
 */
off_t
foffset_lock(struct file *fp, int flags)
{
	struct mtx *mtxp;
	off_t res;

	/* FOF_OFFSET callers do not use f_offset at all. */
	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

#if OFF_MAX <= LONG_MAX
	/*
	 * Caller only wants the current f_offset value.  Assume that
	 * the long and shorter integer types reads are atomic.
	 */
	if ((flags & FOF_NOLOCK) != 0)
		return (fp->f_offset);
#endif

	/*
	 * According to McKusick the vn lock was protecting f_offset here.
	 * It is now protected by the FOFFSET_LOCKED flag.
	 */
	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if ((flags & FOF_NOLOCK) == 0) {
		/* Sleep until the current holder calls foffset_unlock(). */
		while (fp->f_vnread_flags & FOFFSET_LOCKED) {
			fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
			msleep(&fp->f_vnread_flags, mtxp, PUSER -1,
			    "vofflock", 0);
		}
		fp->f_vnread_flags |= FOFFSET_LOCKED;
	}
	res = fp->f_offset;
	mtx_unlock(mtxp);
	return (res);
}
|
|
|
|
|
|
|
|
/*
 * Store val into f_offset (and optionally f_nextoff) and release the
 * FOFFSET_LOCKED block taken by foffset_lock(), waking any waiters.
 *
 * FOF_NOUPDATE suppresses the f_offset store, FOF_NEXTOFF additionally
 * records val as the sequential-heuristic next offset, and FOF_NOLOCK
 * indicates the lock was never taken (matching foffset_lock(FOF_NOLOCK)).
 */
void
foffset_unlock(struct file *fp, off_t val, int flags)
{
	struct mtx *mtxp;

	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

#if OFF_MAX <= LONG_MAX
	/* Lockless path: off_t stores are atomic on this platform. */
	if ((flags & FOF_NOLOCK) != 0) {
		if ((flags & FOF_NOUPDATE) == 0)
			fp->f_offset = val;
		if ((flags & FOF_NEXTOFF) != 0)
			fp->f_nextoff = val;
		return;
	}
#endif

	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if ((flags & FOF_NOUPDATE) == 0)
		fp->f_offset = val;
	if ((flags & FOF_NEXTOFF) != 0)
		fp->f_nextoff = val;
	if ((flags & FOF_NOLOCK) == 0) {
		/* We must still own the lock taken in foffset_lock(). */
		KASSERT((fp->f_vnread_flags & FOFFSET_LOCKED) != 0,
		    ("Lost FOFFSET_LOCKED"));
		if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
			wakeup(&fp->f_vnread_flags);
		/* Clears LOCKED and LOCK_WAITING in one store. */
		fp->f_vnread_flags = 0;
	}
	mtx_unlock(mtxp);
}
|
|
|
|
|
2012-07-02 21:01:03 +00:00
|
|
|
void
|
|
|
|
foffset_lock_uio(struct file *fp, struct uio *uio, int flags)
|
|
|
|
{
|
|
|
|
|
|
|
|
if ((flags & FOF_OFFSET) == 0)
|
|
|
|
uio->uio_offset = foffset_lock(fp, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
foffset_unlock_uio(struct file *fp, struct uio *uio, int flags)
|
|
|
|
{
|
|
|
|
|
|
|
|
if ((flags & FOF_OFFSET) == 0)
|
|
|
|
foffset_unlock(fp, uio->uio_offset, flags);
|
|
|
|
}
|
|
|
|
|
Fix locking for f_offset, vn_read() and vn_write() cases only, for now.
It seems that intended locking protocol for struct file f_offset field
was as follows: f_offset should always be changed under the vnode lock
(except fcntl(2) and lseek(2) did not followed the rules). Since
read(2) uses shared vnode lock, FOFFSET_LOCKED block is additionally
taken to serialize shared vnode lock owners.
This was broken first by enabling shared lock on writes, then by
fadvise changes, which moved f_offset assigned from under vnode lock,
and last by vn_io_fault() doing chunked i/o. More, due to uio_offset
not yet valid in vn_io_fault(), the range lock for reads was taken on
the wrong region.
Change the locking for f_offset to always use FOFFSET_LOCKED block,
which is placed before rangelocks in the lock order.
Extract foffset_lock() and foffset_unlock() functions which implement
FOFFSET_LOCKED lock, and consistently lock f_offset with it in the
vn_io_fault() both for reads and writes, even if MNTK_NO_IOPF flag is
not set for the vnode mount. Indicate that f_offset is already valid
for vn_read() and vn_write() calls from vn_io_fault() with FOF_OFFSET
flag, and assert that all callers of vn_read() and vn_write() follow
this protocol.
Extract get_advice() function to calculate the POSIX_FADV_XXX value
for the i/o region, and use it where appropriate.
Reviewed by: jhb
Tested by: pho
MFC after: 2 weeks
2012-06-21 09:19:41 +00:00
|
|
|
static int
|
|
|
|
get_advice(struct file *fp, struct uio *uio)
|
|
|
|
{
|
|
|
|
struct mtx *mtxp;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = POSIX_FADV_NORMAL;
|
|
|
|
if (fp->f_advice == NULL)
|
|
|
|
return (ret);
|
|
|
|
|
|
|
|
mtxp = mtx_pool_find(mtxpool_sleep, fp);
|
|
|
|
mtx_lock(mtxp);
|
|
|
|
if (uio->uio_offset >= fp->f_advice->fa_start &&
|
|
|
|
uio->uio_offset + uio->uio_resid <= fp->f_advice->fa_end)
|
|
|
|
ret = fp->f_advice->fa_advice;
|
|
|
|
mtx_unlock(mtxp);
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* File table vnode read routine.
|
|
|
|
*/
|
1995-12-17 21:23:44 +00:00
|
|
|
/*
 * File table vnode read routine (fo_read for vnodes).
 *
 * The caller must have locked f_offset and set uio_offset already, which
 * is asserted via FOF_OFFSET.  MAC authorization uses active_cred while
 * VOP_READ() itself runs with the cached open-time credential fp->f_cred.
 */
static int
vn_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	int flags;
	struct thread *td;
{
	struct vnode *vp;
	struct mtx *mtxp;
	int error, ioflag;
	int advice, vfslocked;
	off_t offset, start, end;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	/* Callers must pre-lock f_offset; see foffset_lock_uio(). */
	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
	vp = fp->f_vnode;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	advice = get_advice(fp, uio);
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	/* A shared vnode lock is sufficient for reading. */
	vn_lock(vp, LK_SHARED | LK_RETRY);

	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_NOREUSE:
		ioflag |= sequential_heuristic(uio, fp);
		break;
	case POSIX_FADV_RANDOM:
		/* Disable read-ahead for random I/O. */
		break;
	}
	/* Remember the starting offset to size the NOREUSE flush below. */
	offset = uio->uio_offset;

#ifdef MAC
	error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
	    offset != uio->uio_offset) {
		/*
		 * Use POSIX_FADV_DONTNEED to flush clean pages and
		 * buffers for the backing file after a
		 * POSIX_FADV_NOREUSE read(2).  To optimize the common
		 * case of using POSIX_FADV_NOREUSE with sequential
		 * access, track the previous implicit DONTNEED
		 * request and grow this request to include the
		 * current read(2) in addition to the previous
		 * DONTNEED.  With purely sequential access this will
		 * cause the DONTNEED requests to continuously grow to
		 * cover all of the previously read regions of the
		 * file.  This allows filesystem blocks that are
		 * accessed by multiple calls to read(2) to be flushed
		 * once the last read(2) finishes.
		 */
		start = offset;
		end = uio->uio_offset - 1;
		mtxp = mtx_pool_find(mtxpool_sleep, fp);
		mtx_lock(mtxp);
		if (fp->f_advice != NULL &&
		    fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) {
			/* Merge with the previous window when adjacent. */
			if (start != 0 && fp->f_advice->fa_prevend + 1 == start)
				start = fp->f_advice->fa_prevstart;
			else if (fp->f_advice->fa_prevstart != 0 &&
			    fp->f_advice->fa_prevstart == end + 1)
				end = fp->f_advice->fa_prevend;
			fp->f_advice->fa_prevstart = start;
			fp->f_advice->fa_prevend = end;
		}
		mtx_unlock(mtxp);
		error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED);
	}
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* File table vnode write routine.
|
|
|
|
*/
|
1995-12-17 21:23:44 +00:00
|
|
|
static int
|
In order to better support flexible and extensible access control,
make a series of modifications to the credential arguments relating
to file read and write operations to cliarfy which credential is
used for what:
- Change fo_read() and fo_write() to accept "active_cred" instead of
"cred", and change the semantics of consumers of fo_read() and
fo_write() to pass the active credential of the thread requesting
an operation rather than the cached file cred. The cached file
cred is still available in fo_read() and fo_write() consumers
via fp->f_cred. These changes largely in sys_generic.c.
For each implementation of fo_read() and fo_write(), update cred
usage to reflect this change and maintain current semantics:
- badfo_readwrite() unchanged
- kqueue_read/write() unchanged
pipe_read/write() now authorize MAC using active_cred rather
than td->td_ucred
- soo_read/write() unchanged
- vn_read/write() now authorize MAC using active_cred but
VOP_READ/WRITE() with fp->f_cred
Modify vn_rdwr() to accept two credential arguments instead of a
single credential: active_cred and file_cred. Use active_cred
for MAC authorization, and select a credential for use in
VOP_READ/WRITE() based on whether file_cred is NULL or not. If
file_cred is provided, authorize the VOP using that cred,
otherwise the active credential, matching current semantics.
Modify current vn_rdwr() consumers to pass a file_cred if used
in the context of a struct file, and to always pass active_cred.
When vn_rdwr() is used without a file_cred, pass NOCRED.
These changes should maintain current semantics for read/write,
but avoid a redundant passing of fp->f_cred, as well as making
it more clear what the origin of each credential is in file
descriptor read/write operations.
Follow-up commits will make similar changes to other file descriptor
operations, and modify the MAC framework to pass both credentials
to MAC policy modules so they can implement either semantic for
revocation.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-08-15 20:55:08 +00:00
|
|
|
vn_write(fp, uio, active_cred, flags, td)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct file *fp;
|
|
|
|
struct uio *uio;
|
In order to better support flexible and extensible access control,
make a series of modifications to the credential arguments relating
to file read and write operations to cliarfy which credential is
used for what:
- Change fo_read() and fo_write() to accept "active_cred" instead of
"cred", and change the semantics of consumers of fo_read() and
fo_write() to pass the active credential of the thread requesting
an operation rather than the cached file cred. The cached file
cred is still available in fo_read() and fo_write() consumers
via fp->f_cred. These changes largely in sys_generic.c.
For each implementation of fo_read() and fo_write(), update cred
usage to reflect this change and maintain current semantics:
- badfo_readwrite() unchanged
- kqueue_read/write() unchanged
pipe_read/write() now authorize MAC using active_cred rather
than td->td_ucred
- soo_read/write() unchanged
- vn_read/write() now authorize MAC using active_cred but
VOP_READ/WRITE() with fp->f_cred
Modify vn_rdwr() to accept two credential arguments instead of a
single credential: active_cred and file_cred. Use active_cred
for MAC authorization, and select a credential for use in
VOP_READ/WRITE() based on whether file_cred is NULL or not. If
file_cred is provided, authorize the VOP using that cred,
otherwise the active credential, matching current semantics.
Modify current vn_rdwr() consumers to pass a file_cred if used
in the context of a struct file, and to always pass active_cred.
When vn_rdwr() is used without a file_cred, pass NOCRED.
These changes should maintain current semantics for read/write,
but avoid a redundant passing of fp->f_cred, as well as making
it more clear what the origin of each credential is in file
descriptor read/write operations.
Follow-up commits will make similar changes to other file descriptor
operations, and modify the MAC framework to pass both credentials
to MAC policy modules so they can implement either semantic for
revocation.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-08-15 20:55:08 +00:00
|
|
|
struct ucred *active_cred;
|
1999-04-04 21:41:28 +00:00
|
|
|
int flags;
|
2010-06-26 21:44:45 +00:00
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1999-07-08 06:06:00 +00:00
|
|
|
struct vnode *vp;
|
2000-07-11 22:07:57 +00:00
|
|
|
struct mount *mp;
|
2011-11-04 04:02:50 +00:00
|
|
|
struct mtx *mtxp;
|
Fix locking for f_offset, vn_read() and vn_write() cases only, for now.
It seems that intended locking protocol for struct file f_offset field
was as follows: f_offset should always be changed under the vnode lock
(except fcntl(2) and lseek(2) did not followed the rules). Since
read(2) uses shared vnode lock, FOFFSET_LOCKED block is additionally
taken to serialize shared vnode lock owners.
This was broken first by enabling shared lock on writes, then by
fadvise changes, which moved f_offset assigned from under vnode lock,
and last by vn_io_fault() doing chunked i/o. More, due to uio_offset
not yet valid in vn_io_fault(), the range lock for reads was taken on
the wrong region.
Change the locking for f_offset to always use FOFFSET_LOCKED block,
which is placed before rangelocks in the lock order.
Extract foffset_lock() and foffset_unlock() functions which implements
FOFFSET_LOCKED lock, and consistently lock f_offset with it in the
vn_io_fault() both for reads and writes, even if MNTK_NO_IOPF flag is
not set for the vnode mount. Indicate that f_offset is already valid
for vn_read() and vn_write() calls from vn_io_fault() with FOF_OFFSET
flag, and assert that all callers of vn_read() and vn_write() follow
this protocol.
Extract get_advice() function to calculate the POSIX_FADV_XXX value
for the i/o region, and use it were appropriate.
Reviewed by: jhb
Tested by: pho
MFC after: 2 weeks
2012-06-21 09:19:41 +00:00
|
|
|
int error, ioflag, lock_flags;
|
2011-11-04 04:02:50 +00:00
|
|
|
int advice, vfslocked;
|
2012-06-19 18:42:24 +00:00
|
|
|
off_t offset, start, end;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
|
|
|
|
uio->uio_td, td));
|
Fix locking for f_offset, vn_read() and vn_write() cases only, for now.
It seems that intended locking protocol for struct file f_offset field
was as follows: f_offset should always be changed under the vnode lock
(except fcntl(2) and lseek(2) did not followed the rules). Since
read(2) uses shared vnode lock, FOFFSET_LOCKED block is additionally
taken to serialize shared vnode lock owners.
This was broken first by enabling shared lock on writes, then by
fadvise changes, which moved f_offset assigned from under vnode lock,
and last by vn_io_fault() doing chunked i/o. More, due to uio_offset
not yet valid in vn_io_fault(), the range lock for reads was taken on
the wrong region.
Change the locking for f_offset to always use FOFFSET_LOCKED block,
which is placed before rangelocks in the lock order.
Extract foffset_lock() and foffset_unlock() functions which implements
FOFFSET_LOCKED lock, and consistently lock f_offset with it in the
vn_io_fault() both for reads and writes, even if MNTK_NO_IOPF flag is
not set for the vnode mount. Indicate that f_offset is already valid
for vn_read() and vn_write() calls from vn_io_fault() with FOF_OFFSET
flag, and assert that all callers of vn_read() and vn_write() follow
this protocol.
Extract get_advice() function to calculate the POSIX_FADV_XXX value
for the i/o region, and use it were appropriate.
Reviewed by: jhb
Tested by: pho
MFC after: 2 weeks
2012-06-21 09:19:41 +00:00
|
|
|
KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
|
2003-06-22 08:41:43 +00:00
|
|
|
vp = fp->f_vnode;
|
2005-01-24 10:31:42 +00:00
|
|
|
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
|
1999-07-08 06:06:00 +00:00
|
|
|
if (vp->v_type == VREG)
|
|
|
|
bwillwrite();
|
1999-04-21 05:56:45 +00:00
|
|
|
ioflag = IO_UNIT;
|
|
|
|
if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
|
1994-05-24 10:09:53 +00:00
|
|
|
ioflag |= IO_APPEND;
|
|
|
|
if (fp->f_flag & FNONBLOCK)
|
|
|
|
ioflag |= IO_NDELAY;
|
2001-05-24 07:22:27 +00:00
|
|
|
if (fp->f_flag & O_DIRECT)
|
|
|
|
ioflag |= IO_DIRECT;
|
1997-02-10 02:22:35 +00:00
|
|
|
if ((fp->f_flag & O_FSYNC) ||
|
|
|
|
(vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
|
|
|
|
ioflag |= IO_SYNC;
|
2000-07-11 22:07:57 +00:00
|
|
|
mp = NULL;
|
2000-11-02 21:14:13 +00:00
|
|
|
if (vp->v_type != VCHR &&
|
2005-01-24 10:31:42 +00:00
|
|
|
(error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
|
|
|
|
goto unlock;
|
Fix locking for f_offset, vn_read() and vn_write() cases only, for now.
It seems that intended locking protocol for struct file f_offset field
was as follows: f_offset should always be changed under the vnode lock
(except fcntl(2) and lseek(2) did not followed the rules). Since
read(2) uses shared vnode lock, FOFFSET_LOCKED block is additionally
taken to serialize shared vnode lock owners.
This was broken first by enabling shared lock on writes, then by
fadvise changes, which moved f_offset assigned from under vnode lock,
and last by vn_io_fault() doing chunked i/o. More, due to uio_offset
not yet valid in vn_io_fault(), the range lock for reads was taken on
the wrong region.
Change the locking for f_offset to always use FOFFSET_LOCKED block,
which is placed before rangelocks in the lock order.
Extract foffset_lock() and foffset_unlock() functions which implements
FOFFSET_LOCKED lock, and consistently lock f_offset with it in the
vn_io_fault() both for reads and writes, even if MNTK_NO_IOPF flag is
not set for the vnode mount. Indicate that f_offset is already valid
for vn_read() and vn_write() calls from vn_io_fault() with FOF_OFFSET
flag, and assert that all callers of vn_read() and vn_write() follow
this protocol.
Extract get_advice() function to calculate the POSIX_FADV_XXX value
for the i/o region, and use it were appropriate.
Reviewed by: jhb
Tested by: pho
MFC after: 2 weeks
2012-06-21 09:19:41 +00:00
|
|
|
|
|
|
|
advice = get_advice(fp, uio);
|
2012-09-25 21:31:17 +00:00
|
|
|
|
|
|
|
if (MNT_SHARED_WRITES(mp) ||
|
|
|
|
(mp == NULL && MNT_SHARED_WRITES(vp->v_mount))) {
|
2009-06-04 16:18:07 +00:00
|
|
|
lock_flags = LK_SHARED;
|
|
|
|
} else {
|
|
|
|
lock_flags = LK_EXCLUSIVE;
|
|
|
|
}
|
|
|
|
|
|
|
|
vn_lock(vp, lock_flags | LK_RETRY);
|
2011-11-04 04:02:50 +00:00
|
|
|
switch (advice) {
|
|
|
|
case POSIX_FADV_NORMAL:
|
|
|
|
case POSIX_FADV_SEQUENTIAL:
|
2012-06-19 18:42:24 +00:00
|
|
|
case POSIX_FADV_NOREUSE:
|
2011-11-04 04:02:50 +00:00
|
|
|
ioflag |= sequential_heuristic(uio, fp);
|
|
|
|
break;
|
|
|
|
case POSIX_FADV_RANDOM:
|
|
|
|
/* XXX: Is this correct? */
|
|
|
|
break;
|
|
|
|
}
|
2012-06-19 18:42:24 +00:00
|
|
|
offset = uio->uio_offset;
|
2011-11-04 04:02:50 +00:00
|
|
|
|
2002-08-01 17:23:22 +00:00
|
|
|
#ifdef MAC
|
2007-10-24 19:04:04 +00:00
|
|
|
error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
|
2002-08-01 17:23:22 +00:00
|
|
|
if (error == 0)
|
|
|
|
#endif
|
In order to better support flexible and extensible access control,
make a series of modifications to the credential arguments relating
to file read and write operations to cliarfy which credential is
used for what:
- Change fo_read() and fo_write() to accept "active_cred" instead of
"cred", and change the semantics of consumers of fo_read() and
fo_write() to pass the active credential of the thread requesting
an operation rather than the cached file cred. The cached file
cred is still available in fo_read() and fo_write() consumers
via fp->f_cred. These changes largely in sys_generic.c.
For each implementation of fo_read() and fo_write(), update cred
usage to reflect this change and maintain current semantics:
- badfo_readwrite() unchanged
- kqueue_read/write() unchanged
pipe_read/write() now authorize MAC using active_cred rather
than td->td_ucred
- soo_read/write() unchanged
- vn_read/write() now authorize MAC using active_cred but
VOP_READ/WRITE() with fp->f_cred
Modify vn_rdwr() to accept two credential arguments instead of a
single credential: active_cred and file_cred. Use active_cred
for MAC authorization, and select a credential for use in
VOP_READ/WRITE() based on whether file_cred is NULL or not. If
file_cred is provided, authorize the VOP using that cred,
otherwise the active credential, matching current semantics.
Modify current vn_rdwr() consumers to pass a file_cred if used
in the context of a struct file, and to always pass active_cred.
When vn_rdwr() is used without a file_cred, pass NOCRED.
These changes should maintain current semantics for read/write,
but avoid a redundant passing of fp->f_cred, as well as making
it more clear what the origin of each credential is in file
descriptor read/write operations.
Follow-up commits will make similar changes to other file descriptor
operations, and modify the MAC framework to pass both credentials
to MAC policy modules so they can implement either semantic for
revocation.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-08-15 20:55:08 +00:00
|
|
|
error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
|
2000-04-02 00:55:28 +00:00
|
|
|
fp->f_nextoff = uio->uio_offset;
|
2008-01-13 14:44:15 +00:00
|
|
|
VOP_UNLOCK(vp, 0);
|
2006-04-28 21:54:05 +00:00
|
|
|
if (vp->v_type != VCHR)
|
|
|
|
vn_finished_write(mp);
|
2012-06-19 18:42:24 +00:00
|
|
|
if (error == 0 && advice == POSIX_FADV_NOREUSE &&
|
|
|
|
offset != uio->uio_offset) {
|
|
|
|
/*
|
|
|
|
* Use POSIX_FADV_DONTNEED to flush clean pages and
|
|
|
|
* buffers for the backing file after a
|
|
|
|
* POSIX_FADV_NOREUSE write(2). To optimize the
|
|
|
|
* common case of using POSIX_FADV_NOREUSE with
|
|
|
|
* sequential access, track the previous implicit
|
|
|
|
* DONTNEED request and grow this request to include
|
|
|
|
* the current write(2) in addition to the previous
|
|
|
|
* DONTNEED. With purely sequential access this will
|
|
|
|
* cause the DONTNEED requests to continously grow to
|
|
|
|
* cover all of the previously written regions of the
|
|
|
|
* file.
|
|
|
|
*
|
|
|
|
* Note that the blocks just written are almost
|
|
|
|
* certainly still dirty, so this only works when
|
|
|
|
* VOP_ADVISE() calls from subsequent writes push out
|
|
|
|
* the data written by this write(2) once the backing
|
|
|
|
* buffers are clean. However, as compared to forcing
|
|
|
|
* IO_DIRECT, this gives much saner behavior. Write
|
|
|
|
* clustering is still allowed, and clean pages are
|
|
|
|
* merely moved to the cache page queue rather than
|
|
|
|
* outright thrown away. This means a subsequent
|
|
|
|
* read(2) can still avoid hitting the disk if the
|
|
|
|
* pages have not been reclaimed.
|
|
|
|
*
|
|
|
|
* This does make POSIX_FADV_NOREUSE largely useless
|
|
|
|
* with non-sequential access. However, sequential
|
|
|
|
* access is the more common use case and the flag is
|
|
|
|
* merely advisory.
|
|
|
|
*/
|
|
|
|
start = offset;
|
|
|
|
end = uio->uio_offset - 1;
|
Fix locking for f_offset, vn_read() and vn_write() cases only, for now.
It seems that intended locking protocol for struct file f_offset field
was as follows: f_offset should always be changed under the vnode lock
(except fcntl(2) and lseek(2) did not followed the rules). Since
read(2) uses shared vnode lock, FOFFSET_LOCKED block is additionally
taken to serialize shared vnode lock owners.
This was broken first by enabling shared lock on writes, then by
fadvise changes, which moved f_offset assigned from under vnode lock,
and last by vn_io_fault() doing chunked i/o. More, due to uio_offset
not yet valid in vn_io_fault(), the range lock for reads was taken on
the wrong region.
Change the locking for f_offset to always use FOFFSET_LOCKED block,
which is placed before rangelocks in the lock order.
Extract foffset_lock() and foffset_unlock() functions which implements
FOFFSET_LOCKED lock, and consistently lock f_offset with it in the
vn_io_fault() both for reads and writes, even if MNTK_NO_IOPF flag is
not set for the vnode mount. Indicate that f_offset is already valid
for vn_read() and vn_write() calls from vn_io_fault() with FOF_OFFSET
flag, and assert that all callers of vn_read() and vn_write() follow
this protocol.
Extract get_advice() function to calculate the POSIX_FADV_XXX value
for the i/o region, and use it were appropriate.
Reviewed by: jhb
Tested by: pho
MFC after: 2 weeks
2012-06-21 09:19:41 +00:00
|
|
|
mtxp = mtx_pool_find(mtxpool_sleep, fp);
|
2012-06-19 18:42:24 +00:00
|
|
|
mtx_lock(mtxp);
|
|
|
|
if (fp->f_advice != NULL &&
|
|
|
|
fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) {
|
|
|
|
if (start != 0 && fp->f_advice->fa_prevend + 1 == start)
|
|
|
|
start = fp->f_advice->fa_prevstart;
|
|
|
|
else if (fp->f_advice->fa_prevstart != 0 &&
|
|
|
|
fp->f_advice->fa_prevstart == end + 1)
|
|
|
|
end = fp->f_advice->fa_prevend;
|
|
|
|
fp->f_advice->fa_prevstart = start;
|
|
|
|
fp->f_advice->fa_prevend = end;
|
|
|
|
}
|
|
|
|
mtx_unlock(mtxp);
|
|
|
|
error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED);
|
|
|
|
}
|
|
|
|
|
2005-01-24 10:31:42 +00:00
|
|
|
unlock:
|
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
vn_io_fault() is a facility to prevent page faults while filesystems
perform copyin/copyout of the file data into the usermode
buffer. Typical filesystem hold vnode lock and some buffer locks over
the VOP_READ() and VOP_WRITE() operations, and since page fault
handler may need to recurse into VFS to get the page content, a
deadlock is possible.
The facility works by disabling page faults handling for the current
thread and attempting to execute i/o while allowing uiomove() to
access the usermode mapping of the i/o buffer. If all buffer pages are
resident, uiomove() is successfull and request is finished. If EFAULT
is returned from uiomove(), the pages backing i/o buffer are faulted
in and held, and the copyin/out is performed using uiomove_fromphys()
over the held pages for the second attempt of VOP call.
Since pages are hold in chunks to prevent large i/o requests from
starving free pages pool, and since vnode lock is only taken for
i/o over the current chunk, the vnode lock no longer protect atomicity
of the whole i/o request. Use newly added rangelocks to provide the
required atomicity of i/o regardind other i/o and truncations.
Filesystems need to explicitely opt-in into the scheme, by setting the
MNTK_NO_IOPF struct mount flag, and optionally by using
vn_io_fault_uiomove(9) helper which takes care of calling uiomove() or
converting uio into request for uiomove_fromphys().
Reviewed by: bf (comments), mdf, pjd (previous version)
Tested by: pho
Tested by: flo, Gustau P?rez <gperez entel upc edu> (previous version)
MFC after: 2 months
2012-05-30 16:42:08 +00:00
|
|
|
static const int io_hold_cnt = 16;
|
2012-06-03 16:19:37 +00:00
|
|
|
static int vn_io_fault_enable = 1;
|
|
|
|
SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_enable, CTLFLAG_RW,
|
|
|
|
&vn_io_fault_enable, 0, "Enable vn_io_fault lock avoidance");
|
2012-06-03 16:06:56 +00:00
|
|
|
static unsigned long vn_io_faults_cnt;
|
|
|
|
SYSCTL_LONG(_debug, OID_AUTO, vn_io_faults, CTLFLAG_RD,
|
|
|
|
&vn_io_faults_cnt, 0, "Count of vn_io_fault lock avoidance triggers");
|
vn_io_fault() is a facility to prevent page faults while filesystems
perform copyin/copyout of the file data into the usermode
buffer. Typical filesystem hold vnode lock and some buffer locks over
the VOP_READ() and VOP_WRITE() operations, and since page fault
handler may need to recurse into VFS to get the page content, a
deadlock is possible.
The facility works by disabling page faults handling for the current
thread and attempting to execute i/o while allowing uiomove() to
access the usermode mapping of the i/o buffer. If all buffer pages are
resident, uiomove() is successfull and request is finished. If EFAULT
is returned from uiomove(), the pages backing i/o buffer are faulted
in and held, and the copyin/out is performed using uiomove_fromphys()
over the held pages for the second attempt of VOP call.
Since pages are hold in chunks to prevent large i/o requests from
starving free pages pool, and since vnode lock is only taken for
i/o over the current chunk, the vnode lock no longer protect atomicity
of the whole i/o request. Use newly added rangelocks to provide the
required atomicity of i/o regardind other i/o and truncations.
Filesystems need to explicitely opt-in into the scheme, by setting the
MNTK_NO_IOPF struct mount flag, and optionally by using
vn_io_fault_uiomove(9) helper which takes care of calling uiomove() or
converting uio into request for uiomove_fromphys().
Reviewed by: bf (comments), mdf, pjd (previous version)
Tested by: pho
Tested by: flo, Gustau P?rez <gperez entel upc edu> (previous version)
MFC after: 2 months
2012-05-30 16:42:08 +00:00
|
|
|
|
2008-01-07 20:05:19 +00:00
|
|
|
/*
|
vn_io_fault() is a facility to prevent page faults while filesystems
perform copyin/copyout of the file data into the usermode
buffer. Typical filesystem hold vnode lock and some buffer locks over
the VOP_READ() and VOP_WRITE() operations, and since page fault
handler may need to recurse into VFS to get the page content, a
deadlock is possible.
The facility works by disabling page faults handling for the current
thread and attempting to execute i/o while allowing uiomove() to
access the usermode mapping of the i/o buffer. If all buffer pages are
resident, uiomove() is successfull and request is finished. If EFAULT
is returned from uiomove(), the pages backing i/o buffer are faulted
in and held, and the copyin/out is performed using uiomove_fromphys()
over the held pages for the second attempt of VOP call.
Since pages are hold in chunks to prevent large i/o requests from
starving free pages pool, and since vnode lock is only taken for
i/o over the current chunk, the vnode lock no longer protect atomicity
of the whole i/o request. Use newly added rangelocks to provide the
required atomicity of i/o regardind other i/o and truncations.
Filesystems need to explicitely opt-in into the scheme, by setting the
MNTK_NO_IOPF struct mount flag, and optionally by using
vn_io_fault_uiomove(9) helper which takes care of calling uiomove() or
converting uio into request for uiomove_fromphys().
Reviewed by: bf (comments), mdf, pjd (previous version)
Tested by: pho
Tested by: flo, Gustau P?rez <gperez entel upc edu> (previous version)
MFC after: 2 months
2012-05-30 16:42:08 +00:00
|
|
|
* The vn_io_fault() is a wrapper around vn_read() and vn_write() to
|
|
|
|
* prevent the following deadlock:
|
|
|
|
*
|
|
|
|
* Assume that the thread A reads from the vnode vp1 into userspace
|
|
|
|
* buffer buf1 backed by the pages of vnode vp2. If a page in buf1 is
|
|
|
|
* currently not resident, then system ends up with the call chain
|
|
|
|
* vn_read() -> VOP_READ(vp1) -> uiomove() -> [Page Fault] ->
|
|
|
|
* vm_fault(buf1) -> vnode_pager_getpages(vp2) -> VOP_GETPAGES(vp2)
|
|
|
|
* which establishes lock order vp1->vn_lock, then vp2->vn_lock.
|
|
|
|
* If, at the same time, thread B reads from vnode vp2 into buffer buf2
|
|
|
|
* backed by the pages of vnode vp1, and some page in buf2 is not
|
|
|
|
* resident, we get a reversed order vp2->vn_lock, then vp1->vn_lock.
|
|
|
|
*
|
|
|
|
* To prevent the lock order reversal and deadlock, vn_io_fault() does
|
|
|
|
* not allow page faults to happen during VOP_READ() or VOP_WRITE().
|
|
|
|
* Instead, it first tries to do the whole range i/o with pagefaults
|
|
|
|
* disabled. If all pages in the i/o buffer are resident and mapped,
|
|
|
|
* VOP will succeed (ignoring the genuine filesystem errors).
|
|
|
|
* Otherwise, we get back EFAULT, and vn_io_fault() falls back to do
|
|
|
|
* i/o in chunks, with all pages in the chunk prefaulted and held
|
|
|
|
* using vm_fault_quick_hold_pages().
|
|
|
|
*
|
|
|
|
* Filesystems using this deadlock avoidance scheme should use the
|
|
|
|
* array of the held pages from uio, saved in the curthread->td_ma,
|
|
|
|
* instead of doing uiomove(). A helper function
|
|
|
|
* vn_io_fault_uiomove() converts uiomove request into
|
|
|
|
* uiomove_fromphys() over td_ma array.
|
|
|
|
*
|
|
|
|
* Since vnode locks do not cover the whole i/o anymore, rangelocks
|
|
|
|
* make the current i/o request atomic with respect to other i/os and
|
|
|
|
* truncations.
|
2008-01-07 20:05:19 +00:00
|
|
|
*/
|
|
|
|
/*
 * vn_io_fault() prevents page faults while a filesystem performs
 * copyin/copyout of file data into a usermode buffer.  Filesystems
 * typically hold the vnode lock and buffer locks across VOP_READ()
 * and VOP_WRITE(), and the page fault handler may need to recurse
 * into VFS to obtain the page content, so taking a fault there can
 * deadlock.
 *
 * The i/o is first attempted with page faults disabled for the
 * current thread.  If uiomove() returns EFAULT, the pages backing the
 * user buffer are faulted in and held in chunks, and the VOP is
 * retried over each chunk with the held pages published through
 * td->td_ma for uiomove_fromphys().  Because the vnode lock is only
 * held per-chunk, a rangelock over the i/o region provides the
 * whole-request atomicity instead.  Filesystems opt in by setting
 * MNTK_NO_IOPF on their mount.
 */
static int
vn_io_fault(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	/* +2: a misaligned chunk may touch partial pages at both ends. */
	vm_page_t ma[io_hold_cnt + 2];
	struct uio *uio_clone, short_uio;
	struct iovec short_iovec[1];
	fo_rdwr_t *doio;		/* vn_read or vn_write */
	struct vnode *vp;
	void *rl_cookie;		/* rangelock cookie for the i/o span */
	struct mount *mp;
	vm_page_t *prev_td_ma;		/* saved td_ma to restore on exit */
	int cnt, error, save, saveheld, prev_td_ma_cnt;
	vm_offset_t addr, end;
	vm_prot_t prot;			/* access needed on the user pages */
	size_t len, resid;
	ssize_t adv;			/* bytes actually moved by one chunk */

	if (uio->uio_rw == UIO_READ)
		doio = vn_read;
	else
		doio = vn_write;
	vp = fp->f_vnode;

	/*
	 * f_offset is protected by the FOFFSET_LOCKED block, which is
	 * before rangelocks in the lock order; it is taken here even
	 * when the fault-avoidance path below is not used, so that
	 * uio_offset is valid before the rangelock region is computed.
	 */
	foffset_lock_uio(fp, uio, flags);

	/*
	 * Fall back to a plain VOP call unless the buffer is in
	 * userspace, the vnode is a regular file, the filesystem has
	 * opted in via MNTK_NO_IOPF, and the facility is enabled.
	 * FOF_OFFSET tells vn_read/vn_write that f_offset is already
	 * locked and uio_offset is valid.
	 */
	if (uio->uio_segflg != UIO_USERSPACE || vp->v_type != VREG ||
	    ((mp = vp->v_mount) != NULL &&
	    (mp->mnt_kern_flag & MNTK_NO_IOPF) == 0) ||
	    !vn_io_fault_enable) {
		error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td);
		goto out_last;
	}

	/*
	 * The UFS follows IO_UNIT directive and replays back both
	 * uio_offset and uio_resid if an error is encountered during the
	 * operation.  But, since the iovec may be already advanced,
	 * uio is still in an inconsistent state.
	 *
	 * Cache a copy of the original uio, which is advanced to the redo
	 * point using UIO_NOCOPY below.
	 */
	uio_clone = cloneuio(uio);
	resid = uio->uio_resid;

	short_uio.uio_segflg = UIO_USERSPACE;
	short_uio.uio_rw = uio->uio_rw;
	short_uio.uio_td = uio->uio_td;

	/*
	 * A read stores into the user buffer (VM_PROT_WRITE on its
	 * pages) and allows concurrent readers of the file range; a
	 * write needs the range exclusively.  Appenders and callers
	 * without a valid offset cannot bound the region, so lock the
	 * whole file.
	 */
	if (uio->uio_rw == UIO_READ) {
		prot = VM_PROT_WRITE;
		rl_cookie = vn_rangelock_rlock(vp, uio->uio_offset,
		    uio->uio_offset + uio->uio_resid);
	} else {
		prot = VM_PROT_READ;
		if ((fp->f_flag & O_APPEND) != 0 || (flags & FOF_OFFSET) == 0)
			/* For appenders, punt and lock the whole range. */
			rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
		else
			rl_cookie = vn_rangelock_wlock(vp, uio->uio_offset,
			    uio->uio_offset + uio->uio_resid);
	}

	/* First attempt: whole request with page faults disabled. */
	save = vm_fault_disable_pagefaults();
	error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td);
	if (error != EFAULT)
		goto out;

	atomic_add_long(&vn_io_faults_cnt, 1);

	/*
	 * Advance the clone past whatever the first attempt completed,
	 * without copying any data (UIO_NOCOPY), so the retry resumes
	 * at the redo point.
	 */
	uio_clone->uio_segflg = UIO_NOCOPY;
	uiomove(NULL, resid - uio->uio_resid, uio_clone);
	uio_clone->uio_segflg = uio->uio_segflg;

	/*
	 * TDP_UIOHELD announces that td_ma holds the buffer pages, so
	 * vn_io_fault_uiomove() uses uiomove_fromphys() instead of
	 * touching the (possibly unmapped) user addresses.
	 */
	saveheld = curthread_pflags_set(TDP_UIOHELD);
	prev_td_ma = td->td_ma;
	prev_td_ma_cnt = td->td_ma_cnt;

	/* Retry the remainder in held-page chunks of at most io_hold_cnt. */
	while (uio_clone->uio_resid != 0) {
		len = uio_clone->uio_iov->iov_len;
		if (len == 0) {
			/* Skip exhausted iovec entries. */
			KASSERT(uio_clone->uio_iovcnt >= 1,
			    ("iovcnt underflow"));
			uio_clone->uio_iov++;
			uio_clone->uio_iovcnt--;
			continue;
		}

		addr = (vm_offset_t)uio_clone->uio_iov->iov_base;
		end = round_page(addr + len);
		cnt = howmany(end - trunc_page(addr), PAGE_SIZE);
		/*
		 * A perfectly misaligned address and length could cause
		 * both the start and the end of the chunk to use partial
		 * page.  +2 accounts for such a situation.
		 */
		if (cnt > io_hold_cnt + 2) {
			len = io_hold_cnt * PAGE_SIZE;
			KASSERT(howmany(round_page(addr + len) -
			    trunc_page(addr), PAGE_SIZE) <= io_hold_cnt + 2,
			    ("cnt overflow"));
		}
		/* Fault in and wire-hold the chunk's pages. */
		cnt = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map,
		    addr, len, prot, ma, io_hold_cnt + 2);
		if (cnt == -1) {
			/* Truly invalid user address: give up. */
			error = EFAULT;
			break;
		}
		/* Describe just this chunk to the VOP. */
		short_uio.uio_iov = &short_iovec[0];
		short_iovec[0].iov_base = (void *)addr;
		short_uio.uio_iovcnt = 1;
		short_uio.uio_resid = short_iovec[0].iov_len = len;
		short_uio.uio_offset = uio_clone->uio_offset;
		td->td_ma = ma;
		td->td_ma_cnt = cnt;

		error = doio(fp, &short_uio, active_cred, flags | FOF_OFFSET,
		    td);
		vm_page_unhold_pages(ma, cnt);
		adv = len - short_uio.uio_resid;

		/* Advance both the clone and the caller's uio by adv. */
		uio_clone->uio_iov->iov_base =
		    (char *)uio_clone->uio_iov->iov_base + adv;
		uio_clone->uio_iov->iov_len -= adv;
		uio_clone->uio_resid -= adv;
		uio_clone->uio_offset += adv;

		uio->uio_resid -= adv;
		uio->uio_offset += adv;

		/* Stop on error, or if the VOP made no progress. */
		if (error != 0 || adv == 0)
			break;
	}
	td->td_ma = prev_td_ma;
	td->td_ma_cnt = prev_td_ma_cnt;
	curthread_pflags_restore(saveheld);
out:
	vm_fault_enable_pagefaults(save);
	vn_rangelock_unlock(vp, rl_cookie);
	free(uio_clone, M_IOV);
out_last:
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}
|
|
|
|
|
|
|
|
/*
 * Helper function to perform the requested uiomove operation using
 * the held pages for io->uio_iov[0].iov_base buffer instead of
 * copyin/copyout.  Access to the pages with uiomove_fromphys()
 * instead of iov_base prevents page faults that could occur due to
 * pmap_collect() invalidating the mapping created by
 * vm_fault_quick_hold_pages(), or pageout daemon, page laundry or
 * object cleanup revoking the write access from page mappings.
 *
 * Filesystems specified MNTK_NO_IOPF shall use vn_io_fault_uiomove()
 * instead of plain uiomove().
 */
int
vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio)
{
	struct uio transp_uio;		/* transposed request over 'data' */
	struct iovec transp_iov[1];
	struct thread *td;
	size_t adv;			/* bytes actually transferred */
	int error, pgadv;		/* pages consumed from td_ma */

	td = curthread;
	/*
	 * Without held pages (TDP_UIOHELD) or for a kernel-space uio,
	 * the plain uiomove() is safe and correct.
	 */
	if ((td->td_pflags & TDP_UIOHELD) == 0 ||
	    uio->uio_segflg != UIO_USERSPACE)
		return (uiomove(data, xfersize, uio));

	/* vn_io_fault() always hands the VOP a single-iovec uio. */
	KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
	transp_iov[0].iov_base = data;
	transp_uio.uio_iov = &transp_iov[0];
	transp_uio.uio_iovcnt = 1;
	if (xfersize > uio->uio_resid)
		xfersize = uio->uio_resid;
	transp_uio.uio_resid = transp_iov[0].iov_len = xfersize;
	transp_uio.uio_offset = 0;
	transp_uio.uio_segflg = UIO_SYSSPACE;
	/*
	 * Since transp_iov points to data, and td_ma page array
	 * corresponds to original uio->uio_iov, we need to invert the
	 * direction of the i/o operation as passed to
	 * uiomove_fromphys().
	 */
	switch (uio->uio_rw) {
	case UIO_WRITE:
		transp_uio.uio_rw = UIO_READ;
		break;
	case UIO_READ:
		transp_uio.uio_rw = UIO_WRITE;
		break;
	}
	transp_uio.uio_td = uio->uio_td;
	/*
	 * Move the data between 'data' and the held pages; the page
	 * offset of the user buffer selects where in td_ma[0] to start.
	 */
	error = uiomove_fromphys(td->td_ma,
	    ((vm_offset_t)uio->uio_iov->iov_base) & PAGE_MASK,
	    xfersize, &transp_uio);
	adv = xfersize - transp_uio.uio_resid;
	/*
	 * Advance td_ma past the pages fully crossed by this transfer
	 * so the next call starts at the correct held page.
	 */
	pgadv =
	    (((vm_offset_t)uio->uio_iov->iov_base + adv) >> PAGE_SHIFT) -
	    (((vm_offset_t)uio->uio_iov->iov_base) >> PAGE_SHIFT);
	td->td_ma += pgadv;
	KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
	    pgadv));
	td->td_ma_cnt -= pgadv;
	/* Mirror the advance into the caller's uio, as uiomove() would. */
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + adv;
	uio->uio_iov->iov_len -= adv;
	uio->uio_resid -= adv;
	uio->uio_offset += adv;
	return (error);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* File table truncate routine.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
vn_truncate(struct file *fp, off_t length, struct ucred *active_cred,
|
|
|
|
struct thread *td)
|
2008-01-07 20:05:19 +00:00
|
|
|
{
|
|
|
|
struct vattr vattr;
|
|
|
|
struct mount *mp;
|
|
|
|
struct vnode *vp;
|
vn_io_fault() is a facility to prevent page faults while filesystems
perform copyin/copyout of the file data into the usermode
buffer. Typical filesystem hold vnode lock and some buffer locks over
the VOP_READ() and VOP_WRITE() operations, and since page fault
handler may need to recurse into VFS to get the page content, a
deadlock is possible.
The facility works by disabling page faults handling for the current
thread and attempting to execute i/o while allowing uiomove() to
access the usermode mapping of the i/o buffer. If all buffer pages are
resident, uiomove() is successfull and request is finished. If EFAULT
is returned from uiomove(), the pages backing i/o buffer are faulted
in and held, and the copyin/out is performed using uiomove_fromphys()
over the held pages for the second attempt of VOP call.
Since pages are hold in chunks to prevent large i/o requests from
starving free pages pool, and since vnode lock is only taken for
i/o over the current chunk, the vnode lock no longer protect atomicity
of the whole i/o request. Use newly added rangelocks to provide the
required atomicity of i/o regardind other i/o and truncations.
Filesystems need to explicitely opt-in into the scheme, by setting the
MNTK_NO_IOPF struct mount flag, and optionally by using
vn_io_fault_uiomove(9) helper which takes care of calling uiomove() or
converting uio into request for uiomove_fromphys().
Reviewed by: bf (comments), mdf, pjd (previous version)
Tested by: pho
Tested by: flo, Gustau P?rez <gperez entel upc edu> (previous version)
MFC after: 2 months
2012-05-30 16:42:08 +00:00
|
|
|
void *rl_cookie;
|
2008-01-07 20:05:19 +00:00
|
|
|
int vfslocked;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
vp = fp->f_vnode;
|
vn_io_fault() is a facility to prevent page faults while filesystems
perform copyin/copyout of the file data into the usermode
buffer. Typical filesystem hold vnode lock and some buffer locks over
the VOP_READ() and VOP_WRITE() operations, and since page fault
handler may need to recurse into VFS to get the page content, a
deadlock is possible.
The facility works by disabling page faults handling for the current
thread and attempting to execute i/o while allowing uiomove() to
access the usermode mapping of the i/o buffer. If all buffer pages are
resident, uiomove() is successfull and request is finished. If EFAULT
is returned from uiomove(), the pages backing i/o buffer are faulted
in and held, and the copyin/out is performed using uiomove_fromphys()
over the held pages for the second attempt of VOP call.
Since pages are hold in chunks to prevent large i/o requests from
starving free pages pool, and since vnode lock is only taken for
i/o over the current chunk, the vnode lock no longer protect atomicity
of the whole i/o request. Use newly added rangelocks to provide the
required atomicity of i/o regardind other i/o and truncations.
Filesystems need to explicitely opt-in into the scheme, by setting the
MNTK_NO_IOPF struct mount flag, and optionally by using
vn_io_fault_uiomove(9) helper which takes care of calling uiomove() or
converting uio into request for uiomove_fromphys().
Reviewed by: bf (comments), mdf, pjd (previous version)
Tested by: pho
Tested by: flo, Gustau P?rez <gperez entel upc edu> (previous version)
MFC after: 2 months
2012-05-30 16:42:08 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Lock the whole range for truncation. Otherwise split i/o
|
|
|
|
* might happen partly before and partly after the truncation.
|
|
|
|
*/
|
|
|
|
rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
|
2008-01-07 20:05:19 +00:00
|
|
|
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
|
|
|
|
error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
|
vn_io_fault() is a facility to prevent page faults while filesystems
perform copyin/copyout of the file data into the usermode
buffer. Typical filesystem hold vnode lock and some buffer locks over
the VOP_READ() and VOP_WRITE() operations, and since page fault
handler may need to recurse into VFS to get the page content, a
deadlock is possible.
The facility works by disabling page faults handling for the current
thread and attempting to execute i/o while allowing uiomove() to
access the usermode mapping of the i/o buffer. If all buffer pages are
resident, uiomove() is successfull and request is finished. If EFAULT
is returned from uiomove(), the pages backing i/o buffer are faulted
in and held, and the copyin/out is performed using uiomove_fromphys()
over the held pages for the second attempt of VOP call.
Since pages are hold in chunks to prevent large i/o requests from
starving free pages pool, and since vnode lock is only taken for
i/o over the current chunk, the vnode lock no longer protect atomicity
of the whole i/o request. Use newly added rangelocks to provide the
required atomicity of i/o regardind other i/o and truncations.
Filesystems need to explicitely opt-in into the scheme, by setting the
MNTK_NO_IOPF struct mount flag, and optionally by using
vn_io_fault_uiomove(9) helper which takes care of calling uiomove() or
converting uio into request for uiomove_fromphys().
Reviewed by: bf (comments), mdf, pjd (previous version)
Tested by: pho
Tested by: flo, Gustau P?rez <gperez entel upc edu> (previous version)
MFC after: 2 months
2012-05-30 16:42:08 +00:00
|
|
|
if (error)
|
|
|
|
goto out1;
|
2008-01-10 01:10:58 +00:00
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
|
2008-01-07 20:05:19 +00:00
|
|
|
if (vp->v_type == VDIR) {
|
|
|
|
error = EISDIR;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
#ifdef MAC
|
|
|
|
error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
#endif
|
|
|
|
error = vn_writechk(vp);
|
|
|
|
if (error == 0) {
|
|
|
|
VATTR_NULL(&vattr);
|
|
|
|
vattr.va_size = length;
|
2008-08-28 15:23:18 +00:00
|
|
|
error = VOP_SETATTR(vp, &vattr, fp->f_cred);
|
2008-01-07 20:05:19 +00:00
|
|
|
}
|
|
|
|
out:
|
2008-01-13 14:44:15 +00:00
|
|
|
VOP_UNLOCK(vp, 0);
|
2008-01-07 20:05:19 +00:00
|
|
|
vn_finished_write(mp);
|
vn_io_fault() is a facility to prevent page faults while filesystems
perform copyin/copyout of the file data into the usermode
buffer. Typical filesystems hold the vnode lock and some buffer locks over
the VOP_READ() and VOP_WRITE() operations, and since the page fault
handler may need to recurse into VFS to get the page content, a
deadlock is possible.
The facility works by disabling page fault handling for the current
thread and attempting to execute i/o while allowing uiomove() to
access the usermode mapping of the i/o buffer. If all buffer pages are
resident, uiomove() is successful and the request is finished. If EFAULT
is returned from uiomove(), the pages backing the i/o buffer are faulted
in and held, and the copyin/out is performed using uiomove_fromphys()
over the held pages for the second attempt of the VOP call.
Since pages are held in chunks to prevent large i/o requests from
starving the free pages pool, and since the vnode lock is only taken for
i/o over the current chunk, the vnode lock no longer protects atomicity
of the whole i/o request. Use the newly added rangelocks to provide the
required atomicity of i/o regarding other i/o and truncations.
Filesystems need to explicitly opt in to the scheme, by setting the
MNTK_NO_IOPF struct mount flag, and optionally by using the
vn_io_fault_uiomove(9) helper which takes care of calling uiomove() or
converting the uio into a request for uiomove_fromphys().
Reviewed by:	bf (comments), mdf, pjd (previous version)
Tested by:	pho
Tested by:	flo, Gustau Pérez <gperez entel upc edu> (previous version)
MFC after:	2 months
2012-05-30 16:42:08 +00:00
|
|
|
out1:
|
2008-01-07 20:05:19 +00:00
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
vn_io_fault() is a facility to prevent page faults while filesystems
perform copyin/copyout of the file data into the usermode
buffer. Typical filesystems hold the vnode lock and some buffer locks over
the VOP_READ() and VOP_WRITE() operations, and since the page fault
handler may need to recurse into VFS to get the page content, a
deadlock is possible.
The facility works by disabling page fault handling for the current
thread and attempting to execute i/o while allowing uiomove() to
access the usermode mapping of the i/o buffer. If all buffer pages are
resident, uiomove() is successful and the request is finished. If EFAULT
is returned from uiomove(), the pages backing the i/o buffer are faulted
in and held, and the copyin/out is performed using uiomove_fromphys()
over the held pages for the second attempt of the VOP call.
Since pages are held in chunks to prevent large i/o requests from
starving the free pages pool, and since the vnode lock is only taken for
i/o over the current chunk, the vnode lock no longer protects atomicity
of the whole i/o request. Use the newly added rangelocks to provide the
required atomicity of i/o regarding other i/o and truncations.
Filesystems need to explicitly opt in to the scheme, by setting the
MNTK_NO_IOPF struct mount flag, and optionally by using the
vn_io_fault_uiomove(9) helper which takes care of calling uiomove() or
converting the uio into a request for uiomove_fromphys().
Reviewed by:	bf (comments), mdf, pjd (previous version)
Tested by:	pho
Tested by:	flo, Gustau Pérez <gperez entel upc edu> (previous version)
MFC after:	2 months
2012-05-30 16:42:08 +00:00
|
|
|
vn_rangelock_unlock(vp, rl_cookie);
|
2008-01-07 20:05:19 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* File table vnode stat routine.
|
|
|
|
*/
|
1999-11-08 03:32:15 +00:00
|
|
|
static int
|
Make similar changes to fo_stat() and fo_poll() as made earlier to
fo_read() and fo_write(): explicitly use the cred argument to fo_poll()
as "active_cred" using the passed file descriptor's f_cred reference
to provide access to the file credential. Add an active_cred
argument to fo_stat() so that implementers have access to the active
credential as well as the file credential. Generally modify callers
of fo_stat() to pass in td->td_ucred rather than fp->f_cred, which
was redundantly provided via the fp argument. This set of modifications
also permits threads to perform these operations on behalf of another
thread without modifying their credential.
Trickle this change down into fo_stat/poll() implementations:
- badfo_poll(), badfo_stat(): modify/add arguments.
- kqueue_poll(), kqueue_stat(): modify arguments.
- pipe_poll(), pipe_stat(): modify/add arguments, pass active_cred to
MAC checks rather than td->td_ucred.
- soo_poll(), soo_stat(): modify/add arguments, pass fp->f_cred rather
than cred to pru_sopoll() to maintain current semantics.
- sopoll(): moidfy arguments.
- vn_poll(), vn_statfile(): modify/add arguments, pass new arguments
to vn_stat(). Pass active_cred to MAC and fp->f_cred to VOP_POLL()
to maintian current semantics.
- vn_close(): rename cred to file_cred to reflect reality while I'm here.
- vn_stat(): Add active_cred and file_cred arguments to vn_stat()
and consumers so that this distinction is maintained at the VFS
as well as 'struct file' layer. Pass active_cred instead of
td->td_ucred to MAC and to VOP_GETATTR() to maintain current semantics.
- fifofs: modify the creation of a "filetemp" so that the file
credential is properly initialized and can be used in the socket
code if desired. Pass ap->a_td->td_ucred as the active
credential to soo_poll(). If we teach the vnop interface about
the distinction between file and active credentials, we would use
the active credential here.
Note that current inconsistent passing of active_cred vs. file_cred to
VOP's is maintained. It's not clear why GETATTR would be authorized
using active_cred while POLL would be authorized using file_cred at
the file system level.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-08-16 12:52:03 +00:00
|
|
|
vn_statfile(fp, sb, active_cred, td)
|
1999-11-08 03:32:15 +00:00
|
|
|
struct file *fp;
|
|
|
|
struct stat *sb;
|
Make similar changes to fo_stat() and fo_poll() as made earlier to
fo_read() and fo_write(): explicitly use the cred argument to fo_poll()
as "active_cred" using the passed file descriptor's f_cred reference
to provide access to the file credential. Add an active_cred
argument to fo_stat() so that implementers have access to the active
credential as well as the file credential. Generally modify callers
of fo_stat() to pass in td->td_ucred rather than fp->f_cred, which
was redundantly provided via the fp argument. This set of modifications
also permits threads to perform these operations on behalf of another
thread without modifying their credential.
Trickle this change down into fo_stat/poll() implementations:
- badfo_poll(), badfo_stat(): modify/add arguments.
- kqueue_poll(), kqueue_stat(): modify arguments.
- pipe_poll(), pipe_stat(): modify/add arguments, pass active_cred to
MAC checks rather than td->td_ucred.
- soo_poll(), soo_stat(): modify/add arguments, pass fp->f_cred rather
than cred to pru_sopoll() to maintain current semantics.
- sopoll(): moidfy arguments.
- vn_poll(), vn_statfile(): modify/add arguments, pass new arguments
to vn_stat(). Pass active_cred to MAC and fp->f_cred to VOP_POLL()
to maintian current semantics.
- vn_close(): rename cred to file_cred to reflect reality while I'm here.
- vn_stat(): Add active_cred and file_cred arguments to vn_stat()
and consumers so that this distinction is maintained at the VFS
as well as 'struct file' layer. Pass active_cred instead of
td->td_ucred to MAC and to VOP_GETATTR() to maintain current semantics.
- fifofs: modify the creation of a "filetemp" so that the file
credential is properly initialized and can be used in the socket
code if desired. Pass ap->a_td->td_ucred as the active
credential to soo_poll(). If we teach the vnop interface about
the distinction between file and active credentials, we would use
the active credential here.
Note that current inconsistent passing of active_cred vs. file_cred to
VOP's is maintained. It's not clear why GETATTR would be authorized
using active_cred while POLL would be authorized using file_cred at
the file system level.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-08-16 12:52:03 +00:00
|
|
|
struct ucred *active_cred;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td;
|
1999-11-08 03:32:15 +00:00
|
|
|
{
|
2003-06-22 08:41:43 +00:00
|
|
|
struct vnode *vp = fp->f_vnode;
|
2005-01-24 10:31:42 +00:00
|
|
|
int vfslocked;
|
2002-02-10 21:44:30 +00:00
|
|
|
int error;
|
1999-11-08 03:32:15 +00:00
|
|
|
|
2005-01-24 10:31:42 +00:00
|
|
|
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
|
Use shared vnode locks instead of exclusive vnode locks for the access(),
chdir(), chroot(), eaccess(), fpathconf(), fstat(), fstatfs(), lseek()
(when figuring out the current size of the file in the SEEK_END case),
pathconf(), readlink(), and statfs() system calls.
Submitted by: ups (mostly)
Tested by: pho
MFC after: 1 month
2008-11-03 20:31:00 +00:00
|
|
|
vn_lock(vp, LK_SHARED | LK_RETRY);
|
Make similar changes to fo_stat() and fo_poll() as made earlier to
fo_read() and fo_write(): explicitly use the cred argument to fo_poll()
as "active_cred" using the passed file descriptor's f_cred reference
to provide access to the file credential. Add an active_cred
argument to fo_stat() so that implementers have access to the active
credential as well as the file credential. Generally modify callers
of fo_stat() to pass in td->td_ucred rather than fp->f_cred, which
was redundantly provided via the fp argument. This set of modifications
also permits threads to perform these operations on behalf of another
thread without modifying their credential.
Trickle this change down into fo_stat/poll() implementations:
- badfo_poll(), badfo_stat(): modify/add arguments.
- kqueue_poll(), kqueue_stat(): modify arguments.
- pipe_poll(), pipe_stat(): modify/add arguments, pass active_cred to
MAC checks rather than td->td_ucred.
- soo_poll(), soo_stat(): modify/add arguments, pass fp->f_cred rather
than cred to pru_sopoll() to maintain current semantics.
- sopoll(): moidfy arguments.
- vn_poll(), vn_statfile(): modify/add arguments, pass new arguments
to vn_stat(). Pass active_cred to MAC and fp->f_cred to VOP_POLL()
to maintian current semantics.
- vn_close(): rename cred to file_cred to reflect reality while I'm here.
- vn_stat(): Add active_cred and file_cred arguments to vn_stat()
and consumers so that this distinction is maintained at the VFS
as well as 'struct file' layer. Pass active_cred instead of
td->td_ucred to MAC and to VOP_GETATTR() to maintain current semantics.
- fifofs: modify the creation of a "filetemp" so that the file
credential is properly initialized and can be used in the socket
code if desired. Pass ap->a_td->td_ucred as the active
credential to soo_poll(). If we teach the vnop interface about
the distinction between file and active credentials, we would use
the active credential here.
Note that current inconsistent passing of active_cred vs. file_cred to
VOP's is maintained. It's not clear why GETATTR would be authorized
using active_cred while POLL would be authorized using file_cred at
the file system level.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-08-16 12:52:03 +00:00
|
|
|
error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
|
2008-01-13 14:44:15 +00:00
|
|
|
VOP_UNLOCK(vp, 0);
|
2005-01-24 10:31:42 +00:00
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
2002-02-10 21:44:30 +00:00
|
|
|
|
|
|
|
return (error);
|
1999-11-08 03:32:15 +00:00
|
|
|
}
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/*
|
|
|
|
* Stat a vnode; implementation for the stat syscall
|
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
Make similar changes to fo_stat() and fo_poll() as made earlier to
fo_read() and fo_write(): explicitly use the cred argument to fo_poll()
as "active_cred" using the passed file descriptor's f_cred reference
to provide access to the file credential. Add an active_cred
argument to fo_stat() so that implementers have access to the active
credential as well as the file credential. Generally modify callers
of fo_stat() to pass in td->td_ucred rather than fp->f_cred, which
was redundantly provided via the fp argument. This set of modifications
also permits threads to perform these operations on behalf of another
thread without modifying their credential.
Trickle this change down into fo_stat/poll() implementations:
- badfo_poll(), badfo_stat(): modify/add arguments.
- kqueue_poll(), kqueue_stat(): modify arguments.
- pipe_poll(), pipe_stat(): modify/add arguments, pass active_cred to
MAC checks rather than td->td_ucred.
- soo_poll(), soo_stat(): modify/add arguments, pass fp->f_cred rather
than cred to pru_sopoll() to maintain current semantics.
- sopoll(): moidfy arguments.
- vn_poll(), vn_statfile(): modify/add arguments, pass new arguments
to vn_stat(). Pass active_cred to MAC and fp->f_cred to VOP_POLL()
to maintian current semantics.
- vn_close(): rename cred to file_cred to reflect reality while I'm here.
- vn_stat(): Add active_cred and file_cred arguments to vn_stat()
and consumers so that this distinction is maintained at the VFS
as well as 'struct file' layer. Pass active_cred instead of
td->td_ucred to MAC and to VOP_GETATTR() to maintain current semantics.
- fifofs: modify the creation of a "filetemp" so that the file
credential is properly initialized and can be used in the socket
code if desired. Pass ap->a_td->td_ucred as the active
credential to soo_poll(). If we teach the vnop interface about
the distinction between file and active credentials, we would use
the active credential here.
Note that current inconsistent passing of active_cred vs. file_cred to
VOP's is maintained. It's not clear why GETATTR would be authorized
using active_cred while POLL would be authorized using file_cred at
the file system level.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-08-16 12:52:03 +00:00
|
|
|
vn_stat(vp, sb, active_cred, file_cred, td)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vnode *vp;
|
|
|
|
register struct stat *sb;
|
Make similar changes to fo_stat() and fo_poll() as made earlier to
fo_read() and fo_write(): explicitly use the cred argument to fo_poll()
as "active_cred" using the passed file descriptor's f_cred reference
to provide access to the file credential. Add an active_cred
argument to fo_stat() so that implementers have access to the active
credential as well as the file credential. Generally modify callers
of fo_stat() to pass in td->td_ucred rather than fp->f_cred, which
was redundantly provided via the fp argument. This set of modifications
also permits threads to perform these operations on behalf of another
thread without modifying their credential.
Trickle this change down into fo_stat/poll() implementations:
- badfo_poll(), badfo_stat(): modify/add arguments.
- kqueue_poll(), kqueue_stat(): modify arguments.
- pipe_poll(), pipe_stat(): modify/add arguments, pass active_cred to
MAC checks rather than td->td_ucred.
- soo_poll(), soo_stat(): modify/add arguments, pass fp->f_cred rather
than cred to pru_sopoll() to maintain current semantics.
- sopoll(): moidfy arguments.
- vn_poll(), vn_statfile(): modify/add arguments, pass new arguments
to vn_stat(). Pass active_cred to MAC and fp->f_cred to VOP_POLL()
to maintian current semantics.
- vn_close(): rename cred to file_cred to reflect reality while I'm here.
- vn_stat(): Add active_cred and file_cred arguments to vn_stat()
and consumers so that this distinction is maintained at the VFS
as well as 'struct file' layer. Pass active_cred instead of
td->td_ucred to MAC and to VOP_GETATTR() to maintain current semantics.
- fifofs: modify the creation of a "filetemp" so that the file
credential is properly initialized and can be used in the socket
code if desired. Pass ap->a_td->td_ucred as the active
credential to soo_poll(). If we teach the vnop interface about
the distinction between file and active credentials, we would use
the active credential here.
Note that current inconsistent passing of active_cred vs. file_cred to
VOP's is maintained. It's not clear why GETATTR would be authorized
using active_cred while POLL would be authorized using file_cred at
the file system level.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-08-16 12:52:03 +00:00
|
|
|
struct ucred *active_cred;
|
|
|
|
struct ucred *file_cred;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct vattr vattr;
|
|
|
|
register struct vattr *vap;
|
|
|
|
int error;
|
|
|
|
u_short mode;
|
|
|
|
|
2002-08-01 17:23:22 +00:00
|
|
|
#ifdef MAC
|
2007-10-24 19:04:04 +00:00
|
|
|
error = mac_vnode_check_stat(active_cred, file_cred, vp);
|
2002-08-01 17:23:22 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
#endif
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
vap = &vattr;
|
2008-09-20 19:43:22 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize defaults for new and unusual fields, so that file
|
|
|
|
* systems which don't support these fields don't need to know
|
|
|
|
* about them.
|
|
|
|
*/
|
|
|
|
vap->va_birthtime.tv_sec = -1;
|
|
|
|
vap->va_birthtime.tv_nsec = 0;
|
2008-09-20 19:48:24 +00:00
|
|
|
vap->va_fsid = VNOVAL;
|
|
|
|
vap->va_rdev = NODEV;
|
2008-09-20 19:43:22 +00:00
|
|
|
|
2008-08-28 15:23:18 +00:00
|
|
|
error = VOP_GETATTR(vp, vap, active_cred);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
1999-11-18 08:14:20 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Zero the spare stat fields
|
|
|
|
*/
|
2002-06-24 07:14:44 +00:00
|
|
|
bzero(sb, sizeof *sb);
|
1999-11-18 08:14:20 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Copy from vattr table
|
|
|
|
*/
|
1999-07-02 16:29:47 +00:00
|
|
|
if (vap->va_fsid != VNOVAL)
|
|
|
|
sb->st_dev = vap->va_fsid;
|
|
|
|
else
|
|
|
|
sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
|
1994-05-24 10:09:53 +00:00
|
|
|
sb->st_ino = vap->va_fileid;
|
|
|
|
mode = vap->va_mode;
|
1998-06-27 06:43:09 +00:00
|
|
|
switch (vap->va_type) {
|
1994-05-24 10:09:53 +00:00
|
|
|
case VREG:
|
|
|
|
mode |= S_IFREG;
|
|
|
|
break;
|
|
|
|
case VDIR:
|
|
|
|
mode |= S_IFDIR;
|
|
|
|
break;
|
|
|
|
case VBLK:
|
|
|
|
mode |= S_IFBLK;
|
|
|
|
break;
|
|
|
|
case VCHR:
|
|
|
|
mode |= S_IFCHR;
|
|
|
|
break;
|
|
|
|
case VLNK:
|
|
|
|
mode |= S_IFLNK;
|
|
|
|
break;
|
|
|
|
case VSOCK:
|
|
|
|
mode |= S_IFSOCK;
|
|
|
|
break;
|
|
|
|
case VFIFO:
|
|
|
|
mode |= S_IFIFO;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return (EBADF);
|
|
|
|
};
|
|
|
|
sb->st_mode = mode;
|
|
|
|
sb->st_nlink = vap->va_nlink;
|
|
|
|
sb->st_uid = vap->va_uid;
|
|
|
|
sb->st_gid = vap->va_gid;
|
|
|
|
sb->st_rdev = vap->va_rdev;
|
2001-08-23 17:56:48 +00:00
|
|
|
if (vap->va_size > OFF_MAX)
|
|
|
|
return (EOVERFLOW);
|
1994-05-24 10:09:53 +00:00
|
|
|
sb->st_size = vap->va_size;
|
2010-03-28 13:13:22 +00:00
|
|
|
sb->st_atim = vap->va_atime;
|
|
|
|
sb->st_mtim = vap->va_mtime;
|
|
|
|
sb->st_ctim = vap->va_ctime;
|
|
|
|
sb->st_birthtim = vap->va_birthtime;
|
1999-09-09 19:08:44 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* According to www.opengroup.org, the meaning of st_blksize is
|
|
|
|
* "a filesystem-specific preferred I/O block size for this
|
|
|
|
* object. In some filesystem types, this may vary from file
|
|
|
|
* to file"
|
2010-04-03 08:39:00 +00:00
|
|
|
* Use miminum/default of PAGE_SIZE (e.g. for VCHR).
|
1999-08-13 10:56:07 +00:00
|
|
|
*/
|
1999-09-09 19:08:44 +00:00
|
|
|
|
2010-04-03 08:39:00 +00:00
|
|
|
sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);
|
1999-09-09 19:08:44 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
sb->st_flags = vap->va_flags;
|
2006-11-06 13:42:10 +00:00
|
|
|
if (priv_check(td, PRIV_VFS_GENERATION))
|
1997-03-08 15:14:30 +00:00
|
|
|
sb->st_gen = 0;
|
|
|
|
else
|
|
|
|
sb->st_gen = vap->va_gen;
|
1997-03-07 07:42:41 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
sb->st_blocks = vap->va_bytes / S_BLKSIZE;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* File table vnode ioctl routine.
|
|
|
|
*/
|
1995-12-17 21:23:44 +00:00
|
|
|
static int
|
2002-08-17 02:36:16 +00:00
|
|
|
vn_ioctl(fp, com, data, active_cred, td)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct file *fp;
|
1998-06-07 17:13:14 +00:00
|
|
|
u_long com;
|
2002-06-29 01:50:25 +00:00
|
|
|
void *data;
|
2002-08-17 02:36:16 +00:00
|
|
|
struct ucred *active_cred;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2003-06-22 08:41:43 +00:00
|
|
|
struct vnode *vp = fp->f_vnode;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vattr vattr;
|
2005-01-24 10:31:42 +00:00
|
|
|
int vfslocked;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
|
|
|
|
2005-01-24 10:31:42 +00:00
|
|
|
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
|
2004-11-17 09:09:55 +00:00
|
|
|
error = ENOTTY;
|
1994-05-24 10:09:53 +00:00
|
|
|
switch (vp->v_type) {
|
|
|
|
case VREG:
|
|
|
|
case VDIR:
|
|
|
|
if (com == FIONREAD) {
|
2008-01-10 01:10:58 +00:00
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
|
2008-08-28 15:23:18 +00:00
|
|
|
error = VOP_GETATTR(vp, &vattr, active_cred);
|
2008-01-13 14:44:15 +00:00
|
|
|
VOP_UNLOCK(vp, 0);
|
2004-11-17 09:09:55 +00:00
|
|
|
if (!error)
|
|
|
|
*(int *)data = vattr.va_size - fp->f_offset;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
if (com == FIONBIO || com == FIOASYNC) /* XXX */
|
2004-11-17 09:09:55 +00:00
|
|
|
error = 0;
|
2004-11-18 17:15:04 +00:00
|
|
|
else
|
|
|
|
error = VOP_IOCTL(vp, com, data, fp->f_flag,
|
|
|
|
active_cred, td);
|
2004-11-17 09:09:55 +00:00
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
default:
|
2004-11-17 09:09:55 +00:00
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2005-01-24 10:31:42 +00:00
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
2004-11-17 09:09:55 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
1997-09-14 02:51:16 +00:00
|
|
|
* File table vnode poll routine.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1995-12-17 21:23:44 +00:00
|
|
|
static int
|
Make similar changes to fo_stat() and fo_poll() as made earlier to
fo_read() and fo_write(): explicitly use the cred argument to fo_poll()
as "active_cred" using the passed file descriptor's f_cred reference
to provide access to the file credential. Add an active_cred
argument to fo_stat() so that implementers have access to the active
credential as well as the file credential. Generally modify callers
of fo_stat() to pass in td->td_ucred rather than fp->f_cred, which
was redundantly provided via the fp argument. This set of modifications
also permits threads to perform these operations on behalf of another
thread without modifying their credential.
Trickle this change down into fo_stat/poll() implementations:
- badfo_poll(), badfo_stat(): modify/add arguments.
- kqueue_poll(), kqueue_stat(): modify arguments.
- pipe_poll(), pipe_stat(): modify/add arguments, pass active_cred to
MAC checks rather than td->td_ucred.
- soo_poll(), soo_stat(): modify/add arguments, pass fp->f_cred rather
than cred to pru_sopoll() to maintain current semantics.
- sopoll(): moidfy arguments.
- vn_poll(), vn_statfile(): modify/add arguments, pass new arguments
to vn_stat(). Pass active_cred to MAC and fp->f_cred to VOP_POLL()
to maintian current semantics.
- vn_close(): rename cred to file_cred to reflect reality while I'm here.
- vn_stat(): Add active_cred and file_cred arguments to vn_stat()
and consumers so that this distinction is maintained at the VFS
as well as 'struct file' layer. Pass active_cred instead of
td->td_ucred to MAC and to VOP_GETATTR() to maintain current semantics.
- fifofs: modify the creation of a "filetemp" so that the file
credential is properly initialized and can be used in the socket
code if desired. Pass ap->a_td->td_ucred as the active
credential to soo_poll(). If we teach the vnop interface about
the distinction between file and active credentials, we would use
the active credential here.
Note that current inconsistent passing of active_cred vs. file_cred to
VOP's is maintained. It's not clear why GETATTR would be authorized
using active_cred while POLL would be authorized using file_cred at
the file system level.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-08-16 12:52:03 +00:00
|
|
|
vn_poll(fp, events, active_cred, td)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct file *fp;
|
1997-09-14 02:51:16 +00:00
|
|
|
int events;
|
Make similar changes to fo_stat() and fo_poll() as made earlier to
fo_read() and fo_write(): explicitly use the cred argument to fo_poll()
as "active_cred" using the passed file descriptor's f_cred reference
to provide access to the file credential. Add an active_cred
argument to fo_stat() so that implementers have access to the active
credential as well as the file credential. Generally modify callers
of fo_stat() to pass in td->td_ucred rather than fp->f_cred, which
was redundantly provided via the fp argument. This set of modifications
also permits threads to perform these operations on behalf of another
thread without modifying their credential.
Trickle this change down into fo_stat/poll() implementations:
- badfo_poll(), badfo_stat(): modify/add arguments.
- kqueue_poll(), kqueue_stat(): modify arguments.
- pipe_poll(), pipe_stat(): modify/add arguments, pass active_cred to
MAC checks rather than td->td_ucred.
- soo_poll(), soo_stat(): modify/add arguments, pass fp->f_cred rather
than cred to pru_sopoll() to maintain current semantics.
- sopoll(): moidfy arguments.
- vn_poll(), vn_statfile(): modify/add arguments, pass new arguments
to vn_stat(). Pass active_cred to MAC and fp->f_cred to VOP_POLL()
to maintian current semantics.
- vn_close(): rename cred to file_cred to reflect reality while I'm here.
- vn_stat(): Add active_cred and file_cred arguments to vn_stat()
and consumers so that this distinction is maintained at the VFS
as well as 'struct file' layer. Pass active_cred instead of
td->td_ucred to MAC and to VOP_GETATTR() to maintain current semantics.
- fifofs: modify the creation of a "filetemp" so that the file
credential is properly initialized and can be used in the socket
code if desired. Pass ap->a_td->td_ucred as the active
credential to soo_poll(). If we teach the vnop interface about
the distinction between file and active credentials, we would use
the active credential here.
Note that current inconsistent passing of active_cred vs. file_cred to
VOP's is maintained. It's not clear why GETATTR would be authorized
using active_cred while POLL would be authorized using file_cred at
the file system level.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-08-16 12:52:03 +00:00
|
|
|
struct ucred *active_cred;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2002-08-01 17:23:22 +00:00
|
|
|
struct vnode *vp;
|
2006-03-22 00:00:05 +00:00
|
|
|
int vfslocked;
|
2002-08-01 17:23:22 +00:00
|
|
|
int error;
|
|
|
|
|
2003-06-22 08:41:43 +00:00
|
|
|
vp = fp->f_vnode;
|
2006-03-22 00:00:05 +00:00
|
|
|
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
|
2002-08-01 17:23:22 +00:00
|
|
|
#ifdef MAC
|
2008-01-10 01:10:58 +00:00
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
|
2007-10-24 19:04:04 +00:00
|
|
|
error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
|
2008-01-13 14:44:15 +00:00
|
|
|
VOP_UNLOCK(vp, 0);
|
2004-11-15 21:56:42 +00:00
|
|
|
if (!error)
|
2002-08-01 17:23:22 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2004-11-15 21:56:42 +00:00
|
|
|
error = VOP_POLL(vp, events, fp->f_cred, td);
|
2006-03-22 00:00:05 +00:00
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
2004-11-15 21:56:42 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
/*
|
2008-03-29 23:36:26 +00:00
|
|
|
* Acquire the requested lock and then check for validity. LK_RETRY
|
|
|
|
* permits vn_lock to return doomed vnodes.
|
1997-02-10 02:22:35 +00:00
|
|
|
*/
|
|
|
|
int
|
2008-01-10 01:10:58 +00:00
|
|
|
_vn_lock(struct vnode *vp, int flags, char *file, int line)
|
1997-02-10 02:22:35 +00:00
|
|
|
{
|
|
|
|
int error;
|
2001-03-26 12:45:35 +00:00
|
|
|
|
2008-03-29 23:36:26 +00:00
|
|
|
VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
|
|
|
|
("vn_lock called with no locktype."));
|
2008-03-24 04:17:35 +00:00
|
|
|
do {
|
2008-10-20 10:11:33 +00:00
|
|
|
#ifdef DEBUG_VFS_LOCKS
|
|
|
|
KASSERT(vp->v_holdcnt != 0,
|
|
|
|
("vn_lock %p: zero hold count", vp));
|
|
|
|
#endif
|
2008-03-24 04:17:35 +00:00
|
|
|
error = VOP_LOCK1(vp, flags, file, line);
|
|
|
|
flags &= ~LK_INTERLOCK; /* Interlock is always dropped. */
|
2005-06-13 00:47:29 +00:00
|
|
|
KASSERT((flags & LK_RETRY) == 0 || error == 0,
|
2008-11-29 12:40:14 +00:00
|
|
|
("LK_RETRY set with incompatible flags (0x%x) or an error occured (%d)",
|
|
|
|
flags, error));
|
2005-03-13 11:56:28 +00:00
|
|
|
/*
|
|
|
|
* Callers specify LK_RETRY if they wish to get dead vnodes.
|
|
|
|
* If RETRY is not set, we return ENOENT instead.
|
|
|
|
*/
|
2005-04-11 09:23:56 +00:00
|
|
|
if (error == 0 && vp->v_iflag & VI_DOOMED &&
|
2005-04-27 09:17:11 +00:00
|
|
|
(flags & LK_RETRY) == 0) {
|
2008-01-13 14:44:15 +00:00
|
|
|
VOP_UNLOCK(vp, 0);
|
2005-03-13 11:56:28 +00:00
|
|
|
error = ENOENT;
|
|
|
|
break;
|
|
|
|
}
|
2002-08-22 06:58:11 +00:00
|
|
|
} while (flags & LK_RETRY && error != 0);
|
1997-02-10 02:22:35 +00:00
|
|
|
return (error);
|
|
|
|
}
|
1997-10-27 15:26:23 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* File table vnode close routine.
|
|
|
|
*/
|
|
|
|
static int
|
2001-09-12 08:38:13 +00:00
|
|
|
vn_closefile(fp, td)
|
1997-10-27 15:26:23 +00:00
|
|
|
struct file *fp;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td;
|
1997-10-27 15:26:23 +00:00
|
|
|
{
|
2004-06-01 18:03:20 +00:00
|
|
|
struct vnode *vp;
|
|
|
|
struct flock lf;
|
2005-01-24 10:31:42 +00:00
|
|
|
int vfslocked;
|
2004-07-22 18:35:43 +00:00
|
|
|
int error;
|
2004-06-01 18:03:20 +00:00
|
|
|
|
|
|
|
vp = fp->f_vnode;
|
2012-07-31 18:25:00 +00:00
|
|
|
fp->f_ops = &badfileops;
|
2004-06-01 18:03:20 +00:00
|
|
|
|
2005-01-24 10:31:42 +00:00
|
|
|
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
|
2012-07-31 18:25:00 +00:00
|
|
|
if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK)
|
|
|
|
vref(vp);
|
|
|
|
|
|
|
|
error = vn_close(vp, fp->f_flag, fp->f_cred, td);
|
|
|
|
|
2004-06-01 23:36:47 +00:00
|
|
|
if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
|
2004-06-01 18:03:20 +00:00
|
|
|
lf.l_whence = SEEK_SET;
|
|
|
|
lf.l_start = 0;
|
|
|
|
lf.l_len = 0;
|
|
|
|
lf.l_type = F_UNLCK;
|
2005-12-14 00:49:52 +00:00
|
|
|
(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
|
2012-07-31 18:25:00 +00:00
|
|
|
vrele(vp);
|
2004-06-01 18:03:20 +00:00
|
|
|
}
|
2005-01-24 10:31:42 +00:00
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
2004-07-22 18:35:43 +00:00
|
|
|
return (error);
|
1997-10-27 15:26:23 +00:00
|
|
|
}
|
2000-04-16 18:53:38 +00:00
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
/*
|
|
|
|
* Preparing to start a filesystem write operation. If the operation is
|
|
|
|
* permitted, then we bump the count of operations in progress and
|
|
|
|
* proceed. If a suspend request is in progress, we wait until the
|
|
|
|
* suspension is over, and then proceed.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
vn_start_write(vp, mpp, flags)
|
|
|
|
struct vnode *vp;
|
|
|
|
struct mount **mpp;
|
|
|
|
int flags;
|
|
|
|
{
|
|
|
|
struct mount *mp;
|
|
|
|
int error;
|
|
|
|
|
2005-01-24 10:31:42 +00:00
|
|
|
error = 0;
|
2000-07-11 22:07:57 +00:00
|
|
|
/*
|
|
|
|
* If a vnode is provided, get and return the mount point that
|
|
|
|
* to which it will write.
|
|
|
|
*/
|
|
|
|
if (vp != NULL) {
|
|
|
|
if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
|
|
|
|
*mpp = NULL;
|
|
|
|
if (error != EOPNOTSUPP)
|
|
|
|
return (error);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if ((mp = *mpp) == NULL)
|
|
|
|
return (0);
|
2008-11-02 10:15:42 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* VOP_GETWRITEMOUNT() returns with the mp refcount held through
|
|
|
|
* a vfs_ref().
|
|
|
|
* As long as a vnode is not provided we need to acquire a
|
|
|
|
* refcount for the provided mountpoint too, in order to
|
|
|
|
* emulate a vfs_ref().
|
|
|
|
*/
|
2005-01-24 10:31:42 +00:00
|
|
|
MNT_ILOCK(mp);
|
2006-03-31 03:54:20 +00:00
|
|
|
if (vp == NULL)
|
|
|
|
MNT_REF(mp);
|
2008-11-02 10:15:42 +00:00
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
/*
|
|
|
|
* Check on status of suspension.
|
|
|
|
*/
|
2008-09-16 11:51:06 +00:00
|
|
|
if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
|
|
|
|
mp->mnt_susp_owner != curthread) {
|
|
|
|
while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
|
|
|
|
if (flags & V_NOWAIT) {
|
|
|
|
error = EWOULDBLOCK;
|
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
error = msleep(&mp->mnt_flag, MNT_MTX(mp),
|
|
|
|
(PUSER - 1) | (flags & PCATCH), "suspfs", 0);
|
2008-11-16 21:56:29 +00:00
|
|
|
if (error)
|
2008-09-16 11:51:06 +00:00
|
|
|
goto unlock;
|
2005-01-24 10:31:42 +00:00
|
|
|
}
|
2000-07-11 22:07:57 +00:00
|
|
|
}
|
|
|
|
if (flags & V_XSLEEP)
|
2005-01-24 10:31:42 +00:00
|
|
|
goto unlock;
|
2000-07-11 22:07:57 +00:00
|
|
|
mp->mnt_writeopcount++;
|
2005-01-24 10:31:42 +00:00
|
|
|
unlock:
|
2009-09-01 12:05:39 +00:00
|
|
|
if (error != 0 || (flags & V_XSLEEP) != 0)
|
2009-08-31 10:20:52 +00:00
|
|
|
MNT_REL(mp);
|
2005-01-24 10:31:42 +00:00
|
|
|
MNT_IUNLOCK(mp);
|
|
|
|
return (error);
|
2000-07-11 22:07:57 +00:00
|
|
|
}
|
|
|
|
|
2006-03-08 23:43:39 +00:00
|
|
|
/*
|
|
|
|
* Secondary suspension. Used by operations such as vop_inactive
|
|
|
|
* routines that are needed by the higher level functions. These
|
|
|
|
* are allowed to proceed until all the higher level functions have
|
|
|
|
* completed (indicated by mnt_writeopcount dropping to zero). At that
|
|
|
|
* time, these operations are halted until the suspension is over.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
vn_start_secondary_write(vp, mpp, flags)
|
|
|
|
struct vnode *vp;
|
|
|
|
struct mount **mpp;
|
|
|
|
int flags;
|
|
|
|
{
|
|
|
|
struct mount *mp;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
retry:
|
|
|
|
if (vp != NULL) {
|
|
|
|
if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
|
|
|
|
*mpp = NULL;
|
|
|
|
if (error != EOPNOTSUPP)
|
|
|
|
return (error);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* If we are not suspended or have not yet reached suspended
|
|
|
|
* mode, then let the operation proceed.
|
|
|
|
*/
|
|
|
|
if ((mp = *mpp) == NULL)
|
|
|
|
return (0);
|
2008-11-02 10:15:42 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* VOP_GETWRITEMOUNT() returns with the mp refcount held through
|
|
|
|
* a vfs_ref().
|
|
|
|
* As long as a vnode is not provided we need to acquire a
|
|
|
|
* refcount for the provided mountpoint too, in order to
|
|
|
|
* emulate a vfs_ref().
|
|
|
|
*/
|
2006-03-08 23:43:39 +00:00
|
|
|
MNT_ILOCK(mp);
|
2006-03-31 03:54:20 +00:00
|
|
|
if (vp == NULL)
|
|
|
|
MNT_REF(mp);
|
2006-03-11 01:08:37 +00:00
|
|
|
if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
|
2006-03-08 23:43:39 +00:00
|
|
|
mp->mnt_secondary_writes++;
|
|
|
|
mp->mnt_secondary_accwrites++;
|
|
|
|
MNT_IUNLOCK(mp);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
if (flags & V_NOWAIT) {
|
2006-03-31 03:54:20 +00:00
|
|
|
MNT_REL(mp);
|
2006-03-08 23:43:39 +00:00
|
|
|
MNT_IUNLOCK(mp);
|
|
|
|
return (EWOULDBLOCK);
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Wait for the suspension to finish.
|
|
|
|
*/
|
|
|
|
error = msleep(&mp->mnt_flag, MNT_MTX(mp),
|
|
|
|
(PUSER - 1) | (flags & PCATCH) | PDROP, "suspfs", 0);
|
2006-03-31 03:54:20 +00:00
|
|
|
vfs_rel(mp);
|
2006-03-08 23:43:39 +00:00
|
|
|
if (error == 0)
|
|
|
|
goto retry;
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
/*
|
|
|
|
* Filesystem write operation has completed. If we are suspending and this
|
|
|
|
* operation is the last one, notify the suspender that the suspension is
|
|
|
|
* now in effect.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
vn_finished_write(mp)
|
|
|
|
struct mount *mp;
|
|
|
|
{
|
|
|
|
if (mp == NULL)
|
|
|
|
return;
|
2005-01-24 10:31:42 +00:00
|
|
|
MNT_ILOCK(mp);
|
2009-08-31 10:20:52 +00:00
|
|
|
MNT_REL(mp);
|
2000-07-11 22:07:57 +00:00
|
|
|
mp->mnt_writeopcount--;
|
|
|
|
if (mp->mnt_writeopcount < 0)
|
|
|
|
panic("vn_finished_write: neg cnt");
|
|
|
|
if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
|
|
|
|
mp->mnt_writeopcount <= 0)
|
|
|
|
wakeup(&mp->mnt_writeopcount);
|
2005-01-24 10:31:42 +00:00
|
|
|
MNT_IUNLOCK(mp);
|
2000-07-11 22:07:57 +00:00
|
|
|
}
|
|
|
|
|
2006-03-08 23:43:39 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Filesystem secondary write operation has completed. If we are
|
|
|
|
* suspending and this operation is the last one, notify the suspender
|
|
|
|
* that the suspension is now in effect.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
vn_finished_secondary_write(mp)
|
|
|
|
struct mount *mp;
|
|
|
|
{
|
|
|
|
if (mp == NULL)
|
|
|
|
return;
|
|
|
|
MNT_ILOCK(mp);
|
2009-08-31 10:20:52 +00:00
|
|
|
MNT_REL(mp);
|
2006-03-08 23:43:39 +00:00
|
|
|
mp->mnt_secondary_writes--;
|
|
|
|
if (mp->mnt_secondary_writes < 0)
|
|
|
|
panic("vn_finished_secondary_write: neg cnt");
|
|
|
|
if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
|
|
|
|
mp->mnt_secondary_writes <= 0)
|
|
|
|
wakeup(&mp->mnt_secondary_writes);
|
|
|
|
MNT_IUNLOCK(mp);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2000-07-11 22:07:57 +00:00
|
|
|
/*
|
|
|
|
* Request a filesystem to suspend write operations.
|
|
|
|
*/
|
2002-10-25 00:20:37 +00:00
|
|
|
int
|
2000-07-11 22:07:57 +00:00
|
|
|
vfs_write_suspend(mp)
|
|
|
|
struct mount *mp;
|
|
|
|
{
|
2002-10-25 00:20:37 +00:00
|
|
|
int error;
|
2000-07-11 22:07:57 +00:00
|
|
|
|
2005-01-24 10:31:42 +00:00
|
|
|
MNT_ILOCK(mp);
|
2008-09-16 11:51:06 +00:00
|
|
|
if (mp->mnt_susp_owner == curthread) {
|
2006-06-24 22:55:43 +00:00
|
|
|
MNT_IUNLOCK(mp);
|
2008-09-16 11:51:06 +00:00
|
|
|
return (EALREADY);
|
2006-06-24 22:55:43 +00:00
|
|
|
}
|
2008-09-16 11:51:06 +00:00
|
|
|
while (mp->mnt_kern_flag & MNTK_SUSPEND)
|
|
|
|
msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);
|
2000-07-11 22:07:57 +00:00
|
|
|
mp->mnt_kern_flag |= MNTK_SUSPEND;
|
2008-09-16 11:51:06 +00:00
|
|
|
mp->mnt_susp_owner = curthread;
|
2000-07-11 22:07:57 +00:00
|
|
|
if (mp->mnt_writeopcount > 0)
|
2005-01-24 10:31:42 +00:00
|
|
|
(void) msleep(&mp->mnt_writeopcount,
|
|
|
|
MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
|
|
|
|
else
|
|
|
|
MNT_IUNLOCK(mp);
|
2009-05-11 15:33:26 +00:00
|
|
|
if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0)
|
2002-10-25 00:20:37 +00:00
|
|
|
vfs_write_resume(mp);
|
2005-01-24 10:31:42 +00:00
|
|
|
return (error);
|
2000-07-11 22:07:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Request a filesystem to resume write operations.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
vfs_write_resume(mp)
|
|
|
|
struct mount *mp;
|
|
|
|
{
|
|
|
|
|
2005-01-24 10:31:42 +00:00
|
|
|
MNT_ILOCK(mp);
|
|
|
|
if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
|
2008-09-16 11:51:06 +00:00
|
|
|
KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner"));
|
2006-03-11 01:08:37 +00:00
|
|
|
mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
|
|
|
|
MNTK_SUSPENDED);
|
2008-09-16 11:51:06 +00:00
|
|
|
mp->mnt_susp_owner = NULL;
|
2005-01-24 10:31:42 +00:00
|
|
|
wakeup(&mp->mnt_writeopcount);
|
|
|
|
wakeup(&mp->mnt_flag);
|
2008-09-16 11:51:06 +00:00
|
|
|
curthread->td_pflags &= ~TDP_IGNSUSP;
|
|
|
|
MNT_IUNLOCK(mp);
|
|
|
|
VFS_SUSP_CLEAN(mp);
|
|
|
|
} else
|
|
|
|
MNT_IUNLOCK(mp);
|
2000-07-11 22:07:57 +00:00
|
|
|
}
|
|
|
|
|
2002-03-05 15:38:49 +00:00
|
|
|
/*
|
|
|
|
* Implement kqueues for files by translating it to vnode operation.
|
|
|
|
*/
|
2000-04-16 18:53:38 +00:00
|
|
|
static int
|
2001-02-15 16:34:11 +00:00
|
|
|
vn_kqfilter(struct file *fp, struct knote *kn)
|
2000-04-16 18:53:38 +00:00
|
|
|
{
|
2006-03-22 00:00:05 +00:00
|
|
|
int vfslocked;
|
2004-08-15 06:24:42 +00:00
|
|
|
int error;
|
2000-04-16 18:53:38 +00:00
|
|
|
|
2006-03-22 00:00:05 +00:00
|
|
|
vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
|
2004-08-15 06:24:42 +00:00
|
|
|
error = VOP_KQFILTER(fp->f_vnode, kn);
|
2006-03-22 00:00:05 +00:00
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
2004-08-06 22:25:35 +00:00
|
|
|
|
2004-08-15 06:24:42 +00:00
|
|
|
return error;
|
2000-04-16 18:53:38 +00:00
|
|
|
}
|
2000-08-08 17:15:32 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Simplified in-kernel wrapper calls for extended attribute access.
|
|
|
|
* Both calls pass in a NULL credential, authorizing as "kernel" access.
|
|
|
|
* Set IO_NODELOCKED in ioflg if the vnode is already locked.
|
|
|
|
*/
|
|
|
|
int
|
2001-03-19 05:44:15 +00:00
|
|
|
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
|
2001-09-12 08:38:13 +00:00
|
|
|
const char *attrname, int *buflen, char *buf, struct thread *td)
|
2000-08-08 17:15:32 +00:00
|
|
|
{
|
|
|
|
struct uio auio;
|
|
|
|
struct iovec iov;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
iov.iov_len = *buflen;
|
|
|
|
iov.iov_base = buf;
|
|
|
|
|
|
|
|
auio.uio_iov = &iov;
|
|
|
|
auio.uio_iovcnt = 1;
|
|
|
|
auio.uio_rw = UIO_READ;
|
|
|
|
auio.uio_segflg = UIO_SYSSPACE;
|
2001-09-12 08:38:13 +00:00
|
|
|
auio.uio_td = td;
|
2000-08-08 17:15:32 +00:00
|
|
|
auio.uio_offset = 0;
|
|
|
|
auio.uio_resid = *buflen;
|
|
|
|
|
|
|
|
if ((ioflg & IO_NODELOCKED) == 0)
|
2008-01-10 01:10:58 +00:00
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
|
2000-08-08 17:15:32 +00:00
|
|
|
|
2005-02-24 00:13:16 +00:00
|
|
|
ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
|
|
|
|
|
2000-08-08 17:15:32 +00:00
|
|
|
/* authorize attribute retrieval as kernel */
|
Part I: Update extended attribute API and ABI:
o Modify the system call syntax for extattr_{get,set}_{fd,file}() so
as not to use the scatter gather API (which appeared not to be used
by any consumers, and be less portable), rather, accepts 'data'
and 'nbytes' in the style of other simple read/write interfaces.
This changes the API and ABI.
o Modify system call semantics so that extattr_get_{fd,file}() return
a size_t. When performing a read, the number of bytes read will
be returned, unless the data pointer is NULL, in which case the
number of bytes of data are returned. This changes the API only.
o Modify the VOP_GETEXTATTR() vnode operation to accept a *size_t
argument so as to return the size, if desirable. If set to NULL,
the size will not be returned.
o Update various filesystems (pseodofs, ufs) to DTRT.
These changes should make extended attributes more useful and more
portable. More commits to rebuild the system call files, as well
as update userland utilities to follow.
Obtained from: TrustedBSD Project
Sponsored by: DARPA, NAI Labs
2002-02-10 04:43:22 +00:00
|
|
|
error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
|
|
|
|
td);
|
2000-08-08 17:15:32 +00:00
|
|
|
|
|
|
|
if ((ioflg & IO_NODELOCKED) == 0)
|
2008-01-13 14:44:15 +00:00
|
|
|
VOP_UNLOCK(vp, 0);
|
2000-08-08 17:15:32 +00:00
|
|
|
|
|
|
|
if (error == 0) {
|
|
|
|
*buflen = *buflen - auio.uio_resid;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX failure mode if partially written?
|
|
|
|
*/
|
|
|
|
int
|
2001-03-19 05:44:15 +00:00
|
|
|
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
|
2001-09-12 08:38:13 +00:00
|
|
|
const char *attrname, int buflen, char *buf, struct thread *td)
|
2000-08-08 17:15:32 +00:00
|
|
|
{
|
|
|
|
struct uio auio;
|
|
|
|
struct iovec iov;
|
2000-09-05 03:15:02 +00:00
|
|
|
struct mount *mp;
|
2000-08-08 17:15:32 +00:00
|
|
|
int error;
|
|
|
|
|
|
|
|
iov.iov_len = buflen;
|
|
|
|
iov.iov_base = buf;
|
|
|
|
|
|
|
|
auio.uio_iov = &iov;
|
|
|
|
auio.uio_iovcnt = 1;
|
|
|
|
auio.uio_rw = UIO_WRITE;
|
|
|
|
auio.uio_segflg = UIO_SYSSPACE;
|
2001-09-12 08:38:13 +00:00
|
|
|
auio.uio_td = td;
|
2000-08-08 17:15:32 +00:00
|
|
|
auio.uio_offset = 0;
|
|
|
|
auio.uio_resid = buflen;
|
|
|
|
|
2000-09-05 03:15:02 +00:00
|
|
|
if ((ioflg & IO_NODELOCKED) == 0) {
|
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
|
|
|
|
return (error);
|
2008-01-10 01:10:58 +00:00
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
|
2000-09-05 03:15:02 +00:00
|
|
|
}
|
2000-08-08 17:15:32 +00:00
|
|
|
|
2005-02-24 00:13:16 +00:00
|
|
|
ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
|
|
|
|
|
2000-08-08 17:15:32 +00:00
|
|
|
/* authorize attribute setting as kernel */
|
2001-09-12 08:38:13 +00:00
|
|
|
error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
|
2000-08-08 17:15:32 +00:00
|
|
|
|
2000-09-05 03:15:02 +00:00
|
|
|
if ((ioflg & IO_NODELOCKED) == 0) {
|
|
|
|
vn_finished_write(mp);
|
2008-01-13 14:44:15 +00:00
|
|
|
VOP_UNLOCK(vp, 0);
|
2000-09-05 03:15:02 +00:00
|
|
|
}
|
2000-08-08 17:15:32 +00:00
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
2000-09-22 22:33:13 +00:00
|
|
|
|
|
|
|
int
|
2001-03-19 05:44:15 +00:00
|
|
|
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
|
2001-09-12 08:38:13 +00:00
|
|
|
const char *attrname, struct thread *td)
|
2000-09-22 22:33:13 +00:00
|
|
|
{
|
|
|
|
struct mount *mp;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
if ((ioflg & IO_NODELOCKED) == 0) {
|
|
|
|
if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
|
|
|
|
return (error);
|
2008-01-10 01:10:58 +00:00
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
|
2000-09-22 22:33:13 +00:00
|
|
|
}
|
|
|
|
|
2005-02-24 00:13:16 +00:00
|
|
|
ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
|
|
|
|
|
2000-09-22 22:33:13 +00:00
|
|
|
/* authorize attribute removal as kernel */
|
2003-07-28 18:53:29 +00:00
|
|
|
error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
|
2003-06-22 23:03:07 +00:00
|
|
|
if (error == EOPNOTSUPP)
|
|
|
|
error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
|
|
|
|
NULL, td);
|
2000-09-22 22:33:13 +00:00
|
|
|
|
|
|
|
if ((ioflg & IO_NODELOCKED) == 0) {
|
|
|
|
vn_finished_write(mp);
|
2008-01-13 14:44:15 +00:00
|
|
|
VOP_UNLOCK(vp, 0);
|
2000-09-22 22:33:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
2009-01-21 14:51:38 +00:00
|
|
|
|
|
|
|
int
|
|
|
|
vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp)
|
|
|
|
{
|
|
|
|
struct mount *mp;
|
|
|
|
int ltype, error;
|
|
|
|
|
|
|
|
mp = vp->v_mount;
|
|
|
|
ltype = VOP_ISLOCKED(vp);
|
|
|
|
KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED,
|
|
|
|
("vn_vget_ino: vp not locked"));
|
2009-05-07 18:14:21 +00:00
|
|
|
error = vfs_busy(mp, MBF_NOWAIT);
|
|
|
|
if (error != 0) {
|
2009-07-02 18:02:55 +00:00
|
|
|
vfs_ref(mp);
|
2009-01-21 14:51:38 +00:00
|
|
|
VOP_UNLOCK(vp, 0);
|
2009-05-07 18:14:21 +00:00
|
|
|
error = vfs_busy(mp, 0);
|
2009-01-21 14:51:38 +00:00
|
|
|
vn_lock(vp, ltype | LK_RETRY);
|
2009-07-02 18:02:55 +00:00
|
|
|
vfs_rel(mp);
|
2009-05-07 18:14:21 +00:00
|
|
|
if (error != 0)
|
2009-01-21 14:51:38 +00:00
|
|
|
return (ENOENT);
|
2009-05-07 18:14:21 +00:00
|
|
|
if (vp->v_iflag & VI_DOOMED) {
|
|
|
|
vfs_unbusy(mp);
|
|
|
|
return (ENOENT);
|
|
|
|
}
|
2009-01-21 14:51:38 +00:00
|
|
|
}
|
|
|
|
VOP_UNLOCK(vp, 0);
|
|
|
|
error = VFS_VGET(mp, ino, lkflags, rvp);
|
|
|
|
vfs_unbusy(mp);
|
|
|
|
vn_lock(vp, ltype | LK_RETRY);
|
|
|
|
if (vp->v_iflag & VI_DOOMED) {
|
|
|
|
if (error == 0)
|
|
|
|
vput(*rvp);
|
|
|
|
error = ENOENT;
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
2010-05-05 16:44:25 +00:00
|
|
|
|
|
|
|
int
|
2010-05-06 18:43:19 +00:00
|
|
|
vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio,
|
|
|
|
const struct thread *td)
|
2010-05-05 16:44:25 +00:00
|
|
|
{
|
2010-05-06 18:43:19 +00:00
|
|
|
|
2010-05-05 16:44:25 +00:00
|
|
|
if (vp->v_type != VREG || td == NULL)
|
|
|
|
return (0);
|
|
|
|
PROC_LOCK(td->td_proc);
|
2010-05-06 18:52:41 +00:00
|
|
|
if ((uoff_t)uio->uio_offset + uio->uio_resid >
|
2010-05-05 16:44:25 +00:00
|
|
|
lim_cur(td->td_proc, RLIMIT_FSIZE)) {
|
2011-09-16 13:58:51 +00:00
|
|
|
kern_psignal(td->td_proc, SIGXFSZ);
|
2010-05-05 16:44:25 +00:00
|
|
|
PROC_UNLOCK(td->td_proc);
|
|
|
|
return (EFBIG);
|
|
|
|
}
|
|
|
|
PROC_UNLOCK(td->td_proc);
|
|
|
|
return (0);
|
|
|
|
}
|
2011-08-16 20:07:47 +00:00
|
|
|
|
|
|
|
int
|
|
|
|
vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
|
|
|
|
struct thread *td)
|
|
|
|
{
|
|
|
|
struct vnode *vp;
|
|
|
|
int error, vfslocked;
|
|
|
|
|
|
|
|
vp = fp->f_vnode;
|
|
|
|
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
|
|
|
|
#ifdef AUDIT
|
|
|
|
vn_lock(vp, LK_SHARED | LK_RETRY);
|
|
|
|
AUDIT_ARG_VNODE1(vp);
|
|
|
|
VOP_UNLOCK(vp, 0);
|
|
|
|
#endif
|
|
|
|
error = setfmode(td, active_cred, vp, mode);
|
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
|
|
|
|
struct thread *td)
|
|
|
|
{
|
|
|
|
struct vnode *vp;
|
|
|
|
int error, vfslocked;
|
|
|
|
|
|
|
|
vp = fp->f_vnode;
|
|
|
|
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
|
|
|
|
#ifdef AUDIT
|
|
|
|
vn_lock(vp, LK_SHARED | LK_RETRY);
|
|
|
|
AUDIT_ARG_VNODE1(vp);
|
|
|
|
VOP_UNLOCK(vp, 0);
|
|
|
|
#endif
|
|
|
|
error = setfown(td, active_cred, vp, uid, gid);
|
|
|
|
VFS_UNLOCK_GIANT(vfslocked);
|
|
|
|
return (error);
|
|
|
|
}
|
2011-08-25 08:17:39 +00:00
|
|
|
|
|
|
|
void
|
|
|
|
vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)
|
|
|
|
{
|
|
|
|
vm_object_t object;
|
|
|
|
|
|
|
|
if ((object = vp->v_object) == NULL)
|
|
|
|
return;
|
|
|
|
VM_OBJECT_LOCK(object);
|
|
|
|
vm_object_page_remove(object, start, end, 0);
|
|
|
|
VM_OBJECT_UNLOCK(object);
|
|
|
|
}
|
2012-05-26 05:28:47 +00:00
|
|
|
|
|
|
|
int
|
|
|
|
vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred)
|
|
|
|
{
|
|
|
|
struct vattr va;
|
|
|
|
daddr_t bn, bnp;
|
|
|
|
uint64_t bsize;
|
|
|
|
off_t noff;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA,
|
|
|
|
("Wrong command %lu", cmd));
|
|
|
|
|
|
|
|
if (vn_lock(vp, LK_SHARED) != 0)
|
|
|
|
return (EBADF);
|
|
|
|
if (vp->v_type != VREG) {
|
|
|
|
error = ENOTTY;
|
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
error = VOP_GETATTR(vp, &va, cred);
|
|
|
|
if (error != 0)
|
|
|
|
goto unlock;
|
|
|
|
noff = *off;
|
|
|
|
if (noff >= va.va_size) {
|
|
|
|
error = ENXIO;
|
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
bsize = vp->v_mount->mnt_stat.f_iosize;
|
|
|
|
for (bn = noff / bsize; noff < va.va_size; bn++, noff += bsize) {
|
|
|
|
error = VOP_BMAP(vp, bn, NULL, &bnp, NULL, NULL);
|
|
|
|
if (error == EOPNOTSUPP) {
|
|
|
|
error = ENOTTY;
|
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
if ((bnp == -1 && cmd == FIOSEEKHOLE) ||
|
|
|
|
(bnp != -1 && cmd == FIOSEEKDATA)) {
|
|
|
|
noff = bn * bsize;
|
|
|
|
if (noff < *off)
|
|
|
|
noff = *off;
|
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (noff > va.va_size)
|
|
|
|
noff = va.va_size;
|
|
|
|
/* noff == va.va_size. There is an implicit hole at the end of file. */
|
|
|
|
if (cmd == FIOSEEKDATA)
|
|
|
|
error = ENXIO;
|
|
|
|
unlock:
|
|
|
|
VOP_UNLOCK(vp, 0);
|
|
|
|
if (error == 0)
|
|
|
|
*off = noff;
|
|
|
|
return (error);
|
|
|
|
}
|