/*-
 * SPDX-License-Identifier: (BSD-2-Clause-FreeBSD AND BSD-3-Clause)
 *
 * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
 * @(#)ffs_vnops.c	8.15 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/extattr.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include "opt_directio.h"
#include "opt_ffs.h"
#define	ALIGNED_TO(ptr, s)	\
	(((uintptr_t)(ptr) & (_Alignof(s) - 1)) == 0)
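
/*
 * When the kernel is built with "options DIRECTIO", O_DIRECT reads are
 * handed to ffs_rawread() (implemented outside this file), which
 * bypasses the buffer cache.
 */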
#ifdef DIRECTIO
extern int	ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
#endif

static vop_fdatasync_t	ffs_fdatasync;
static vop_fsync_t	ffs_fsync;
static vop_getpages_t	ffs_getpages;
static vop_getpages_async_t	ffs_getpages_async;
static vop_lock1_t	ffs_lock;
#ifdef INVARIANTS
static vop_unlock_t	ffs_unlock_debug;
#endif
static vop_read_t	ffs_read;
static vop_write_t	ffs_write;
static int	ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
static int	ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
		    struct ucred *cred);
static vop_strategy_t	ffsext_strategy;
static vop_closeextattr_t	ffs_closeextattr;
static vop_deleteextattr_t	ffs_deleteextattr;
static vop_getextattr_t	ffs_getextattr;
static vop_listextattr_t	ffs_listextattr;
static vop_openextattr_t	ffs_openextattr;
static vop_setextattr_t	ffs_setextattr;
static vop_vptofh_t	ffs_vptofh;
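
/*
 * Two sets of vnode operation vectors follow.  The "1" vectors omit the
 * extended attribute operations; the "2" vectors wire them up as well,
 * for file systems that provide native extended attributes (UFS2).
 */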

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops1 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_fdatasync =	ffs_fdatasync,
	.vop_getpages =		ffs_getpages,
	.vop_getpages_async =	ffs_getpages_async,
	.vop_lock1 =		ffs_lock,
#ifdef INVARIANTS
	.vop_unlock =		ffs_unlock_debug,
#endif
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
	.vop_vptofh =		ffs_vptofh,
};
VFS_VOP_VECTOR_REGISTER(ffs_vnodeops1);

struct vop_vector ffs_fifoops1 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	.vop_fdatasync =	ffs_fdatasync,
	.vop_lock1 =		ffs_lock,
#ifdef INVARIANTS
	.vop_unlock =		ffs_unlock_debug,
#endif
	.vop_vptofh =		ffs_vptofh,
};
VFS_VOP_VECTOR_REGISTER(ffs_fifoops1);

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops2 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_fdatasync =	ffs_fdatasync,
	.vop_getpages =		ffs_getpages,
	.vop_getpages_async =	ffs_getpages_async,
	.vop_lock1 =		ffs_lock,
#ifdef INVARIANTS
	.vop_unlock =		ffs_unlock_debug,
#endif
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
	.vop_vptofh =		ffs_vptofh,
};
VFS_VOP_VECTOR_REGISTER(ffs_vnodeops2);

struct vop_vector ffs_fifoops2 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	.vop_fdatasync =	ffs_fdatasync,
	.vop_lock1 =		ffs_lock,
#ifdef INVARIANTS
	.vop_unlock =		ffs_unlock_debug,
#endif
	.vop_reallocblks =	ffs_reallocblks,
	.vop_strategy =		ffsext_strategy,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
	.vop_vptofh =		ffs_vptofh,
};
VFS_VOP_VECTOR_REGISTER(ffs_fifoops2);

/*
 * Synch an open file.
 */
/* ARGSUSED */
static int
ffs_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	int error;

	vp = ap->a_vp;
	bo = &vp->v_bufobj;
retry:
	error = ffs_syncvnode(vp, ap->a_waitfor, 0);
	if (error)
		return (error);
	if (ap->a_waitfor == MNT_WAIT && DOINGSOFTDEP(vp)) {
		error = softdep_fsync(vp);
		if (error)
			return (error);

		/*
		 * The softdep_fsync() function may drop vp lock,
		 * allowing for dirty buffers to reappear on the
		 * bo_dirty list.  Recheck and resync as needed.
		 */
		BO_LOCK(bo);
		if ((vp->v_type == VREG || vp->v_type == VDIR) &&
		    (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)) {
			BO_UNLOCK(bo);
			goto retry;
		}
		BO_UNLOCK(bo);
	}
	if (ffs_fsfail_cleanup(VFSTOUFS(vp->v_mount), 0))
		return (ENXIO);
	return (0);
}
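
/*
 * Flush all of a vnode's dirty buffers, then update its inode if
 * requested.  Soft updates dependencies may force several passes over
 * the dirty buffer list, alternating async and sync writes; the pass
 * structure is explained in the comments within the function.
 */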
int
ffs_syncvnode(struct vnode *vp, int waitfor, int flags)
{
	struct inode *ip;
	struct bufobj *bo;
	struct ufsmount *ump;
	struct buf *bp, *nbp;
	ufs_lbn_t lbn;
	int error, passes;
	bool still_dirty, wait;

	ip = VTOI(vp);
	ip->i_flag &= ~IN_NEEDSYNC;
	bo = &vp->v_bufobj;
	ump = VFSTOUFS(vp->v_mount);

	/*
	 * When doing MNT_WAIT we must first flush all dependencies
	 * on the inode.
	 */
	if (DOINGSOFTDEP(vp) && waitfor == MNT_WAIT &&
	    (error = softdep_sync_metadata(vp)) != 0) {
		if (ffs_fsfail_cleanup(ump, error))
			error = 0;
		return (error);
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
	error = 0;
	passes = 0;
	wait = false;	/* Always do an async pass first. */
	lbn = lblkno(ITOFS(ip), (ip->i_size + ITOFS(ip)->fs_bsize - 1));
	BO_LOCK(bo);
loop:
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
		bp->b_vflags &= ~BV_SCANNED;
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		/*
		 * Reasons to skip this buffer: it has already been considered
		 * on this pass, the buffer has dependencies that will cause
		 * it to be redirtied and it has not already been deferred,
		 * or it is already being written.
		 */
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		/*
		 * Flush indirects in order, if requested.
		 *
		 * Note that if only datasync is requested, we can
		 * skip indirect blocks when softupdates are not
		 * active.  Otherwise we must flush them with data,
		 * since dependencies prevent data block writes.
		 */
		if (waitfor == MNT_WAIT && bp->b_lblkno <= -UFS_NDADDR &&
		    (lbn_level(bp->b_lblkno) >= passes ||
		    ((flags & DATA_ONLY) != 0 && !DOINGSOFTDEP(vp))))
			continue;
		if (bp->b_lblkno > lbn)
			panic("ffs_syncvnode: syncing truncated data.");
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
			BO_UNLOCK(bo);
		} else if (wait) {
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    BO_LOCKPTR(bo)) != 0) {
				bp->b_vflags &= ~BV_SCANNED;
				goto next;
			}
		} else
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("ffs_fsync: not dirty");
		/*
		 * Check for dependencies and potentially complete them.
		 */
		if (!LIST_EMPTY(&bp->b_dep) &&
		    (error = softdep_sync_buf(vp, bp,
		    wait ? MNT_WAIT : MNT_NOWAIT)) != 0) {
			/* I/O error. */
			if (error != EBUSY) {
				BUF_UNLOCK(bp);
				return (error);
			}
			/* If we deferred once, don't defer again. */
			if ((bp->b_flags & B_DEFERRED) == 0) {
				bp->b_flags |= B_DEFERRED;
				BUF_UNLOCK(bp);
				goto next;
			}
		}
		if (wait) {
			bremfree(bp);
			error = bwrite(bp);
			if (ffs_fsfail_cleanup(ump, error))
				error = 0;
			if (error != 0)
				return (error);
		} else if ((bp->b_flags & B_CLUSTEROK)) {
			(void) vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			(void) bawrite(bp);
		}
next:
		/*
		 * Since we may have slept during the I/O, we need
		 * to start from a known point.
		 */
		BO_LOCK(bo);
		nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd);
	}
	if (waitfor != MNT_WAIT) {
		BO_UNLOCK(bo);
		if ((flags & NO_INO_UPDT) != 0)
			return (0);
		else
			return (ffs_update(vp, 0));
	}
	/* Drain IO to see if we're done. */
	bufobj_wwait(bo, 0, 0);
	/*
	 * Block devices associated with filesystems may have new I/O
	 * requests posted for them even if the vnode is locked, so no
	 * amount of trying will get them clean.  We make several passes
	 * as a best effort.
	 *
	 * Regular files may need multiple passes to flush all dependency
	 * work as it is possible that we must write once per indirect
	 * level, once for the leaf, and once for the inode and each of
	 * these will be done with one sync and one async pass.
	 */
	if (bo->bo_dirty.bv_cnt > 0) {
		if ((flags & DATA_ONLY) == 0) {
			still_dirty = true;
		} else {
			/*
			 * For data-only sync, dirty indirect buffers
			 * are ignored.
			 */
			still_dirty = false;
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
				if (bp->b_lblkno > -UFS_NDADDR) {
					still_dirty = true;
					break;
				}
			}
		}

		if (still_dirty) {
			/* Write the inode after sync passes to flush deps. */
			if (wait && DOINGSOFTDEP(vp) &&
			    (flags & NO_INO_UPDT) == 0) {
				BO_UNLOCK(bo);
				ffs_update(vp, 1);
				BO_LOCK(bo);
			}
			/* switch between sync/async. */
			wait = !wait;
			if (wait || ++passes < UFS_NIADDR + 2)
				goto loop;
		}
	}
	BO_UNLOCK(bo);
	error = 0;
	if ((flags & DATA_ONLY) == 0) {
		if ((flags & NO_INO_UPDT) == 0)
			error = ffs_update(vp, 1);
		if (DOINGSUJ(vp))
			softdep_journal_fsync(VTOI(vp));
	} else if ((ip->i_flag & (IN_SIZEMOD | IN_IBLKDATA)) != 0) {
		error = ffs_update(vp, 1);
	}
	return (error);
}

static int
ffs_fdatasync(struct vop_fdatasync_args *ap)
{

	return (ffs_syncvnode(ap->a_vp, MNT_WAIT, DATA_ONLY));
}
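
/*
 * Vnode locking for FFS.  A vnode's lock identity can change while we
 * sleep, because a snapshot file's vnode shares the per-filesystem
 * snapshot lock and may revert to its private lock (or vice versa); if
 * v_vnlock changed under us, the lock just taken must be released and
 * the new one acquired instead.
 */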
|
|
|
|
|
2005-02-08 15:54:30 +00:00
|
|
|
static int
|
|
|
|
ffs_lock(ap)
|
2007-05-18 13:02:13 +00:00
|
|
|
struct vop_lock1_args /* {
|
2005-02-08 15:54:30 +00:00
|
|
|
struct vnode *a_vp;
|
|
|
|
int a_flags;
|
2006-11-13 05:51:22 +00:00
|
|
|
char *file;
|
|
|
|
int line;
|
2005-02-08 15:54:30 +00:00
|
|
|
} */ *ap;
|
|
|
|
{
|
Add a framework that tracks exclusive vnode lock generation count for UFS.
This count is memoized together with the lookup metadata in directory
inode, and we assert that accesses to lookup metadata are done under
the same lock generation as they were stored. Enabled under DIAGNOSTICS.
UFS saves additional data for parent dirent when doing lookup
(i_offset, i_count, i_endoff), and this data is used later by VOPs
operating on dirents. If parent vnode exclusive lock is dropped and
re-acquired between lookup and the VOP call, we corrupt directories.
Framework asserts that corruption cannot occur that way, by tracking
vnode lock generation counter. Updates to inode dirent members also
save the counter, while users compare current and saved counters
values.
Also, fix a case in ufs_lookup_ino() where i_offset and i_count could
be updated under shared lock. It is not a bug on its own since dvp
i_offset results from such lookup cannot be used, but it causes false
positive in the checker.
In collaboration with: pho
Reviewed by: mckusick (previous version), markj
Tested by: markj (syzkaller), pho
Sponsored by: The FreeBSD Foundation
Differential revision: https://reviews.freebsd.org/D26136
2020-11-14 05:10:39 +00:00
|
|
|
#if !defined(NO_FFS_SNAPSHOT) || defined(DIAGNOSTIC)
|
|
|
|
struct vnode *vp = ap->a_vp;
|
|
|
|
#endif /* !NO_FFS_SNAPSHOT || DIAGNOSTIC */
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
struct inode *ip;
|
|
|
|
#endif /* DIAGNOSTIC */
|
|
|
|
int result;
|
2006-05-05 19:58:36 +00:00
|
|
|
#ifndef NO_FFS_SNAPSHOT
|
2006-05-02 23:52:43 +00:00
|
|
|
int flags;
|
|
|
|
struct lock *lkp;
|
2008-01-02 01:19:17 +00:00
|
|
|
|
2020-08-04 23:09:15 +00:00
|
|
|
/*
|
|
|
|
* Adaptive spinning mixed with SU leads to trouble. use a giant hammer
|
|
|
|
* and only use it when LK_NODDLKTREAT is set. Currently this means it
|
|
|
|
* is only used during path lookup.
|
|
|
|
*/
|
|
|
|
if ((ap->a_flags & LK_NODDLKTREAT) != 0)
|
|
|
|
ap->a_flags |= LK_ADAPTIVE;
|
2006-05-02 23:52:43 +00:00
|
|
|
switch (ap->a_flags & LK_TYPE_MASK) {
|
|
|
|
case LK_SHARED:
|
|
|
|
case LK_UPGRADE:
|
|
|
|
case LK_EXCLUSIVE:
|
|
|
|
flags = ap->a_flags;
|
|
|
|
for (;;) {
|
2008-10-20 10:11:33 +00:00
|
|
|
#ifdef DEBUG_VFS_LOCKS
|
2020-02-03 14:25:32 +00:00
|
|
|
VNPASS(vp->v_holdcnt != 0, vp);
|
Add a framework that tracks exclusive vnode lock generation count for UFS.
This count is memoized together with the lookup metadata in directory
inode, and we assert that accesses to lookup metadata are done under
the same lock generation as they were stored. Enabled under DIAGNOSTICS.
UFS saves additional data for parent dirent when doing lookup
(i_offset, i_count, i_endoff), and this data is used later by VOPs
operating on dirents. If parent vnode exclusive lock is dropped and
re-acquired between lookup and the VOP call, we corrupt directories.
Framework asserts that corruption cannot occur that way, by tracking
vnode lock generation counter. Updates to inode dirent members also
save the counter, while users compare current and saved counters
values.
Also, fix a case in ufs_lookup_ino() where i_offset and i_count could
be updated under shared lock. It is not a bug on its own since dvp
i_offset results from such lookup cannot be used, but it causes false
positive in the checker.
In collaboration with: pho
Reviewed by: mckusick (previous version), markj
Tested by: markj (syzkaller), pho
Sponsored by: The FreeBSD Foundation
Differential revision: https://reviews.freebsd.org/D26136
2020-11-14 05:10:39 +00:00
|
|
|
#endif /* DEBUG_VFS_LOCKS */
|
2006-05-02 23:52:43 +00:00
|
|
|
lkp = vp->v_vnlock;
|
2020-02-15 21:48:48 +00:00
|
|
|
result = lockmgr_lock_flags(lkp, flags,
|
|
|
|
&VI_MTX(vp)->lock_object, ap->a_file, ap->a_line);
|
2006-05-02 23:52:43 +00:00
|
|
|
if (lkp == vp->v_vnlock || result != 0)
|
|
|
|
break;
|
|
|
|
/*
|
|
|
|
* Apparent success, except that the vnode
|
|
|
|
* mutated between snapshot file vnode and
|
|
|
|
* regular file vnode while this process
|
|
|
|
* slept. The lock currently held is not the
|
|
|
|
* right lock. Release it, and try to get the
|
|
|
|
* new lock.
|
|
|
|
*/
|
2020-02-15 21:48:48 +00:00
|
|
|
lockmgr_unlock(lkp);
|
2008-03-31 07:55:45 +00:00
|
|
|
if ((flags & (LK_INTERLOCK | LK_NOWAIT)) ==
|
|
|
|
(LK_INTERLOCK | LK_NOWAIT))
|
|
|
|
return (EBUSY);
|
2006-05-02 23:52:43 +00:00
|
|
|
if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
|
|
|
|
flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
|
|
|
|
flags &= ~LK_INTERLOCK;
|
|
|
|
}
|
Add a framework that tracks exclusive vnode lock generation count for UFS.
This count is memoized together with the lookup metadata in directory
inode, and we assert that accesses to lookup metadata are done under
the same lock generation as they were stored. Enabled under DIAGNOSTICS.
UFS saves additional data for parent dirent when doing lookup
(i_offset, i_count, i_endoff), and this data is used later by VOPs
operating on dirents. If parent vnode exclusive lock is dropped and
re-acquired between lookup and the VOP call, we corrupt directories.
Framework asserts that corruption cannot occur that way, by tracking
vnode lock generation counter. Updates to inode dirent members also
save the counter, while users compare current and saved counters
values.
Also, fix a case in ufs_lookup_ino() where i_offset and i_count could
be updated under shared lock. It is not a bug on its own since dvp
i_offset results from such lookup cannot be used, but it causes false
positive in the checker.
In collaboration with: pho
Reviewed by: mckusick (previous version), markj
Tested by: markj (syzkaller), pho
Sponsored by: The FreeBSD Foundation
Differential revision: https://reviews.freebsd.org/D26136
2020-11-14 05:10:39 +00:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
switch (ap->a_flags & LK_TYPE_MASK) {
|
|
|
|
case LK_UPGRADE:
|
|
|
|
case LK_EXCLUSIVE:
|
|
|
|
if (result == 0 && vp->v_vnlock->lk_recurse == 0) {
|
|
|
|
ip = VTOI(vp);
|
|
|
|
if (ip != NULL)
|
|
|
|
ip->i_lock_gen++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif /* DIAGNOSTIC */
|
2006-05-02 23:52:43 +00:00
|
|
|
break;
|
|
|
|
default:
|
Add a framework that tracks exclusive vnode lock generation count for UFS.
This count is memoized together with the lookup metadata in directory
inode, and we assert that accesses to lookup metadata are done under
the same lock generation as they were stored. Enabled under DIAGNOSTICS.
UFS saves additional data for parent dirent when doing lookup
(i_offset, i_count, i_endoff), and this data is used later by VOPs
operating on dirents. If parent vnode exclusive lock is dropped and
re-acquired between lookup and the VOP call, we corrupt directories.
Framework asserts that corruption cannot occur that way, by tracking
vnode lock generation counter. Updates to inode dirent members also
save the counter, while users compare current and saved counters
values.
Also, fix a case in ufs_lookup_ino() where i_offset and i_count could
be updated under shared lock. It is not a bug on its own since dvp
i_offset results from such lookup cannot be used, but it causes false
positive in the checker.
In collaboration with: pho
Reviewed by: mckusick (previous version), markj
Tested by: markj (syzkaller), pho
Sponsored by: The FreeBSD Foundation
Differential revision: https://reviews.freebsd.org/D26136
2020-11-14 05:10:39 +00:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if ((ap->a_flags & LK_TYPE_MASK) == LK_DOWNGRADE) {
|
|
|
|
ip = VTOI(vp);
|
|
|
|
if (ip != NULL)
|
|
|
|
ufs_unlock_tracker(ip);
|
|
|
|
}
|
|
|
|
#endif /* DIAGNOSTIC */
|
2007-05-18 13:02:13 +00:00
|
|
|
result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
|
Add a framework that tracks exclusive vnode lock generation count for UFS.
This count is memoized together with the lookup metadata in directory
inode, and we assert that accesses to lookup metadata are done under
the same lock generation as they were stored. Enabled under DIAGNOSTICS.
UFS saves additional data for parent dirent when doing lookup
(i_offset, i_count, i_endoff), and this data is used later by VOPs
operating on dirents. If parent vnode exclusive lock is dropped and
re-acquired between lookup and the VOP call, we corrupt directories.
Framework asserts that corruption cannot occur that way, by tracking
vnode lock generation counter. Updates to inode dirent members also
save the counter, while users compare current and saved counters
values.
Also, fix a case in ufs_lookup_ino() where i_offset and i_count could
be updated under shared lock. It is not a bug on its own since dvp
i_offset results from such lookup cannot be used, but it causes false
positive in the checker.
In collaboration with: pho
Reviewed by: mckusick (previous version), markj
Tested by: markj (syzkaller), pho
Sponsored by: The FreeBSD Foundation
Differential revision: https://reviews.freebsd.org/D26136
		break;
	}
#else /* NO_FFS_SNAPSHOT */
	/*
	 * See above for an explanation.
	 */
	if ((ap->a_flags & LK_NODDLKTREAT) != 0)
		ap->a_flags |= LK_ADAPTIVE;
#ifdef DIAGNOSTIC
	if ((ap->a_flags & LK_TYPE_MASK) == LK_DOWNGRADE) {
		ip = VTOI(vp);
		if (ip != NULL)
			ufs_unlock_tracker(ip);
	}
#endif /* DIAGNOSTIC */
	result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
#endif /* NO_FFS_SNAPSHOT */
#ifdef DIAGNOSTIC
	switch (ap->a_flags & LK_TYPE_MASK) {
	case LK_UPGRADE:
	case LK_EXCLUSIVE:
		if (result == 0 && vp->v_vnlock->lk_recurse == 0) {
			ip = VTOI(vp);
			if (ip != NULL)
				ip->i_lock_gen++;
		}
	}
#endif /* DIAGNOSTIC */
	return (result);
}
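
The commit message above describes the generation-count check only in
prose. What follows is a minimal, self-contained userspace sketch of
the idea; the structure and helper names are illustrative assumptions,
not the UFS implementation, which keeps the counter in i_lock_gen and
checks it under DIAGNOSTIC.

/*
 * Sketch: each exclusive lock acquisition bumps a generation counter;
 * cached lookup metadata memoizes the counter and is trusted only
 * while the counter is unchanged.  All names here are hypothetical.
 */
#include <assert.h>
#include <stdint.h>

struct dir_handle {
	uint64_t lock_gen;	/* bumped on each exclusive lock */
	long i_offset;		/* cached lookup metadata */
	uint64_t saved_gen;	/* generation when i_offset was saved */
};

static void
lock_excl(struct dir_handle *dh)
{
	dh->lock_gen++;
}

static void
save_lookup(struct dir_handle *dh, long offset)
{
	dh->i_offset = offset;
	dh->saved_gen = dh->lock_gen;
}

static long
use_lookup(struct dir_handle *dh)
{
	/* Equivalent of the DIAGNOSTIC assert: stale metadata is fatal. */
	assert(dh->saved_gen == dh->lock_gen);
	return (dh->i_offset);
}

int
main(void)
{
	struct dir_handle dh = { 0, 0, 0 };

	lock_excl(&dh);
	save_lookup(&dh, 512);
	assert(use_lookup(&dh) == 512);	/* same generation: OK */
	lock_excl(&dh);			/* lock dropped and re-acquired */
	/* use_lookup(&dh) would now trip the assertion. */
	return (0);
}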

#ifdef INVARIANTS
static int
ffs_unlock_debug(struct vop_unlock_args *ap)
{
	struct vnode *vp;
	struct inode *ip;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip->i_flag & UFS_INODE_FLAG_LAZY_MASK_ASSERTABLE) {
		if ((vp->v_mflag & VMP_LAZYLIST) == 0) {
			VI_LOCK(vp);
			VNASSERT((vp->v_mflag & VMP_LAZYLIST), vp,
			    ("%s: modified vnode (%x) not on lazy list",
			    __func__, ip->i_flag));
			VI_UNLOCK(vp);
		}
	}
#ifdef DIAGNOSTIC
	if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE && ip != NULL &&
	    vp->v_vnlock->lk_recurse == 0)
		ufs_unlock_tracker(ip);
#endif
	return (VOP_UNLOCK_APV(&ufs_vnodeops, ap));
}
#endif

static int
ffs_read_hole(struct uio *uio, long xfersize, long *size)
{
	ssize_t saved_resid, tlen;
	int error;

	while (xfersize > 0) {
		tlen = min(xfersize, ZERO_REGION_SIZE);
		saved_resid = uio->uio_resid;
		error = vn_io_fault_uiomove(__DECONST(void *, zero_region),
		    tlen, uio);
		if (error != 0)
			return (error);
		tlen = saved_resid - uio->uio_resid;
		xfersize -= tlen;
		*size -= tlen;
	}
	return (0);
}
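
/*
 * Note: ffs_read_hole() is reached from ffs_read() below when
 * bread_gb() returns EJUSTRETURN, which the buffer layer uses (with
 * GB_NOSPARSE, as the handling in ffs_read() suggests) to report that
 * the requested block is a hole.  Rather than allocating and zeroing
 * a buffer, the read is satisfied by copying out of the kernel
 * zero_region in ZERO_REGION_SIZE chunks.
 */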

/*
 * Vnode op for reading.
 */
static int
ffs_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	ssize_t orig_resid;
	int bflag, error, ioflag, seqcount;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extread(vp, uio, ioflag));
#else
		panic("ffs_read+IO_EXT");
#endif
#ifdef DIRECTIO
	if ((ioflag & IO_DIRECT) != 0) {
		int workdone;

		error = ffs_rawread(vp, uio, &workdone);
		if (error != 0 || workdone != 0)
			return (error);
	}
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: mode");

	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("ffs_read: short symlink");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: type %d", vp->v_type);
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
	fs = ITOFS(ip);
	if (uio->uio_offset < ip->i_size &&
	    uio->uio_offset >= fs->fs_maxfilesize)
		return (EOVERFLOW);

	bflag = GB_UNMAPPED | (uio->uio_segflg == UIO_NOCOPY ? 0 : GB_NOSPARSE);
	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * Compute the size of the buffer.  The buffer
		 * representing the end of the file is rounded up to
		 * the size of the block type (fragment or full block,
		 * depending).
		 */
		size = blksize(fs, ip, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= ip->i_size) {
			/*
			 * Don't do readahead if this is the end of the file.
			 */
			error = bread_gb(vp, lbn, size, NOCRED, bflag, &bp);
		} else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
			/*
			 * Otherwise if we are allowed to cluster,
			 * grab as much as we can.
			 *
			 * XXX  This may not be a win if we are not
			 * doing sequential access.
			 */
			error = cluster_read(vp, ip->i_size, lbn,
			    size, NOCRED, blkoffset + uio->uio_resid,
			    seqcount, bflag, &bp);
		} else if (seqcount > 1) {
			/*
			 * If we are NOT allowed to cluster, then
			 * if we appear to be acting sequentially,
			 * fire off a request for a readahead
			 * as well as a read. Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			u_int nextsize = blksize(fs, ip, nextlbn);
			error = breadn_flags(vp, lbn, lbn, size, &nextlbn,
			    &nextsize, 1, NOCRED, bflag, NULL, &bp);
		} else {
			/*
			 * Failing all of the above, just read what the
			 * user asked for. Interestingly, the same as
			 * the first option above.
			 */
			error = bread_gb(vp, lbn, size, NOCRED, bflag, &bp);
		}
		if (error == EJUSTRETURN) {
			error = ffs_read_hole(uio, xfersize, &size);
			if (error == 0)
				continue;
		}
		if (error != 0) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		if (buf_mapped(bp)) {
			error = vn_io_fault_uiomove((char *)bp->b_data +
			    blkoffset, (int)xfersize, uio);
		} else {
			error = vn_io_fault_pgmove(bp->b_pages, blkoffset,
			    (int)xfersize, uio);
		}
		if (error)
			break;

		vfs_bio_brelse(bp, ioflag);
	}

	/*
	 * This can only happen in the case of an error, because the
	 * loop above resets bp to NULL on each iteration and on normal
	 * completion has not set a new value into it, so it must have
	 * come from a 'break' statement.
	 */
	if (bp != NULL)
		vfs_bio_brelse(bp, ioflag);

	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0)
		UFS_INODE_SET_FLAG_SHARED(ip, IN_ACCESS);
	return (error);
}

/*
 * Vnode op for writing.
 */
static int
ffs_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	off_t osize;
	ssize_t resid;
	int seqcount;
	int blkoffset, error, flags, ioflag, size, xfersize;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
#else
		panic("ffs_write+IO_EXT");
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_WRITE)
		panic("ffs_write: mode");
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		panic("ffs_write: dir write");
		break;
	default:
		panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
		    (int)uio->uio_offset, (int)uio->uio_resid);
	}

	KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
	KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
	fs = ITOFS(ip);
	if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
		return (EFBIG);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vn_rlimit_fsize(vp, uio, uio->uio_td))
		return (EFBIG);

	resid = uio->uio_resid;
	osize = ip->i_size;
	if (seqcount > BA_SEQMAX)
		flags = BA_SEQMAX << BA_SEQSHIFT;
	else
		flags = seqcount << BA_SEQSHIFT;
	if (ioflag & IO_SYNC)
		flags |= IO_SYNC;
	flags |= BA_UNMAPPED;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (uio->uio_offset + xfersize > ip->i_size)
			vnode_pager_setsize(vp, uio->uio_offset + xfersize);

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		/* XXX is uio->uio_offset the right thing here? */
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ap->a_cred, flags, &bp);
		if (error != 0) {
			vnode_pager_setsize(vp, ip->i_size);
			break;
		}
		if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
			bp->b_flags |= B_NOCACHE;

		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;
			DIP_SET(ip, i_size, ip->i_size);
			UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE);
		}

		size = blksize(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		if (buf_mapped(bp)) {
			error = vn_io_fault_uiomove((char *)bp->b_data +
			    blkoffset, (int)xfersize, uio);
		} else {
			error = vn_io_fault_pgmove(bp->b_pages, blkoffset,
			    (int)xfersize, uio);
		}
		/*
		 * If the buffer is not already filled and we encounter an
		 * error while trying to fill it, we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland mmap.
		 *
		 * Note that we need only clear buffers with a transfer size
		 * equal to the block size because buffers with a shorter
		 * transfer size were cleared above by the call to UFS_BALLOC()
		 * with the BA_CLRBUF flag set.
		 *
		 * If the source region for uiomove identically mmaps the
		 * buffer, uiomove() performed the NOP copy, and the buffer
		 * content remains valid because the page fault handler
		 * validated the pages.
		 */
		if (error != 0 && (bp->b_flags & B_CACHE) == 0 &&
		    fs->fs_bsize == xfersize)
			vfs_bio_clrbuf(bp);

		vfs_bio_set_flags(bp, ioflag);

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
		    buf_dirty_count_severe() ||
		    (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else if (xfersize + blkoffset == fs->fs_bsize) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				cluster_write(vp, bp, ip->i_size, seqcount,
				    GB_UNMAPPED);
			} else {
				bawrite(bp);
			}
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		if (error || xfersize == 0)
			break;
		UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
	    ap->a_cred) {
		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID)) {
			vn_seqc_write_begin(vp);
			UFS_INODE_SET_MODE(ip, ip->i_mode & ~(ISUID | ISGID));
			DIP_SET(ip, i_mode, ip->i_mode);
			vn_seqc_write_end(vp);
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_NORMAL | (ioflag & IO_SYNC), ap->a_cred);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
This commit enables a UFS filesystem to do a forcible unmount when
the underlying media fails or becomes inaccessible, for example when
a USB flash memory card hosting a UFS filesystem is unplugged.
The strategy for handling disk I/O errors when soft updates are
enabled is to stop writing to the disk of the affected file system
but continue to accept I/O requests and report that all future
writes by the file system to that disk actually succeed, and then
initiate an asynchronous forced unmount of the affected file system.
There are two cases for disk I/O errors:
- ENXIO, which means that this disk is gone and the lower layers
of the storage stack already guarantee that no future I/O to
this disk will succeed.
- EIO (or most other errors), which means that this particular
I/O request has failed but subsequent I/O requests to this
disk might still succeed.
For ENXIO, we can just clear the error and continue, because we
know that the file system cannot affect the on-disk state after we
see this error. For EIO or other errors, we arrange for the geom_vfs
layer to reject all future I/O requests with ENXIO, just as is
done when the geom_vfs is orphaned. In both cases, the file system
code can just clear the error and proceed with the forcible unmount.
(A small illustrative sketch of this two-case classification follows
ffs_write() below.)
This new treatment of I/O errors is needed for writes of any buffer
that is involved in a dependency. Most dependencies are described
by a structure attached to the buffer's b_dep field, but some are
created and processed as a result of the completion of the dependencies
attached to the buffer.
Clearing some dependencies requires a read. For example, if there
is a dependency that requires an inode to be written, the disk block
containing that inode must be read, the updated inode copied into
place in that buffer, and the buffer then written back to disk.
Often the needed buffer is already in memory and can be used, but
if it needs to be read from the disk, the read will fail, so we
fabricate a buffer full of zeroes and pretend that the read succeeded.
This zeroed buffer can be updated and written back to disk.
The only case where a buffer full of zeroes causes the code to do
the wrong thing is when reading an inode buffer containing an inode
that still has an inode dependency in memory that will reinitialize
the effective link count (i_effnlink) based on the actual link count
(i_nlink) that we read. To handle this case we now store the i_nlink
value that we wrote in the inode dependency so that it can be
restored into the zeroed buffer, thus keeping the tracking of the
inode link count consistent.
Because applications depend on knowing when an attempt to write
their data to stable storage has failed, the fsync(2) and msync(2)
system calls need to return errors if data fails to be written to
stable storage. So these operations return ENXIO for every call
made on files in a file system where we have otherwise been ignoring
I/O errors.
Co-authored by: mckusick
Reviewed by: kib
Tested by: Peter Holm
Approved by: mckusick (mentor)
Sponsored by: Netflix
Differential Revision: https://reviews.freebsd.org/D24088
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) {
		error = ffs_update(vp, 1);
		if (ffs_fsfail_cleanup(VFSTOUFS(vp->v_mount), error))
			error = ENXIO;
	}
	return (error);
}
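
The commit message above classifies disk I/O errors into two cases.
Below is a small, self-contained userspace sketch of that decision;
the struct, helper name, and flag are assumptions for illustration
only, not the actual ffs_fsfail_cleanup() implementation.

#include <assert.h>
#include <errno.h>
#include <stdbool.h>

struct sketch_mount {
	bool fsfail;	/* file system marked failed; future I/O rejected */
};

/*
 * Returns true when the error was a disk I/O failure that should be
 * absorbed while a forced unmount proceeds; the caller then either
 * clears the error or reports ENXIO, depending on the operation.
 */
static bool
sketch_fsfail_cleanup(struct sketch_mount *mp, int error)
{
	if (error == 0)
		return (false);
	if (error != ENXIO) {
		/*
		 * EIO and friends: this request failed but later ones
		 * might not; mark the device failed so all future I/O
		 * is rejected with ENXIO, as if it were unplugged.
		 */
		mp->fsfail = true;
	}
	/* ENXIO: the disk is already known gone; nothing to mark. */
	return (true);
}

int
main(void)
{
	struct sketch_mount m = { false };

	assert(!sketch_fsfail_cleanup(&m, 0));	/* success: no cleanup */
	assert(sketch_fsfail_cleanup(&m, EIO));	/* EIO: mark failed */
	assert(m.fsfail);
	return (0);
}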

/*
 * Extended attribute area reading.
 */
static int
ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	ssize_t orig_resid;
	int error;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	dp = ip->i_din2;

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extread: mode");
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * Compute the size of the buffer.  The buffer
		 * representing the end of the file is rounded up to
		 * the size of the block type (fragment or full block,
		 * depending).
		 */
		size = sblksize(fs, dp->di_extsize, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
			/*
			 * Don't do readahead if this is the end of the info.
			 */
			error = bread(vp, -1 - lbn, size, NOCRED, &bp);
		} else {
			/*
			 * If we have a second block, then
			 * fire off a request for a readahead
			 * as well as a read. Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			u_int nextsize = sblksize(fs, dp->di_extsize, nextlbn);

			nextlbn = -1 - nextlbn;
			error = breadn(vp, -1 - lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;
		vfs_bio_brelse(bp, ioflag);
	}

	/*
	 * This can only happen in the case of an error, because the
	 * loop above resets bp to NULL on each iteration and on normal
	 * completion has not set a new value into it, so it must have
	 * come from a 'break' statement.
	 */
	if (bp != NULL)
		vfs_bio_brelse(bp, ioflag);
	return (error);
}

/*
 * Extended attribute area writing.
 */
static int
ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	off_t osize;
	ssize_t resid;
	int blkoffset, error, flags, size, xfersize;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	dp = ip->i_din2;

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extwrite: mode");
#endif

	if (ioflag & IO_APPEND)
		uio->uio_offset = dp->di_extsize;
	KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
	KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
	if ((uoff_t)uio->uio_offset + uio->uio_resid >
	    UFS_NXADDR * fs->fs_bsize)
		return (EFBIG);

	resid = uio->uio_resid;
	osize = dp->di_extsize;
	flags = IO_EXT;
	if (ioflag & IO_SYNC)
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ucred, flags, &bp);
		if (error != 0)
			break;
		/*
		 * If the buffer is not valid we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland
		 * mmap().  XXX deal with uiomove() errors a better way.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);

		if (uio->uio_offset + xfersize > dp->di_extsize) {
			dp->di_extsize = uio->uio_offset + xfersize;
			UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE);
		}

		size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);

		vfs_bio_set_flags(bp, ioflag);

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
		    buf_dirty_count_severe() ||
		    xfersize + blkoffset == fs->fs_bsize ||
		    (ioflag & (IO_ASYNC | IO_DIRECT)))
			bawrite(bp);
		else
			bdwrite(bp);
		if (error || xfersize == 0)
			break;
		UFS_INODE_SET_FLAG(ip, IN_CHANGE);
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ucred) {
		if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID)) {
			vn_seqc_write_begin(vp);
			UFS_INODE_SET_MODE(ip, ip->i_mode & ~(ISUID | ISGID));
			dp->di_mode = ip->i_mode;
			vn_seqc_write_end(vp);
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_EXT | (ioflag & IO_SYNC), ucred);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}

/*
 * Vnode operation to retrieve a named extended attribute.
 *
 * Locate a particular EA (nspace:name) in the area (ptr:length), and return
 * the length of the EA, and possibly the pointer to the entry and to the data.
 */
static int
ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name,
    struct extattr **eapp, u_char **eac)
{
	struct extattr *eap, *eaend;
	size_t nlen;

	nlen = strlen(name);
	KASSERT(ALIGNED_TO(ptr, struct extattr), ("unaligned"));
	eap = (struct extattr *)ptr;
	eaend = (struct extattr *)(ptr + length);
	for (; eap < eaend; eap = EXTATTR_NEXT(eap)) {
		KASSERT(EXTATTR_NEXT(eap) <= eaend,
		    ("extattr next %p beyond %p", EXTATTR_NEXT(eap), eaend));
		if (eap->ea_namespace != nspace || eap->ea_namelength != nlen
		    || memcmp(eap->ea_name, name, nlen) != 0)
			continue;
		if (eapp != NULL)
			*eapp = eap;
		if (eac != NULL)
			*eac = EXTATTR_CONTENT(eap);
		return (EXTATTR_CONTENT_SIZE(eap));
	}
	return (-1);
}
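
/*
 * Note on ffs_findextattr()'s return convention: -1 means the named
 * attribute is not present; any other value is the size of the
 * attribute content, with *eapp and *eac (when non-NULL) left pointing
 * at the entry header and content.  ffs_getextattr() and
 * ffs_deleteextattr() below show the two usage patterns.
 */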

static int
ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td)
{
	const struct extattr *eap, *eaend, *eapnext;
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct uio luio;
	struct iovec liovec;
	u_int easize;
	int error;
	u_char *eae;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	dp = ip->i_din2;
	easize = dp->di_extsize;
	if ((uoff_t)easize > UFS_NXADDR * fs->fs_bsize)
		return (EFBIG);

	eae = malloc(easize, M_TEMP, M_WAITOK);

	liovec.iov_base = eae;
	liovec.iov_len = easize;
	luio.uio_iov = &liovec;
	luio.uio_iovcnt = 1;
	luio.uio_offset = 0;
	luio.uio_resid = easize;
	luio.uio_segflg = UIO_SYSSPACE;
	luio.uio_rw = UIO_READ;
	luio.uio_td = td;

	error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
	if (error) {
		free(eae, M_TEMP);
		return (error);
	}
	/* Validate disk xattrfile contents. */
	for (eap = (void *)eae, eaend = (void *)(eae + easize); eap < eaend;
	    eap = eapnext) {
		eapnext = EXTATTR_NEXT(eap);
		/* Bogusly short entry or bogusly long entry. */
		if (eap->ea_length < sizeof(*eap) || eapnext > eaend) {
			free(eae, M_TEMP);
			return (EINTEGRITY);
		}
	}
	*p = eae;
	return (0);
}

static void
ffs_lock_ea(struct vnode *vp)
{
	struct inode *ip;

	ip = VTOI(vp);
	VI_LOCK(vp);
	while (ip->i_flag & IN_EA_LOCKED) {
		UFS_INODE_SET_FLAG(ip, IN_EA_LOCKWAIT);
		msleep(&ip->i_ea_refs, &vp->v_interlock, PINOD + 2, "ufs_ea",
		    0);
	}
	UFS_INODE_SET_FLAG(ip, IN_EA_LOCKED);
	VI_UNLOCK(vp);
}

static void
ffs_unlock_ea(struct vnode *vp)
{
	struct inode *ip;

	ip = VTOI(vp);
	VI_LOCK(vp);
	if (ip->i_flag & IN_EA_LOCKWAIT)
		wakeup(&ip->i_ea_refs);
	ip->i_flag &= ~(IN_EA_LOCKED | IN_EA_LOCKWAIT);
	VI_UNLOCK(vp);
}

static int
ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	int error;

	ip = VTOI(vp);

	ffs_lock_ea(vp);
	if (ip->i_ea_area != NULL) {
		ip->i_ea_refs++;
		ffs_unlock_ea(vp);
		return (0);
	}
	dp = ip->i_din2;
	error = ffs_rdextattr(&ip->i_ea_area, vp, td);
	if (error) {
		ffs_unlock_ea(vp);
		return (error);
	}
	ip->i_ea_len = dp->di_extsize;
	ip->i_ea_error = 0;
	ip->i_ea_refs++;
	ffs_unlock_ea(vp);
	return (0);
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred, struct thread *td)
{
	struct inode *ip;
	struct uio luio;
	struct iovec liovec;
	int error;
	struct ufs2_dinode *dp;

	ip = VTOI(vp);

	ffs_lock_ea(vp);
	if (ip->i_ea_area == NULL) {
		ffs_unlock_ea(vp);
		return (EINVAL);
	}
	dp = ip->i_din2;
	error = ip->i_ea_error;
	if (commit && error == 0) {
		ASSERT_VOP_ELOCKED(vp, "ffs_close_ea commit");
		if (cred == NOCRED)
			cred = vp->v_mount->mnt_cred;
		liovec.iov_base = ip->i_ea_area;
		liovec.iov_len = ip->i_ea_len;
		luio.uio_iov = &liovec;
		luio.uio_iovcnt = 1;
		luio.uio_offset = 0;
		luio.uio_resid = ip->i_ea_len;
		luio.uio_segflg = UIO_SYSSPACE;
		luio.uio_rw = UIO_WRITE;
		luio.uio_td = td;
		/* XXX: I'm not happy about truncating to zero size */
		if (ip->i_ea_len < dp->di_extsize)
			error = ffs_truncate(vp, 0, IO_EXT, cred);
		error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
	}
	if (--ip->i_ea_refs == 0) {
		free(ip->i_ea_area, M_TEMP);
		ip->i_ea_area = NULL;
		ip->i_ea_len = 0;
		ip->i_ea_error = 0;
	}
	ffs_unlock_ea(vp);
	return (error);
}

/*
 * Vnode extattr strategy routine for fifos.
 *
 * We need to check for a read or write of the external attributes.
 * Otherwise we just fall through and do the usual thing.
 */
static int
ffsext_strategy(struct vop_strategy_args *ap)
/*
struct vop_strategy_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	struct buf *a_bp;
};
*/
{
	struct vnode *vp;
	daddr_t lbn;

	vp = ap->a_vp;
	lbn = ap->a_bp->b_lblkno;
	if (I_IS_UFS2(VTOI(vp)) && lbn < 0 && lbn >= -UFS_NXADDR)
		return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
	if (vp->v_type == VFIFO)
		return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
	panic("spec nodes went here");
}

/*
 * Vnode extattr transaction begin.
 */
static int
ffs_openextattr(struct vop_openextattr_args *ap)
/*
struct vop_openextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_closeextattr(struct vop_closeextattr_args *ap)
/*
struct vop_closeextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	int a_commit;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	if (ap->a_commit && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);

	return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td));
}

/*
 * Vnode operation to remove a named attribute.
 */
static int
ffs_deleteextattr(struct vop_deleteextattr_args *ap)
/*
vop_deleteextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct extattr *eap;
	uint32_t ul;
	int olen, error, i, easize;
	u_char *eae;
	void *tmp;

	ip = VTOI(ap->a_vp);

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error) {
		/*
		 * ffs_lock_ea is not needed here, because the vnode
		 * must be exclusively locked.
		 */
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	/* CEM: delete could be done in-place instead */
	eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    &eap, NULL);
	if (olen == -1) {
		/* delete but nonexistent */
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		return (ENOATTR);
	}
	ul = eap->ea_length;
	i = (u_char *)EXTATTR_NEXT(eap) - eae;
	bcopy(EXTATTR_NEXT(eap), eap, easize - i);
	easize -= ul;

	tmp = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(tmp, M_TEMP);
	error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to retrieve a named extended attribute.
 */
static int
ffs_getextattr(struct vop_getextattr_args *ap)
/*
vop_getextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	u_char *eae, *p;
	unsigned easize;
	int error, ealen;

	ip = VTOI(ap->a_vp);

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error)
		return (error);

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    NULL, &p);
	if (ealen >= 0) {
		error = 0;
		if (ap->a_size != NULL)
			*ap->a_size = ealen;
		else if (ap->a_uio != NULL)
			error = uiomove(p, ealen, ap->a_uio);
	} else
		error = ENOATTR;

	ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to retrieve extended attributes on a vnode.
 */
static int
ffs_listextattr(struct vop_listextattr_args *ap)
/*
vop_listextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct extattr *eap, *eaend;
	int error, ealen;

	ip = VTOI(ap->a_vp);

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error)
		return (error);

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	error = 0;
	if (ap->a_size != NULL)
		*ap->a_size = 0;

	KASSERT(ALIGNED_TO(ip->i_ea_area, struct extattr), ("unaligned"));
	eap = (struct extattr *)ip->i_ea_area;
	eaend = (struct extattr *)(ip->i_ea_area + ip->i_ea_len);
	for (; error == 0 && eap < eaend; eap = EXTATTR_NEXT(eap)) {
		KASSERT(EXTATTR_NEXT(eap) <= eaend,
		    ("extattr next %p beyond %p", EXTATTR_NEXT(eap), eaend));
		if (eap->ea_namespace != ap->a_attrnamespace)
			continue;

		ealen = eap->ea_namelength;
		if (ap->a_size != NULL)
			*ap->a_size += ealen + 1;
		else if (ap->a_uio != NULL)
			error = uiomove(&eap->ea_namelength, ealen + 1,
			    ap->a_uio);
	}

	ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Vnode operation to set a named attribute.
|
|
|
|
*/
|
2002-09-28 17:15:38 +00:00
|
|
|
static int
|
2002-08-13 10:33:57 +00:00
|
|
|
ffs_setextattr(struct vop_setextattr_args *ap)
|
|
|
|
/*
|
|
|
|
vop_setextattr {
|
|
|
|
IN struct vnode *a_vp;
|
|
|
|
IN int a_attrnamespace;
|
|
|
|
IN const char *a_name;
|
|
|
|
INOUT struct uio *a_uio;
|
|
|
|
IN struct ucred *a_cred;
|
|
|
|
IN struct thread *a_td;
|
|
|
|
};
|
|
|
|
*/
|
|
|
|
{
|
2002-08-19 07:01:55 +00:00
|
|
|
struct inode *ip;
|
|
|
|
struct fs *fs;
|
2017-01-19 16:46:05 +00:00
|
|
|
struct extattr *eap;
|
2002-08-19 07:01:55 +00:00
|
|
|
uint32_t ealength, ul;
|
2012-06-21 09:20:07 +00:00
|
|
|
ssize_t ealen;
|
|
|
|
int olen, eapad1, eapad2, error, i, easize;
|
2017-01-19 16:46:05 +00:00
|
|
|
u_char *eae;
|
|
|
|
void *tmp;
|
2002-08-19 07:01:55 +00:00
|
|
|
|
|
|
|
ip = VTOI(ap->a_vp);
|
2016-09-17 16:47:34 +00:00
|
|
|
fs = ITOFS(ip);
|
2002-08-19 07:01:55 +00:00
|
|
|
|
2009-07-01 22:30:36 +00:00
|
|
|
if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
|
2003-06-01 02:42:18 +00:00
|
|
|
return (EOPNOTSUPP);
|
|
|
|
|
2003-06-05 05:57:39 +00:00
|
|
|
if (strlen(ap->a_name) == 0)
|
|
|
|
return (EINVAL);
|
|
|
|
|
2003-07-28 18:53:29 +00:00
|
|
|
/* XXX Now unsupported API to delete EAs using NULL uio. */
|
|
|
|
if (ap->a_uio == NULL)
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
|
2007-02-21 08:50:06 +00:00
|
|
|
if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
|
|
|
|
return (EROFS);
|
|
|
|
|
2012-06-21 09:20:07 +00:00
|
|
|
ealen = ap->a_uio->uio_resid;
|
2017-02-15 19:50:26 +00:00
|
|
|
if (ealen < 0 || ealen > lblktosize(fs, UFS_NXADDR))
|
2012-06-21 09:20:07 +00:00
|
|
|
return (EINVAL);
|
|
|
|
|
2002-09-05 20:59:42 +00:00
|
|
|
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
|
2008-09-03 12:46:09 +00:00
|
|
|
ap->a_cred, ap->a_td, VWRITE);
|
2002-09-05 20:59:42 +00:00
|
|
|
if (error) {
|
2009-03-12 12:43:56 +00:00
|
|
|
/*
|
|
|
|
* ffs_lock_ea is not needed there, because the vnode
|
2009-03-27 15:46:02 +00:00
|
|
|
* must be exclusively locked.
|
2009-03-12 12:43:56 +00:00
|
|
|
*/
|
2002-09-05 20:59:42 +00:00
|
|
|
if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
|
|
|
|
ip->i_ea_error = error;
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2009-03-12 12:43:56 +00:00
|
|
|
error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
2002-09-05 20:59:42 +00:00
|
|
|
|
2003-07-28 18:53:29 +00:00
|
|
|
ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name);
|
2017-01-19 16:46:05 +00:00
|
|
|
eapad1 = roundup2(ealength, 8) - ealength;
|
|
|
|
eapad2 = roundup2(ealen, 8) - ealen;
|
2003-07-28 18:53:29 +00:00
|
|
|
ealength += eapad1 + ealen + eapad2;
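
	/*
	 * Worked example of the layout computed above: for a 7-byte
	 * name, the fixed header is 4 (ea_length) + 1 (ea_namespace) +
	 * 1 (ea_contentpadlen) + 1 (ea_namelength) + 7 = 14 bytes, so
	 * eapad1 = 2 pads the name area out to 16; a 5-byte value then
	 * needs eapad2 = 3, giving a 24-byte record in total.
	 */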

	/*
	 * CEM: rewrites of the same size or smaller could be done in-place
	 * instead.  (We don't acquire any fine-grained locks in here either,
	 * so we could also do bigger writes in-place.)
	 */
	eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    &eap, NULL);
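	/*
	 * ffs_findextattr() reports whether the name already exists in
	 * the requested namespace: olen is -1 when it does not, and the
	 * new record is appended at the end; otherwise eap points at
	 * the existing record, which is replaced (moving the tail of
	 * the area if the record changes size).
	 */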
	if (olen == -1) {
		/* new, append at end */
		KASSERT(ALIGNED_TO(eae + easize, struct extattr),
		    ("unaligned"));
		eap = (struct extattr *)(eae + easize);
		easize += ealength;
	} else {
		ul = eap->ea_length;
		i = (u_char *)EXTATTR_NEXT(eap) - eae;
		if (ul != ealength) {
			bcopy(EXTATTR_NEXT(eap), (u_char *)eap + ealength,
			    easize - i);
			easize += (ealength - ul);
		}
	}
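	/*
	 * The extended attribute area is limited to UFS_NXADDR
	 * filesystem blocks; refuse to grow it past that limit.
	 */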
	if (easize > lblktosize(fs, UFS_NXADDR)) {
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = ENOSPC;
		return (ENOSPC);
	}
	eap->ea_length = ealength;
	eap->ea_namespace = ap->a_attrnamespace;
	eap->ea_contentpadlen = eapad2;
	eap->ea_namelength = strlen(ap->a_name);
	memcpy(eap->ea_name, ap->a_name, strlen(ap->a_name));
	bzero(&eap->ea_name[strlen(ap->a_name)], eapad1);
	error = uiomove(EXTATTR_CONTENT(eap), ealen, ap->a_uio);
	if (error) {
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}
	bzero((u_char *)EXTATTR_CONTENT(eap) + ealen, eapad2);

	tmp = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(tmp, M_TEMP);
	error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to convert a vnode pointer to a file handle.
 */
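/*
 * The UFS file handle (struct ufid) identifies a file by inode number
 * and generation count; the generation lets consumers such as NFS
 * detect a stale handle after the inode has been freed and reused.
 */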
static int
ffs_vptofh(struct vop_vptofh_args *ap)
/*
vop_vptofh {
	IN struct vnode *a_vp;
	IN struct fid *a_fhp;
};
*/
{
	struct inode *ip;
	struct ufid *ufhp;

	ip = VTOI(ap->a_vp);
	ufhp = (struct ufid *)ap->a_fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}

SYSCTL_DECL(_vfs_ffs);
static int use_buf_pager = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, use_buf_pager, CTLFLAG_RWTUN, &use_buf_pager, 0,
    "Always use buffer pager instead of bmap");
static daddr_t
ffs_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
{

	return (lblkno(VFSTOUFS(vp->v_mount)->um_fs, off));
}

static int
ffs_gbp_getblksz(struct vnode *vp, daddr_t lbn)
{

	return (blksize(VFSTOUFS(vp->v_mount)->um_fs, VTOI(vp), lbn));
}

static int
ffs_getpages(struct vop_getpages_args *ap)
{
	struct vnode *vp;
	struct ufsmount *um;

	vp = ap->a_vp;
	um = VFSTOUFS(vp->v_mount);
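
	/*
	 * When the underlying device's buffer block size is no larger
	 * than a page, the generic vnode pager can translate pages via
	 * bmap; for bigger block sizes, or when vfs.ffs.use_buf_pager
	 * forces it, go through the buffer cache instead.
	 */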
	if (!use_buf_pager && um->um_devvp->v_bufobj.bo_bsize <= PAGE_SIZE)
		return (vnode_pager_generic_getpages(vp, ap->a_m, ap->a_count,
		    ap->a_rbehind, ap->a_rahead, NULL, NULL));
	return (vfs_bio_getpages(vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead, ffs_gbp_getblkno, ffs_gbp_getblksz));
}

static int
ffs_getpages_async(struct vop_getpages_async_args *ap)
{
	struct vnode *vp;
	struct ufsmount *um;
	bool do_iodone;
	int error;

	vp = ap->a_vp;
	um = VFSTOUFS(vp->v_mount);
	do_iodone = true;
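
	/*
	 * On success the generic pager completes asynchronously and
	 * invokes the caller's a_iodone itself, so our own call must be
	 * suppressed; vfs_bio_getpages() is synchronous, so in that
	 * path the completion callback is delivered here.
	 */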
	if (um->um_devvp->v_bufobj.bo_bsize <= PAGE_SIZE) {
		error = vnode_pager_generic_getpages(vp, ap->a_m, ap->a_count,
		    ap->a_rbehind, ap->a_rahead, ap->a_iodone, ap->a_arg);
		if (error == 0)
			do_iodone = false;
	} else {
		error = vfs_bio_getpages(vp, ap->a_m, ap->a_count,
		    ap->a_rbehind, ap->a_rahead, ffs_gbp_getblkno,
		    ffs_gbp_getblksz);
	}
	if (do_iodone && ap->a_iodone != NULL)
		ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);

	return (error);
}