/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2011, Lawrence Livermore National Security, LLC.
 */

Only commit the ZIL once in zpl_writepages() (msync() case).

Currently, using msync() results in the following code path:

    sys_msync -> zpl_fsync -> filemap_write_and_wait_range ->
        zpl_writepages -> write_cache_pages -> zpl_putpage

In this code path, zil_commit() is called as part of zpl_putpage().
This means that for each page, the write is handed to the DMU, the ZIL
is committed, and only then do we move on to the next page. As one
might imagine, this results in atrocious performance when there is a
large number of pages to write: instead of committing a batch of N
writes, we do N commits containing one page each. In some extreme cases
this can result in msync() being ~700 times slower than it should be,
as well as very inefficient use of ZIL resources.

This patch fixes the issue by making sure that the requested writes
are batched and then committed only once. Unfortunately, the
implementation is somewhat non-trivial because there is no way to run
write_cache_pages() in SYNC mode (so that we get all pages) without
making it wait on the writeback tag for each page.

The solution implemented here is composed of two parts:

- I added a new callback system to the ZIL, which allows the caller to
be notified when its ITX gets written to stable storage. One nice
thing is that the callback is called not only in zil_commit() but
in zil_sync() as well, which means that the caller doesn't have to
care whether the write ended up in the ZIL or the DMU: it will get
notified as soon as it's safe, period. This is an improvement over
dmu_tx_callback_register(), which was used previously and only
supports DMU writes. The rationale for this change is to allow
zpl_putpage() to be notified when a ZIL commit completes without
having to block on zil_commit() itself.

- zpl_writepages() now calls write_cache_pages() in non-SYNC mode,
which prevents (1) write_cache_pages() from blocking, and
(2) zpl_putpage() from issuing ZIL commits. zpl_writepages() issues
the commit itself instead of relying on zpl_putpage() to do it, thus
nicely batching the writes (see the sketch below). Note, however,
that we still have to call write_cache_pages() again in SYNC mode
because of an edge case documented in the implementation of
write_cache_pages(), whereby it will not give us all dirty pages when
running in non-SYNC mode. Thus we need to run it at least once in
SYNC mode to make sure we honor persistence guarantees. This only
happens when pages are modified while msync() is running, which
should be rare. In most cases there won't be any additional pages and
the second call will do nothing.
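
A minimal sketch of the resulting control flow, assuming the helpers
used elsewhere in the ZPL (ITOZ(), ITOZSB(), and the zsb->z_log /
zp->z_id fields are assumptions here); this illustrates the batching
described above rather than reproducing the patch verbatim:

```
static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        znode_t *zp = ITOZ(mapping->host);
        zfs_sb_t *zsb = ITOZSB(mapping->host);
        enum writeback_sync_modes sync_mode = wbc->sync_mode;
        int result;

        /* First pass: hand every dirty page to the DMU without blocking. */
        wbc->sync_mode = WB_SYNC_NONE;
        result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);

        if (sync_mode != wbc->sync_mode) {
                /* One ZIL commit for the whole batch of pages. */
                zil_commit(zsb->z_log, zp->z_id);

                /*
                 * Second pass in SYNC mode: usually a no-op, but it
                 * catches pages dirtied while the first pass ran.
                 */
                wbc->sync_mode = sync_mode;
                result = write_cache_pages(mapping, wbc, zpl_putpage,
                    mapping);
        }
        return (result);
}
```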
Note that this change also fixes a bug related to #907, whereby calling
msync() on pages that were already handed over to the DMU in a previous
writepages() call would make msync() block until the next TXG sync
instead of returning as soon as the ZIL commit is complete. The new
callback system fixes that problem.
Signed-off-by: Richard Yao <ryao@gentoo.org>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #1849
Closes #907
#include <sys/dmu_objset.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_znode.h>
#include <sys/zpl.h>

static int
zpl_open(struct inode *ip, struct file *filp)
{
        cred_t *cr = CRED();
        int error;

        error = generic_file_open(ip, filp);
        if (error)
                return (error);

        crhold(cr);
        error = -zfs_open(ip, filp->f_mode, filp->f_flags, cr);
        crfree(cr);
        ASSERT3S(error, <=, 0);

        return (error);
}

static int
zpl_release(struct inode *ip, struct file *filp)
{
        cred_t *cr = CRED();
        int error;

        if (ITOZ(ip)->z_atime_dirty)
                zfs_mark_inode_dirty(ip);

        crhold(cr);
        error = -zfs_close(ip, filp->f_flags, cr);
        crfree(cr);
        ASSERT3S(error, <=, 0);

        return (error);
}

static int
zpl_iterate(struct file *filp, struct dir_context *ctx)
{
        struct dentry *dentry = filp->f_path.dentry;
        cred_t *cr = CRED();
        int error;

        crhold(cr);
        error = -zfs_readdir(dentry->d_inode, ctx, cr);
        crfree(cr);
        ASSERT3S(error, <=, 0);

        return (error);
}

#if !defined(HAVE_VFS_ITERATE)
static int
zpl_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
        struct dir_context ctx = DIR_CONTEXT_INIT(dirent, filldir, filp->f_pos);
        int error;

        error = zpl_iterate(filp, &ctx);
        filp->f_pos = ctx.pos;

        return (error);
}
#endif /* HAVE_VFS_ITERATE */
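
For kernels that predate the vfs iterate interface, the compat shim
assumed above would look roughly like the following sketch (the exact
definition lives in the ZFS compat headers and may differ):

```
/* Sketch of the assumed compat shim for pre-iterate kernels. */
struct dir_context {
        void *dirent;           /* opaque cookie handed to the actor */
        const filldir_t actor;  /* legacy per-entry callback */
        loff_t pos;             /* current directory offset */
};

#define DIR_CONTEXT_INIT(_dirent, _actor, _pos) {       \
        .dirent = _dirent,                              \
        .actor = _actor,                                \
        .pos = _pos,                                    \
}
```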

#if defined(HAVE_FSYNC_WITH_DENTRY)
/*
 * Linux 2.6.x - 2.6.34 API,
 * Through 2.6.34 the nfsd kernel server would pass a NULL 'file struct *'
 * to the fops->fsync() hook.  For this reason, we must be careful not to
 * use filp unconditionally.
 */
static int
zpl_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
        cred_t *cr = CRED();
        int error;

        crhold(cr);
        error = -zfs_fsync(dentry->d_inode, datasync, cr);
        crfree(cr);
        ASSERT3S(error, <=, 0);

        return (error);
}

Linux AIO Support

nfsd uses do_readv_writev() to implement fops->read and fops->write.
do_readv_writev() will attempt to read/write using fops->aio_read and
fops->aio_write, but it will fall back to fops->read and fops->write
when AIO is not available. However, the fallback will perform a call
for each individual data page. Since our default recordsize is 128KB,
sequential operations on NFS will generate 32 DMU transactions where
only 1 transaction was needed. That was unnecessary overhead, and we
implement fops->aio_read and fops->aio_write to eliminate it.

ZFS originated in OpenSolaris, where the AIO API is entirely
implemented in userland's libc by intelligently mapping the calls to
VOP_WRITE, VOP_READ and VOP_FSYNC. Linux implements AIO inside the
kernel itself. Linux filesystems therefore must implement their own
AIO logic, and nearly all of them implement fops->aio_write
synchronously. Consequently, they do not implement aio_fsync().
However, since the ZPL works by mapping Linux's VFS calls to the
functions implementing Illumos' VFS operations, we instead implement
AIO in the kernel by mapping the operations to the VOP_READ, VOP_WRITE
and VOP_FSYNC equivalents. We therefore implement fops->aio_fsync.

One might be inclined to make our fops->aio_write implementation
synchronous to make software that expects this behavior safe. However,
there are several reasons not to do this:

1. Other platforms do not implement aio_write() synchronously, and
since the majority of userland software using AIO should be cross
platform, expectations of synchronous behavior should not be a
problem.

2. We would hurt the performance of programs that use POSIX interfaces
properly while simultaneously encouraging the creation of more
non-compliant software.

3. The broader community concluded that userland software should be
patched to properly use POSIX interfaces instead of implementing hacks
in filesystems to cater to broken software. This concept is best
described as the O_PONIES debate.

4. Making an asynchronous write synchronous is a non sequitur.

Any software dependent on synchronous aio_write behavior will suffer
data loss on ZFSOnLinux in a kernel panic / system failure of at most
zfs_txg_timeout seconds, which by default is 5 seconds. This seems
like a reasonable consequence of using non-compliant software.

It should be noted that this is also a problem in the kernel itself,
where nfsd does not pass O_SYNC on files opened with it and instead
relies on an open()/write()/close() sequence to enforce synchronous
behavior when the flush is only guaranteed on last close.

Exporting any filesystem that does not implement AIO via NFS risks
data loss in the event of a kernel panic / system failure when
something else is also accessing the file. Exporting any file system
that implements AIO the way this patch does bears similar risk.
However, it seems reasonable to forgo crippling our AIO implementation
in favor of developing patches to fix this problem in Linux's nfsd for
the reasons stated earlier. In the interim, the risk will remain.
Failing to implement AIO will not change the problem that nfsd
created, so there is no reason for nfsd's mistake to block our
implementation of AIO.

It also should be noted that `aio_cancel()` will always return
`AIO_NOTCANCELED` under this implementation. It is possible to
implement aio_cancel by deferring work to taskqs and using
`kiocb_set_cancel_fn()` to set a callback function for cancelling work
sent to taskqs, but the simpler approach is allowed by the
specification:

```
Which operations are cancelable is implementation-defined.
```

http://pubs.opengroup.org/onlinepubs/009695399/functions/aio_cancel.html

According to a recursive grep of my system's `/usr/src/debug`, the
only programs on my system capable of using `aio_cancel()` are QEMU,
beecrypt and fio. That suggests that `aio_cancel()` users are rare.
Implementing aio_cancel() is left to a future date, when it is clear
that there are consumers that would benefit enough to justify the
work.

Lastly, it is important to know that handling of the iovec updates
differs between Illumos and Linux in the implementation of read/write.
On Linux, it is the VFS' responsibility, while on Illumos it is the
filesystem's responsibility. We take the intermediate solution of
copying the iovec so that the ZFS code can update it like on Solaris
while leaving the originals alone. This imposes some overhead. We
could always revisit this should profiling show that the allocations
are a problem.

Signed-off-by: Richard Yao <ryao@gentoo.org>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #223
Closes #2373
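
To make the portability point concrete: a compliant application must
not assume aio_write() is durable on return; it should request the
flush itself via aio_fsync() (or fsync()). A minimal userland sketch
(write_durably() and await() are hypothetical names, not part of this
patch):

```
#include <aio.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>

/* Wait for one outstanding aio control block to finish. */
static int
await(struct aiocb *cb)
{
        const struct aiocb *list[1] = { cb };

        while (aio_error(cb) == EINPROGRESS)
                aio_suspend(list, 1, NULL);
        return (aio_return(cb) < 0 ? -1 : 0);
}

int
write_durably(int fd, const void *buf, size_t len, off_t off)
{
        struct aiocb wr, fs;

        memset(&wr, 0, sizeof (wr));
        wr.aio_fildes = fd;
        wr.aio_buf = (volatile void *)buf;
        wr.aio_nbytes = len;
        wr.aio_offset = off;
        if (aio_write(&wr) != 0 || await(&wr) != 0)
                return (-1);

        /* Durability comes from aio_fsync(), not from aio_write(). */
        memset(&fs, 0, sizeof (fs));
        fs.aio_fildes = fd;
        if (aio_fsync(O_SYNC, &fs) != 0 || await(&fs) != 0)
                return (-1);

        return (0);
}
```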

static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
        struct file *filp = kiocb->ki_filp;

        return (zpl_fsync(filp, filp->f_path.dentry, datasync));
}

#elif defined(HAVE_FSYNC_WITHOUT_DENTRY)
/*
 * Linux 2.6.35 - 3.0 API,
 * As of 2.6.35 the dentry argument to the fops->fsync() hook was deemed
 * redundant.  The dentry is still accessible via filp->f_path.dentry,
 * and we are guaranteed that filp will never be NULL.
 */
static int
zpl_fsync(struct file *filp, int datasync)
{
        struct inode *inode = filp->f_mapping->host;
        cred_t *cr = CRED();
        int error;

        crhold(cr);
        error = -zfs_fsync(inode, datasync, cr);
        crfree(cr);
        ASSERT3S(error, <=, 0);

        return (error);
}

static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
        return (zpl_fsync(kiocb->ki_filp, datasync));
}

#elif defined(HAVE_FSYNC_RANGE)
/*
 * Linux 3.1 - 3.x API,
 * As of 3.1 the responsibility to call filemap_write_and_wait_range() has
 * been pushed down in to the .fsync() vfs hook.  Additionally, the i_mutex
 * lock is no longer held by the caller, for zfs we don't require the lock
 * to be held so we don't acquire it.
 */
static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = filp->f_mapping->host;
        cred_t *cr = CRED();
        int error;

        error = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (error)
                return (error);

        crhold(cr);
        error = -zfs_fsync(inode, datasync, cr);
        crfree(cr);
        ASSERT3S(error, <=, 0);

        return (error);
}

static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
        return (zpl_fsync(kiocb->ki_filp, kiocb->ki_pos,
            kiocb->ki_pos + kiocb->ki_nbytes, datasync));
}

#else
#error "Unsupported fops->fsync() implementation"
#endif
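
Whichever variant the configure checks select, the handlers above get
wired into the VFS through a file_operations table. An abridged sketch
for orientation (the member names come from the kernel APIs of this
era; the authoritative table is defined elsewhere in zpl_file.c and
contains additional members):

```
/* Abridged sketch; not the complete table. */
const struct file_operations zpl_file_operations = {
        .open           = zpl_open,
        .release        = zpl_release,
        .read           = zpl_read,
        .aio_read       = zpl_aio_read,
        .fsync          = zpl_fsync,
        .aio_fsync      = zpl_aio_fsync,
};
```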

static inline ssize_t
zpl_read_common_iovec(struct inode *ip, const struct iovec *iovp, size_t count,
    unsigned long nr_segs, loff_t *ppos, uio_seg_t segment,
    int flags, cred_t *cr)
{
        ssize_t read;
        uio_t uio;
        int error;

        uio.uio_iov = (struct iovec *)iovp;
        uio.uio_resid = count;
        uio.uio_iovcnt = nr_segs;
        uio.uio_loffset = *ppos;
        uio.uio_limit = MAXOFFSET_T;
        uio.uio_segflg = segment;

        error = -zfs_read(ip, &uio, flags, cr);
        if (error < 0)
                return (error);

        read = count - uio.uio_resid;
        *ppos += read;
        task_io_account_read(read);

        return (read);
}

inline ssize_t
zpl_read_common(struct inode *ip, const char *buf, size_t len, loff_t *ppos,
    uio_seg_t segment, int flags, cred_t *cr)
{
        struct iovec iov;

        iov.iov_base = (void *)buf;
        iov.iov_len = len;

        return (zpl_read_common_iovec(ip, &iov, len, 1, ppos, segment,
            flags, cr));
}

static ssize_t
zpl_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
        cred_t *cr = CRED();
        ssize_t read;

        crhold(cr);
        read = zpl_read_common(filp->f_mapping->host, buf, len, ppos,
            UIO_USERSPACE, filp->f_flags, cr);
        crfree(cr);

        return (read);
}
|
|
|
|
|
|
|
|
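For context, a minimal userland sketch of the interface these handlers
serve, using Linux native AIO via libaio (io_submit() is what actually
enters fops->aio_write). This is illustrative only and not part of this
file; the path /tmp/aio_demo is arbitrary.
```c
/* Build with: cc native_aio.c -laio */
#include <libaio.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	static char buf[] = "hello, native aio\n";
	io_context_t ctx;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	int fd = open("/tmp/aio_demo", O_CREAT | O_WRONLY, 0644);

	if (fd < 0)
		return (1);

	memset(&ctx, 0, sizeof (ctx));
	if (io_setup(1, &ctx) != 0)		/* create the AIO context */
		return (1);

	io_prep_pwrite(&cb, fd, buf, sizeof (buf) - 1, 0);
	if (io_submit(ctx, 1, cbs) != 1)	/* reaches fops->aio_write */
		return (1);
	if (io_getevents(ctx, 1, 1, &ev, NULL) != 1)
		return (1);

	printf("wrote %ld bytes\n", (long)ev.res);
	io_destroy(ctx);
	close(fd);
	return (0);
}
```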
static ssize_t
zpl_aio_read(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, loff_t pos)
{
	cred_t *cr = CRED();
	struct file *filp = kiocb->ki_filp;
	size_t count = kiocb->ki_nbytes;
	ssize_t read;
	size_t alloc_size = sizeof (struct iovec) * nr_segs;
	struct iovec *iov_tmp = kmem_alloc(alloc_size, KM_SLEEP);

	/*
	 * Copy the iovec so the ZFS code may update it, as on Illumos,
	 * without touching the caller's original (see the commit
	 * message above).
	 */
	bcopy(iovp, iov_tmp, alloc_size);

	ASSERT(iovp);

	crhold(cr);
	read = zpl_read_common_iovec(filp->f_mapping->host, iov_tmp, count,
	    nr_segs, &kiocb->ki_pos, UIO_USERSPACE, filp->f_flags, cr);
	crfree(cr);

	kmem_free(iov_tmp, alloc_size);

	return (read);
}
static inline ssize_t
zpl_write_common_iovec(struct inode *ip, const struct iovec *iovp, size_t count,
    unsigned long nr_segs, loff_t *ppos, uio_seg_t segment,
    int flags, cred_t *cr)
{
	ssize_t wrote;
	uio_t uio;
	int error;
	uio.uio_iov = (struct iovec *)iovp;
	uio.uio_resid = count;
	uio.uio_iovcnt = nr_segs;
	uio.uio_loffset = *ppos;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = segment;

	error = -zfs_write(ip, &uio, flags, cr);
	if (error < 0)
		return (error);
	wrote = count - uio.uio_resid;
	*ppos += wrote;
	task_io_account_write(wrote);

	return (wrote);
}
inline ssize_t
zpl_write_common(struct inode *ip, const char *buf, size_t len, loff_t *ppos,
    uio_seg_t segment, int flags, cred_t *cr)
{
	struct iovec iov;

	iov.iov_base = (void *)buf;
	iov.iov_len = len;

	return (zpl_write_common_iovec(ip, &iov, len, 1, ppos, segment,
	    flags, cr));
}

static ssize_t
zpl_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	cred_t *cr = CRED();
	ssize_t wrote;

	crhold(cr);
	wrote = zpl_write_common(filp->f_mapping->host, buf, len, ppos,
	    UIO_USERSPACE, filp->f_flags, cr);
	crfree(cr);
	return (wrote);
}

static ssize_t
zpl_aio_write(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, loff_t pos)
{
	cred_t *cr = CRED();
	struct file *filp = kiocb->ki_filp;
	size_t count = kiocb->ki_nbytes;
	ssize_t wrote;
	size_t alloc_size = sizeof (struct iovec) * nr_segs;
	struct iovec *iov_tmp = kmem_alloc(alloc_size, KM_SLEEP);

	/* As in zpl_aio_read(), operate on a private copy of the iovec. */
	bcopy(iovp, iov_tmp, alloc_size);

	ASSERT(iovp);

	crhold(cr);
	wrote = zpl_write_common_iovec(filp->f_mapping->host, iov_tmp, count,
	    nr_segs, &kiocb->ki_pos, UIO_USERSPACE, filp->f_flags, cr);
	crfree(cr);

	kmem_free(iov_tmp, alloc_size);

	return (wrote);
}
static loff_t
zpl_llseek(struct file *filp, loff_t offset, int whence)
{
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
	if (whence == SEEK_DATA || whence == SEEK_HOLE) {
		struct inode *ip = filp->f_mapping->host;
		loff_t maxbytes = ip->i_sb->s_maxbytes;
		loff_t error;

		spl_inode_lock(ip);
		error = -zfs_holey(ip, whence, &offset);
		if (error == 0)
			error = lseek_execute(filp, ip, offset, maxbytes);
		spl_inode_unlock(ip);

		return (error);
	}
#endif /* SEEK_HOLE && SEEK_DATA */

	return (generic_file_llseek(filp, offset, whence));
}
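Since zpl_llseek() wires zfs_holey() to the generic SEEK_HOLE/SEEK_DATA
interface, here is a short userland sketch of how a consumer might
enumerate the data extents of a sparse file. Illustrative only, not part
of this file; the file name is arbitrary.
```c
#define _GNU_SOURCE	/* SEEK_DATA / SEEK_HOLE */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "sparse.dat", O_RDONLY);
	off_t data = 0, hole;

	if (fd < 0)
		return (1);

	/* Each iteration reports one data extent: [data, hole). */
	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: [%jd, %jd)\n", (intmax_t)data, (intmax_t)hole);
		data = hole;
	}

	close(fd);
	return (0);
}
```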
/*
 * It's worth taking a moment to describe how mmap is implemented
 * for zfs because it differs considerably from other Linux filesystems.
 * However, this issue is handled the same way under OpenSolaris.
 *
 * The issue is that by design zfs bypasses the Linux page cache and
 * leaves all caching up to the ARC. This has been shown to work
 * well for the common read(2)/write(2) case. However, mmap(2)
 * is a problem because it relies on being tightly integrated with the
 * page cache. To handle this we cache mmap'ed files twice, once in
 * the ARC and a second time in the page cache. The code is careful
 * to keep both copies synchronized.
 *
 * When a file with an mmap'ed region is written to using write(2)
 * both the data in the ARC and existing pages in the page cache
 * are updated. For a read(2) data will be read first from the page
 * cache then the ARC if needed. Neither a write(2) nor a read(2)
 * will ever result in new pages being added to the page cache.
 *
 * New pages are added to the page cache only via .readpage() which
 * is called when the vfs needs to read a page off disk to back the
 * virtual memory region. These pages may be modified without
 * notifying the ARC and will be written out periodically via
 * .writepage(). This will occur due to either a sync or the usual
 * page aging behavior. Note that because a read(2) of an mmap'ed
 * file always checks the page cache first, correct data will be
 * returned even when the ARC is out of date.
 *
 * While this implementation ensures correct behavior it does have
 * some drawbacks. The most obvious is that it increases the
 * required memory footprint when accessing mmap'ed files. It also
 * adds additional complexity to the code by keeping both caches
 * synchronized.
 *
 * Longer term it may be possible to cleanly resolve this wart by
 * mapping page cache pages directly onto the ARC buffers. The
 * Linux address space operations are flexible enough to allow
 * selection of which pages back a particular index. The trick
 * would be working out the details of which subsystem is in
 * charge, the ARC, the page cache, or both. It may also prove
 * helpful to move the ARC buffers to scatter-gather lists
 * rather than a vmalloc'ed region.
 */
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct inode *ip = filp->f_mapping->host;
	znode_t *zp = ITOZ(ip);
	int error;

	error = -zfs_map(ip, vma->vm_pgoff, (caddr_t *)vma->vm_start,
	    (size_t)(vma->vm_end - vma->vm_start), vma->vm_flags);
	if (error)
		return (error);

	error = generic_file_mmap(filp, vma);
	if (error)
		return (error);

	mutex_enter(&zp->z_lock);
	zp->z_is_mapped = 1;
	mutex_exit(&zp->z_lock);

	return (error);
}
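To make the coherence described in the comment above concrete, a
userland sketch (illustrative only, not part of this file; the path is
arbitrary): a store through write(2) must be visible through a
pre-existing shared mapping, because both the ARC and page cache copies
are updated.
```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/tank/fs/demo", O_CREAT | O_RDWR, 0644);
	char *map;

	if (fd < 0 || ftruncate(fd, 4096) != 0)
		return (1);

	map = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return (1);

	/* Update via write(2); the existing mapping must observe it. */
	if (pwrite(fd, "synced", 6, 0) != 6)
		return (1);

	printf("mapping sees: %.6s\n", map);	/* prints "synced" */

	munmap(map, 4096);
	close(fd);
	return (0);
}
```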
/*
 * Populate a page with data for the Linux page cache. This function is
 * only used to support mmap(2). There will be an identical copy of the
 * data in the ARC which is kept up to date via .write() and .writepage().
 *
 * Currently this function relies on zpl_read_common() and the O_DIRECT
 * flag to read in a page. This works, but the more correct way is to
 * update zfs_fillpage() to be Linux friendly and use that interface.
 */
static int
zpl_readpage(struct file *filp, struct page *pp)
{
	struct inode *ip;
	struct page *pl[1];
	int error = 0;

	ASSERT(PageLocked(pp));
	ip = pp->mapping->host;
	pl[0] = pp;

	error = -zfs_getpage(ip, pl, 1);

	if (error) {
		SetPageError(pp);
		ClearPageUptodate(pp);
	} else {
		ClearPageError(pp);
		SetPageUptodate(pp);
		flush_dcache_page(pp);
	}

	unlock_page(pp);
	return (error);
}
/*
 * Populate a set of pages with data for the Linux page cache. This
 * function will only be called for read ahead and never for demand
 * paging. For simplicity, the code relies on read_cache_pages() to
 * correctly lock each page for IO and call zpl_readpage().
 */
static int
zpl_readpages(struct file *filp, struct address_space *mapping,
    struct list_head *pages, unsigned nr_pages)
{
	return (read_cache_pages(mapping, pages,
	    (filler_t *)zpl_readpage, filp));
}
int
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
	struct address_space *mapping = data;

	ASSERT(PageLocked(pp));
	ASSERT(!PageWriteback(pp));
	ASSERT(!(current->flags & PF_NOFS));

	/*
	 * Annotate this call path with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock. KM_PUSHPAGE should be used instead.
	 */
	current->flags |= PF_NOFS;
	(void) zfs_putpage(mapping->host, pp, wbc);
	current->flags &= ~PF_NOFS;

	return (0);
}
static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	znode_t *zp = ITOZ(mapping->host);
	zfs_sb_t *zsb = ITOZSB(mapping->host);
	enum writeback_sync_modes sync_mode;
	int result;

	ZFS_ENTER(zsb);
	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;
	ZFS_EXIT(zsb);
	sync_mode = wbc->sync_mode;

	/*
	 * We don't want to run write_cache_pages() in SYNC mode here, because
	 * that would make putpage() wait for a single page to be committed to
	 * disk every single time, resulting in atrocious performance. Instead
	 * we run it once in non-SYNC mode so that the ZIL gets all the data,
	 * and then we commit it all in one go.
	 */
	wbc->sync_mode = WB_SYNC_NONE;
	result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
	if (sync_mode != wbc->sync_mode) {
		ZFS_ENTER(zsb);
		ZFS_VERIFY_ZP(zp);
		if (zsb->z_log != NULL)
			zil_commit(zsb->z_log, zp->z_id);
		ZFS_EXIT(zsb);

		/*
		 * We need to call write_cache_pages() again (we can't just
		 * return after the commit) because the previous call in
		 * non-SYNC mode does not guarantee that we got all the dirty
		 * pages (see the implementation of write_cache_pages() for
		 * details). That being said, this is a no-op in most cases.
		 */
		wbc->sync_mode = sync_mode;
		result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
	}
	return (result);
}
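The batching above is what makes a large msync() affordable: the whole
dirty range is pushed through the DMU first and the ZIL is committed in
one go. A userland sketch of that call path (illustrative only, not part
of this file; the path and sizes are arbitrary):
```c
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	long pgsz = sysconf(_SC_PAGESIZE);
	size_t len = 1024 * (size_t)pgsz;	/* dirty 1024 pages */
	int fd = open("/tank/fs/msync_demo", O_CREAT | O_RDWR, 0644);
	char *map;
	size_t i;

	if (fd < 0 || ftruncate(fd, len) != 0)
		return (1);

	map = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return (1);

	for (i = 0; i < len; i += pgsz)		/* touch every page */
		map[i] = 1;

	/* One synchronous flush; zpl_writepages() commits the ZIL once. */
	if (msync(map, len, MS_SYNC) != 0)
		return (1);

	munmap(map, len);
	close(fd);
	return (0);
}
```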
/*
 * Write out dirty pages to the ARC, this function is only required to
 * support mmap(2). Mapped pages may be dirtied by memory operations
 * which never call .write(). These dirty pages are kept in sync with
 * the ARC buffers via this hook.
 */
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
	if (ITOZSB(pp->mapping->host)->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;

	return (zpl_putpage(pp, wbc, pp->mapping));
}
/*
 * The only flag combination which matches the behavior of zfs_space()
 * is FALLOC_FL_PUNCH_HOLE. This flag was introduced in the 2.6.38 kernel.
 */
long
|
|
|
|
zpl_fallocate_common(struct inode *ip, int mode, loff_t offset, loff_t len)
|
|
|
|
{
	cred_t *cr = CRED();
	int error = -EOPNOTSUPP;

	if (mode & FALLOC_FL_KEEP_SIZE)
		return (-EOPNOTSUPP);

	crhold(cr);

#ifdef FALLOC_FL_PUNCH_HOLE
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		flock64_t bf;

		bf.l_type = F_WRLCK;
		bf.l_whence = 0;
		bf.l_start = offset;
		bf.l_len = len;
		bf.l_pid = 0;

		error = -zfs_space(ip, F_FREESP, &bf, FWRITE, offset, cr);
	}
#endif /* FALLOC_FL_PUNCH_HOLE */

	crfree(cr);

	ASSERT3S(error, <=, 0);
	return (error);
}

#ifdef HAVE_FILE_FALLOCATE
static long
zpl_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
{
	return zpl_fallocate_common(filp->f_path.dentry->d_inode,
	    mode, offset, len);
}
#endif /* HAVE_FILE_FALLOCATE */
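
/*
 * Illustrative userland sketch (an assumption for clarity, not part of
 * the original source): the handlers above are reached via fallocate(2).
 * Per the fallocate(2) man page, FALLOC_FL_PUNCH_HOLE is passed together
 * with FALLOC_FL_KEEP_SIZE, e.g.:
 *
 *	int fd = open("/tank/fs/file", O_RDWR);
 *	if (fd >= 0 && fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *	    FALLOC_FL_KEEP_SIZE, 4096, 65536) == -1)
 *		perror("fallocate");	// may report EOPNOTSUPP
 */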

/*
 * Map zfs file z_pflags (xvattr_t) to Linux file attributes. Only file
 * attributes common to both Linux and Solaris are mapped.
 */
static int
zpl_ioctl_getflags(struct file *filp, void __user *arg)
{
	struct inode *ip = filp->f_dentry->d_inode;
	unsigned int ioctl_flags = 0;
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;

	if (zfs_flags & ZFS_IMMUTABLE)
		ioctl_flags |= FS_IMMUTABLE_FL;

	if (zfs_flags & ZFS_APPENDONLY)
		ioctl_flags |= FS_APPEND_FL;

	if (zfs_flags & ZFS_NODUMP)
		ioctl_flags |= FS_NODUMP_FL;

	ioctl_flags &= FS_FL_USER_VISIBLE;

	/*
	 * copy_to_user() returns the number of bytes it could not copy,
	 * not a negative errno, so map any failure to -EFAULT.
	 */
	if (copy_to_user(arg, &ioctl_flags, sizeof (ioctl_flags)))
		return (-EFAULT);

	return (0);
}

/*
 * fchange() is a helper macro to detect if we have been asked to change a
 * flag. This is ugly, but the requirement that we do this is a consequence of
 * how the Linux file attribute interface was designed. Another consequence is
 * that concurrent modification of files suffers from a TOCTOU race. Neither
 * are things we can fix without modifying the kernel-userland interface, which
 * is outside of our jurisdiction.
 */
#define	fchange(f0, f1, b0, b1) ((((f0) & (b0)) == (b0)) != \
	(((f1) & (b1)) == (b1)))
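
/*
 * For example (illustrative only): if the file is currently immutable
 * (zfs_flags contains ZFS_IMMUTABLE) and userland passes ioctl_flags
 * with FS_IMMUTABLE_FL clear, then
 *
 *	fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE)
 *
 * evaluates to 1: a change is being requested, so CAP_LINUX_IMMUTABLE
 * is required below.  When the requested and current states agree the
 * macro evaluates to 0 and no privilege check is needed.
 */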

static int
zpl_ioctl_setflags(struct file *filp, void __user *arg)
{
	struct inode *ip = filp->f_dentry->d_inode;
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	unsigned int ioctl_flags;
	cred_t *cr = CRED();
	xvattr_t xva;
	xoptattr_t *xoap;
	int error;

	if (copy_from_user(&ioctl_flags, arg, sizeof (ioctl_flags)))
		return (-EFAULT);

	if ((ioctl_flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL)))
		return (-EOPNOTSUPP);

	if ((ioctl_flags & ~(FS_FL_USER_MODIFIABLE)))
		return (-EACCES);

	if ((fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE) ||
	    fchange(ioctl_flags, zfs_flags, FS_APPEND_FL, ZFS_APPENDONLY)) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return (-EACCES);

	if (!zpl_inode_owner_or_capable(ip))
		return (-EACCES);

	xva_init(&xva);
	xoap = xva_getxoptattr(&xva);

	XVA_SET_REQ(&xva, XAT_IMMUTABLE);
	if (ioctl_flags & FS_IMMUTABLE_FL)
		xoap->xoa_immutable = B_TRUE;

	XVA_SET_REQ(&xva, XAT_APPENDONLY);
	if (ioctl_flags & FS_APPEND_FL)
		xoap->xoa_appendonly = B_TRUE;

	XVA_SET_REQ(&xva, XAT_NODUMP);
	if (ioctl_flags & FS_NODUMP_FL)
		xoap->xoa_nodump = B_TRUE;

	crhold(cr);
	error = -zfs_setattr(ip, (vattr_t *)&xva, 0, cr);
	crfree(cr);

	return (error);
}
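
/*
 * Illustrative userland sketch (an assumption for clarity, not part of
 * the original source): the two handlers above implement the standard
 * lsattr(1)/chattr(1) style interface, e.g. marking a file append-only:
 *
 *	unsigned int flags;
 *
 *	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
 *		flags |= FS_APPEND_FL;
 *		if (ioctl(fd, FS_IOC_SETFLAGS, &flags) == -1)
 *			perror("FS_IOC_SETFLAGS");
 *	}
 */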

static long
zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return (zpl_ioctl_getflags(filp, (void *)arg));
	case FS_IOC_SETFLAGS:
		return (zpl_ioctl_setflags(filp, (void *)arg));
	default:
		return (-ENOTTY);
	}
}

#ifdef CONFIG_COMPAT
static long
zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	return (zpl_ioctl(filp, cmd, arg));
}
#endif /* CONFIG_COMPAT */

const struct address_space_operations zpl_address_space_operations = {
	.readpages = zpl_readpages,
	.readpage = zpl_readpage,
	.writepage = zpl_writepage,
	.writepages = zpl_writepages,
};

const struct file_operations zpl_file_operations = {
	.open = zpl_open,
	.release = zpl_release,
	.llseek = zpl_llseek,
	.read = zpl_read,
	.write = zpl_write,
Linux AIO Support
nfsd uses do_readv_writev() to implement fops->read and fops->write.
do_readv_writev() will attempt to read/write using fops->aio_read and
fops->aio_write, but it will fall back to fops->read and fops->write when
AIO is not available. However, the fallback will perform a call for each
individual data page. Since our default recordsize is 128KB, sequential
operations on NFS will generate 32 DMU transactions where only 1
transaction was needed. That was unnecessary overhead, so we implement
fops->aio_read and fops->aio_write to eliminate it.
ZFS originated in OpenSolaris, where the AIO API is entirely implemented
in userland's libc by intelligently mapping it to VOP_WRITE, VOP_READ
and VOP_FSYNC. Linux implements AIO inside the kernel itself. Linux
filesystems therefore must implement their own AIO logic, and nearly all
of them implement fops->aio_write synchronously. Consequently, they do
not implement aio_fsync(). However, since the ZPL works by mapping
Linux's VFS calls to the functions implementing Illumos' VFS operations,
we instead implement AIO in the kernel by mapping the operations to the
VOP_READ, VOP_WRITE and VOP_FSYNC equivalents. We therefore implement
fops->aio_fsync.
One might be inclined to make our fops->aio_write implementation
synchronous to make software that expects this behavior safe. However,
there are several reasons not to do this:
1. Other platforms do not implement aio_write() synchronously, and since
the majority of userland software using AIO should be cross platform,
expectations of synchronous behavior should not be a problem.
2. We would hurt the performance of programs that use POSIX interfaces
properly while simultaneously encouraging the creation of more
non-compliant software.
3. The broader community concluded that userland software should be
patched to properly use POSIX interfaces instead of implementing hacks
in filesystems to cater to broken software. This concept is best
described as the O_PONIES debate.
4. Making an asynchronous write synchronous is a non sequitur.
Any software dependent on synchronous aio_write behavior will suffer
data loss on ZFSOnLinux in a kernel panic / system failure of at most
zfs_txg_timeout seconds, which by default is 5 seconds. This seems like
a reasonable consequence of using non-compliant software.
It should be noted that this is also a problem in the kernel itself,
where nfsd does not pass O_SYNC on files opened with it and instead
relies on an open()/write()/close() sequence to enforce synchronous
behavior when the flush is only guaranteed on last close.
Exporting any filesystem that does not implement AIO via NFS risks data
loss in the event of a kernel panic / system failure when something else
is also accessing the file. Exporting any file system that implements
AIO the way this patch does bears similar risk. However, it seems
reasonable to forgo crippling our AIO implementation in favor of
developing patches to fix this problem in Linux's nfsd for the reasons
stated earlier. In the interim, the risk will remain. Failing to
implement AIO will not change the problem that nfsd created, so there is
no reason for nfsd's mistake to block our implementation of AIO.
It also should be noted that `aio_cancel()` will always return
`AIO_NOTCANCELED` under this implementation. It is possible to implement
aio_cancel by deferring work to taskqs and using `kiocb_set_cancel_fn()`
to set a callback function for cancelling work sent to taskqs, but the
simpler approach is allowed by the specification:
```
Which operations are cancelable is implementation-defined.
```
http://pubs.opengroup.org/onlinepubs/009695399/functions/aio_cancel.html
The only programs on my system that are capable of using `aio_cancel()`
are QEMU, beecrypt and fio, according to a recursive grep of my
system's `/usr/src/debug`. That suggests that `aio_cancel()` users are
rare. Implementing aio_cancel() is left to a future date when it is
clear that there are consumers that would benefit from it enough to
justify the work.
Lastly, it is important to know that handling of the iovec updates differs
between Illumos and Linux in the implementation of read/write. On Linux,
it is the VFS' responsibility while on Illumos, it is the filesystem's
responsibility. We take the intermediate solution of copying the iovec
so that the ZFS code can update it like on Solaris while leaving the
originals alone. This imposes some overhead. We could always revisit
this should profiling show that the allocations are a problem.
Signed-off-by: Richard Yao <ryao@gentoo.org>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #223
Closes #2373
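To make point 2 above concrete, here is a minimal userland sketch (an
illustration under stated assumptions, not part of this patch) of the
POSIX-compliant way to get durability with AIO: pair aio_write() with
aio_fsync() rather than assuming aio_write() is synchronous. The file
name is hypothetical, error handling is abbreviated, and on glibc the
program is linked with -lrt:
```
#include <aio.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	static char buf[4096] = "example payload";
	struct aiocb cb;
	int fd = open("testfile", O_CREAT | O_WRONLY, 0644);

	if (fd < 0)
		return (1);

	memset(&cb, 0, sizeof (cb));
	cb.aio_fildes = fd;
	cb.aio_buf = buf;
	cb.aio_nbytes = sizeof (buf);

	(void) aio_write(&cb);			/* queue the write */
	while (aio_error(&cb) == EINPROGRESS)	/* poll for completion */
		usleep(1000);

	(void) aio_fsync(O_SYNC, &cb);		/* request durability */
	while (aio_error(&cb) == EINPROGRESS)
		usleep(1000);

	(void) close(fd);
	return (0);
}
```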
	.aio_read = zpl_aio_read,
	.aio_write = zpl_aio_write,
	.mmap = zpl_mmap,
	.fsync = zpl_fsync,
	.aio_fsync = zpl_aio_fsync,
#ifdef HAVE_FILE_FALLOCATE
	.fallocate = zpl_fallocate,
#endif /* HAVE_FILE_FALLOCATE */
	.unlocked_ioctl = zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = zpl_compat_ioctl,
#endif
};

const struct file_operations zpl_dir_file_operations = {
	.llseek = generic_file_llseek,
	.read = generic_read_dir,
#ifdef HAVE_VFS_ITERATE
	.iterate = zpl_iterate,
#else
	.readdir = zpl_readdir,
#endif
	.fsync = zpl_fsync,
	.unlocked_ioctl = zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = zpl_compat_ioctl,
#endif
};