fusefs: multiple fixes related to the write cache

* Don't always write the last page synchronously.  That's not actually
  required.  It was probably just masking another bug that I fixed later,
  possibly in r349021.

* Enable the NotifyWriteback tests now that Writeback cache is working.

* Add a test to ensure that the write cache isn't flushed synchronously when
  in writeback mode.

Sponsored by:	The FreeBSD Foundation
commit 0bb4d86c57
parent 3decbc7870
asomers, 2019-06-17 23:34:11 +00:00
3 changed files with 92 additions and 28 deletions


@@ -789,23 +789,7 @@ fuse_write_biobackend(struct vnode *vp, struct uio *uio,
 	vfs_bio_set_flags(bp, ioflag);
-	if (last_page) {
-		/*
-		 * When writing the last page of a file we must write
-		 * synchronously.  If we didn't, then a subsequent
-		 * operation could extend the file, making the last
-		 * page of this buffer invalid because it would only be
-		 * partially cached.
-		 *
-		 * As an optimization, it would be allowable to only
-		 * write the last page synchronously.  Or, it should be
-		 * possible to synchronously flush the last
-		 * already-written page whenever extending a file with
-		 * ftruncate or another write.
-		 */
-		SDT_PROBE2(fusefs, , io, write_biobackend_issue, 1, bp);
-		err = bwrite(bp);
-	} else if (ioflag & IO_SYNC) {
+	if (ioflag & IO_SYNC) {
 		SDT_PROBE2(fusefs, , io, write_biobackend_issue, 2, bp);
 		err = bwrite(bp);
 	} else if (vm_page_count_severe() ||

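For context, the hunk above ends mid-condition. The following is a minimal sketch, not the committed code, of how the dispatch reads after this change: it borrows bp, ioflag, and err from fuse_write_biobackend() above, paraphrases the tail of the else-chain, and omits the SDT probes and the clustering cases. The point is that only IO_SYNC still forces a synchronous bwrite(); otherwise the buffer is either started asynchronously under memory pressure or simply left dirty, which is what allows the writeback cache to delay writes at all.

	/* Sketch only: SDT probes and write clustering omitted */
	if (ioflag & IO_SYNC) {
		/* The caller explicitly asked for a synchronous write */
		err = bwrite(bp);
	} else if (vm_page_count_severe() || buf_dirty_count_severe()) {
		/* Memory pressure: start the write now, without waiting */
		bawrite(bp);
		err = 0;
	} else {
		/* Writeback case: leave the buffer dirty in the cache */
		bdwrite(bp);
		err = 0;
	}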

@@ -50,6 +50,18 @@ using namespace testing;
 class Notify: public FuseTest {
 public:
+/* Ignore an optional FUSE_FSYNC */
+void maybe_expect_fsync(uint64_t ino)
+{
+	EXPECT_CALL(*m_mock, process(
+		ResultOf([=](auto in) {
+			return (in.header.opcode == FUSE_FSYNC &&
+				in.header.nodeid == ino);
+		}, Eq(true)),
+		_)
+	).WillOnce(Invoke(ReturnErrno(0)));
+}
+
 void expect_lookup(uint64_t parent, const char *relpath, uint64_t ino,
 	off_t size, Sequence &seq)
 {
@@ -76,6 +88,7 @@ virtual void SetUp() {
 	int val = 0;
 	size_t size = sizeof(val);
 
+	m_async = true;
 	Notify::SetUp();
 	if (IsSkipped())
 		return;
@@ -363,8 +376,7 @@ TEST_F(Notify, inval_inode_with_clean_cache)
 	/* Deliberately leak fd. close(2) will be tested in release.cc */
 }
 
-/* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=238312 */
-TEST_F(NotifyWriteback, DISABLED_inval_inode_with_dirty_cache)
+TEST_F(NotifyWriteback, inval_inode_with_dirty_cache)
 {
 	const static char FULLPATH[] = "mountpoint/foo";
 	const static char RELPATH[] = "foo";
@@ -384,8 +396,14 @@ TEST_F(NotifyWriteback, DISABLED_inval_inode_with_dirty_cache)
 	fd = open(FULLPATH, O_RDWR);
 	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
 
-	/* Evict the data cache */
+	expect_write(ino, 0, bufsize, CONTENTS);
+	/*
+	 * The FUSE protocol does not require an fsync here, but FreeBSD's
+	 * bufobj_invalbuf sends it anyway
+	 */
+	maybe_expect_fsync(ino);
+	/* Evict the data cache */
 	iia.mock = m_mock;
 	iia.ino = ino;
 	iia.off = 0;
@@ -398,8 +416,7 @@ TEST_F(NotifyWriteback, DISABLED_inval_inode_with_dirty_cache)
 	/* Deliberately leak fd. close(2) will be tested in release.cc */
 }
 
-/* https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=238312 */
-TEST_F(NotifyWriteback, DISABLED_inval_inode_attrs_only)
+TEST_F(NotifyWriteback, inval_inode_attrs_only)
 {
 	const static char FULLPATH[] = "mountpoint/foo";
 	const static char RELPATH[] = "foo";
@@ -425,7 +442,7 @@ TEST_F(NotifyWriteback, DISABLED_inval_inode_attrs_only)
 	EXPECT_CALL(*m_mock, process(
 		ResultOf([=](auto in) {
 			return (in.header.opcode == FUSE_GETATTR &&
-				in.header.nodeid == FUSE_ROOT_ID);
+				in.header.nodeid == ino);
 		}, Eq(true)),
 		_)
 	).WillOnce(Invoke(ReturnImmediate([=](auto i __unused, auto& out) {


@@ -90,6 +90,31 @@ void expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
 	FuseTest::expect_write(ino, offset, isize, osize, 0, 0, contents);
 }
 
+/* Expect a write that may or may not come, depending on the cache mode */
+void maybe_expect_write(uint64_t ino, uint64_t offset, uint64_t size,
+	const void *contents)
+{
+	EXPECT_CALL(*m_mock, process(
+		ResultOf([=](auto in) {
+			const char *buf = (const char*)in.body.bytes +
+				sizeof(struct fuse_write_in);
+
+			return (in.header.opcode == FUSE_WRITE &&
+				in.header.nodeid == ino &&
+				in.body.write.offset == offset &&
+				in.body.write.size == size &&
+				0 == bcmp(buf, contents, size));
+		}, Eq(true)),
+		_)
+	).Times(AtMost(1))
+	.WillRepeatedly(Invoke(
+		ReturnImmediate([=](auto in __unused, auto& out) {
+			SET_OUT_HEADER_LEN(out, write);
+			out.body.write.size = size;
+		})
+	));
+}
+
 };
 
 class WriteCacheable: public Write {
@@ -196,6 +221,14 @@ void expect_write(uint64_t ino, uint64_t offset, uint64_t isize,
 }
 };
 
+class WriteBackAsync: public WriteBack {
+public:
+virtual void SetUp() {
+	m_async = true;
+	WriteBack::SetUp();
+}
+};
+
 /* Tests for clustered writes with WriteBack cacheing */
 class WriteCluster: public WriteBack {
 public:
@@ -302,7 +335,7 @@ TEST_F(Write, append_to_cached)
 	expect_lookup(RELPATH, ino, oldsize);
 	expect_open(ino, 0, 1);
 	expect_read(ino, 0, oldsize, oldsize, oldcontents);
-	expect_write(ino, oldsize, BUFSIZE, BUFSIZE, CONTENTS);
+	maybe_expect_write(ino, oldsize, BUFSIZE, CONTENTS);
 
 	/* Must open O_RDWR or fuse(4) implicitly sets direct_io */
 	fd = open(FULLPATH, O_RDWR | O_APPEND);
@@ -610,7 +643,7 @@ TEST_F(Write, write_large)
 	expect_lookup(RELPATH, ino, 0);
 	expect_open(ino, 0, 1);
 	expect_write(ino, 0, halfbufsize, halfbufsize, contents);
-	expect_write(ino, halfbufsize, halfbufsize, halfbufsize,
+	maybe_expect_write(ino, halfbufsize, halfbufsize,
 		&contents[halfbufsize / sizeof(int)]);
 
 	fd = open(FULLPATH, O_WRONLY);
@@ -662,7 +695,7 @@ TEST_F(Write_7_8, write)
 }
 
 /* In writeback mode, dirty data should be written on close */
-TEST_F(WriteBack, close)
+TEST_F(WriteBackAsync, close)
 {
 	const char FULLPATH[] = "mountpoint/some_file.txt";
 	const char RELPATH[] = "some_file.txt";
@@ -795,7 +828,7 @@ TEST_F(WriteBack, rmw)
 	FuseTest::expect_lookup(RELPATH, ino, S_IFREG | 0644, fsize, 1);
 	expect_open(ino, 0, 1);
 	expect_read(ino, 0, fsize, fsize, INITIAL, O_WRONLY);
-	expect_write(ino, offset, bufsize, bufsize, CONTENTS);
+	maybe_expect_write(ino, offset, bufsize, CONTENTS);
 
 	fd = open(FULLPATH, O_WRONLY);
 	EXPECT_LE(0, fd) << strerror(errno);
@@ -808,7 +841,7 @@ TEST_F(WriteBack, rmw)
 /*
  * Without direct_io, writes should be committed to cache
  */
-TEST_F(WriteBack, writeback)
+TEST_F(WriteBack, cache)
 {
 	const char FULLPATH[] = "mountpoint/some_file.txt";
 	const char RELPATH[] = "some_file.txt";
@@ -867,6 +900,36 @@ TEST_F(WriteBack, o_direct)
 	/* Deliberately leak fd. close(2) will be tested in release.cc */
 }
 
+/*
+ * When mounted with -o async, the writeback cache mode should delay writes
+ */
+TEST_F(WriteBackAsync, delay)
+{
+	const char FULLPATH[] = "mountpoint/some_file.txt";
+	const char RELPATH[] = "some_file.txt";
+	const char *CONTENTS = "abcdefgh";
+	uint64_t ino = 42;
+	int fd;
+	ssize_t bufsize = strlen(CONTENTS);
+
+	expect_lookup(RELPATH, ino, 0);
+	expect_open(ino, 0, 1);
+	/* Write should be cached, but FUSE_WRITE shouldn't be sent */
+	EXPECT_CALL(*m_mock, process(
+		ResultOf([=](auto in) {
+			return (in.header.opcode == FUSE_WRITE);
+		}, Eq(true)),
+		_)
+	).Times(0);
+
+	fd = open(FULLPATH, O_RDWR);
+	EXPECT_LE(0, fd) << strerror(errno);
+
+	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
+
+	/* Don't close the file because that would flush the cache */
+}
+
 /*
  * Without direct_io, writes should be committed to cache
  */
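The delay test above deliberately never flushes the cache. As a usage note, a hypothetical companion test, shown below as a sketch only and not part of this commit (the test name is invented), could check that an explicit fsync(2) pushes the delayed data out as a FUSE_WRITE. It reuses the expect_* helpers defined above and tolerates an optional FUSE_FSYNC with the same matcher style as notify.cc's maybe_expect_fsync.

/*
 * Hypothetical companion test (sketch): an explicit fsync(2) should flush
 * the write that WriteBackAsync.delay leaves dirty in the cache.
 */
TEST_F(WriteBackAsync, DISABLED_fsync_flushes_delayed_write)
{
	const char FULLPATH[] = "mountpoint/some_file.txt";
	const char RELPATH[] = "some_file.txt";
	const char *CONTENTS = "abcdefgh";
	uint64_t ino = 42;
	int fd;
	ssize_t bufsize = strlen(CONTENTS);

	expect_lookup(RELPATH, ino, 0);
	expect_open(ino, 0, 1);
	/* The cached data should now reach the daemon */
	expect_write(ino, 0, bufsize, bufsize, CONTENTS);
	/* The kernel may follow the flush with FUSE_FSYNC; tolerate it */
	EXPECT_CALL(*m_mock, process(
		ResultOf([=](auto in) {
			return (in.header.opcode == FUSE_FSYNC &&
				in.header.nodeid == ino);
		}, Eq(true)),
		_)
	).Times(AtMost(1))
	.WillRepeatedly(Invoke(ReturnErrno(0)));

	fd = open(FULLPATH, O_RDWR);
	EXPECT_LE(0, fd) << strerror(errno);
	ASSERT_EQ(bufsize, write(fd, CONTENTS, bufsize)) << strerror(errno);
	ASSERT_EQ(0, fsync(fd)) << strerror(errno);

	/* Deliberately leak fd. close(2) will be tested in release.cc */
}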