Remove znode move functionality

Unlike Solaris the Linux implementation embeds the inode in the
znode, and has no use for a vnode.  So while it's true that fragmentation
of the znode cache may occur it should not be worse than any of the
other Linux FS inode caches.  Until proven that this is a problem it's
just added complexity we don't need.
This commit is contained in:
Brian Behlendorf 2011-01-07 12:40:30 -08:00
parent f30484afc3
commit 5649246dd3

View File

@ -160,189 +160,6 @@ zfs_znode_cache_destructor(void *buf, void *arg)
ASSERT(zp->z_acl_cached == NULL);
}
#ifdef ZNODE_STATS
/*
 * Diagnostic counters recording, per reason, how often zfs_znode_move()
 * declined (DONT_KNOW) or deferred (LATER) a kmem relocation request.
 * Each field corresponds to one early-return path in zfs_znode_move(),
 * bumped via ZNODE_STAT_ADD.  Only compiled in when ZNODE_STATS is set.
 */
static struct {
uint64_t zms_zfsvfs_invalid;	/* vfs pointer already invalidated */
uint64_t zms_zfsvfs_recheck1;	/* vfs changed before zfsvfs_lock taken */
uint64_t zms_zfsvfs_unmounted;	/* filesystem was being unmounted */
uint64_t zms_zfsvfs_recheck2;	/* vfs changed before z_znodes_lock taken */
uint64_t zms_obj_held;		/* object hold contended; retry later */
uint64_t zms_vnode_locked;	/* vnode v_lock contended; retry later */
uint64_t zms_not_only_dnlc;	/* vnode referenced outside the DNLC */
} znode_move_stats;
#endif /* ZNODE_STATS */
/*
 * Relocate the contents of znode 'ozp' into the freshly constructed znode
 * 'nzp' as part of a kmem cache move (defragmentation) callback.  Copies
 * all live fields, swaps the vnodes so the old vnode is freed with 'ozp',
 * then invalidates 'ozp' so a later zfs_znode_move() callback recognizes
 * it as dead.  Caller (zfs_znode_move) holds all locks required to make
 * this copy atomic with respect to other znode users.
 *
 * NOTE(review): statement order matters here — the vnode swap and
 * v_data back-pointer fixup must precede use of ZTOV(nzp), and z_sa_hdl
 * must be transferred via sa_set_userp() before being cleared on 'ozp'.
 */
static void
zfs_znode_move_impl(znode_t *ozp, znode_t *nzp)
{
vnode_t *vp;
/* Copy fields. */
nzp->z_zfsvfs = ozp->z_zfsvfs;
/* Swap vnodes. */
vp = nzp->z_vnode;
nzp->z_vnode = ozp->z_vnode;
ozp->z_vnode = vp; /* let destructor free the overwritten vnode */
/* Repoint each vnode's private data at its new owning znode. */
ZTOV(ozp)->v_data = ozp;
ZTOV(nzp)->v_data = nzp;
nzp->z_id = ozp->z_id;
ASSERT(ozp->z_dirlocks == NULL); /* znode not in use */
ASSERT(avl_numnodes(&ozp->z_range_avl) == 0);
nzp->z_unlinked = ozp->z_unlinked;
nzp->z_atime_dirty = ozp->z_atime_dirty;
nzp->z_zn_prefetch = ozp->z_zn_prefetch;
nzp->z_blksz = ozp->z_blksz;
nzp->z_seq = ozp->z_seq;
nzp->z_mapcnt = ozp->z_mapcnt;
nzp->z_gen = ozp->z_gen;
nzp->z_sync_cnt = ozp->z_sync_cnt;
nzp->z_is_sa = ozp->z_is_sa;
nzp->z_sa_hdl = ozp->z_sa_hdl;
/* z_atime is stored as a two-element uint64_t array. */
bcopy(ozp->z_atime, nzp->z_atime, sizeof (uint64_t) * 2);
nzp->z_links = ozp->z_links;
nzp->z_size = ozp->z_size;
nzp->z_pflags = ozp->z_pflags;
nzp->z_uid = ozp->z_uid;
nzp->z_gid = ozp->z_gid;
nzp->z_mode = ozp->z_mode;
/*
 * Since this is just an idle znode and kmem is already dealing with
 * memory pressure, release any cached ACL.
 */
if (ozp->z_acl_cached) {
zfs_acl_free(ozp->z_acl_cached);
ozp->z_acl_cached = NULL;
}
/* Point the SA handle's user data at the new znode. */
sa_set_userp(nzp->z_sa_hdl, nzp);
/*
 * Invalidate the original znode by clearing fields that provide a
 * pointer back to the znode. Set the low bit of the vfs pointer to
 * ensure that zfs_znode_move() recognizes the znode as invalid in any
 * subsequent callback.
 */
ozp->z_sa_hdl = NULL;
POINTER_INVALIDATE(&ozp->z_zfsvfs);
/*
 * Mark the znode.
 */
nzp->z_moved = 1;
ozp->z_moved = (uint8_t)-1;
}
/*
 * kmem cache move callback for the znode cache (registered with
 * kmem_cache_set_move()).  kmem asks us to relocate the znode at 'buf'
 * into the new buffer 'newbuf' to reduce cache fragmentation.
 *
 * Returns:
 *   KMEM_CBRC_DONT_KNOW - znode cannot be identified as live/movable
 *   KMEM_CBRC_LATER     - znode is live but busy; retry later
 *   KMEM_CBRC_YES       - znode was moved into 'newbuf'
 *
 * Lock ordering established here: zfsvfs_lock -> z_teardown_lock ->
 * z_znodes_lock -> ZFS_OBJ_HOLD -> v_lock.  Each lock acquisition is
 * followed by a recheck because the znode may be freed or the
 * filesystem unmounted between steps.  'size' and 'arg' are unused.
 */
/*ARGSUSED*/
static kmem_cbrc_t
zfs_znode_move(void *buf, void *newbuf, size_t size, void *arg)
{
znode_t *ozp = buf, *nzp = newbuf;
zfsvfs_t *zfsvfs;
vnode_t *vp;
/*
 * The znode is on the file system's list of known znodes if the vfs
 * pointer is valid. We set the low bit of the vfs pointer when freeing
 * the znode to invalidate it, and the memory patterns written by kmem
 * (baddcafe and deadbeef) set at least one of the two low bits. A newly
 * created znode sets the vfs pointer last of all to indicate that the
 * znode is known and in a valid state to be moved by this function.
 */
zfsvfs = ozp->z_zfsvfs;
if (!POINTER_IS_VALID(zfsvfs)) {
ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_invalid);
return (KMEM_CBRC_DONT_KNOW);
}
/*
 * Close a small window in which it's possible that the filesystem could
 * be unmounted and freed, and zfsvfs, though valid in the previous
 * statement, could point to unrelated memory by the time we try to
 * prevent the filesystem from being unmounted.
 */
rw_enter(&zfsvfs_lock, RW_WRITER);
if (zfsvfs != ozp->z_zfsvfs) {
rw_exit(&zfsvfs_lock);
ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_recheck1);
return (KMEM_CBRC_DONT_KNOW);
}
/*
 * If the znode is still valid, then so is the file system. We know that
 * no valid file system can be freed while we hold zfsvfs_lock, so we
 * can safely ensure that the filesystem is not and will not be
 * unmounted. The next statement is equivalent to ZFS_ENTER().
 */
rrw_enter(&zfsvfs->z_teardown_lock, RW_READER, FTAG);
if (zfsvfs->z_unmounted) {
ZFS_EXIT(zfsvfs);
rw_exit(&zfsvfs_lock);
ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_unmounted);
return (KMEM_CBRC_DONT_KNOW);
}
/* Teardown lock held; safe to drop the global zfsvfs lock now. */
rw_exit(&zfsvfs_lock);
mutex_enter(&zfsvfs->z_znodes_lock);
/*
 * Recheck the vfs pointer in case the znode was removed just before
 * acquiring the lock.
 */
if (zfsvfs != ozp->z_zfsvfs) {
mutex_exit(&zfsvfs->z_znodes_lock);
ZFS_EXIT(zfsvfs);
ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_recheck2);
return (KMEM_CBRC_DONT_KNOW);
}
/*
 * At this point we know that as long as we hold z_znodes_lock, the
 * znode cannot be freed and fields within the znode can be safely
 * accessed. Now, prevent a race with zfs_zget().
 */
if (ZFS_OBJ_HOLD_TRYENTER(zfsvfs, ozp->z_id) == 0) {
mutex_exit(&zfsvfs->z_znodes_lock);
ZFS_EXIT(zfsvfs);
ZNODE_STAT_ADD(znode_move_stats.zms_obj_held);
return (KMEM_CBRC_LATER);
}
/* Try-lock the vnode; contention means the znode is busy, retry later. */
vp = ZTOV(ozp);
if (mutex_tryenter(&vp->v_lock) == 0) {
ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
mutex_exit(&zfsvfs->z_znodes_lock);
ZFS_EXIT(zfsvfs);
ZNODE_STAT_ADD(znode_move_stats.zms_vnode_locked);
return (KMEM_CBRC_LATER);
}
/* Only move znodes that are referenced _only_ by the DNLC. */
if (vp->v_count != 1 || !vn_in_dnlc(vp)) {
mutex_exit(&vp->v_lock);
ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
mutex_exit(&zfsvfs->z_znodes_lock);
ZFS_EXIT(zfsvfs);
ZNODE_STAT_ADD(znode_move_stats.zms_not_only_dnlc);
return (KMEM_CBRC_LATER);
}
/*
 * The znode is known and in a valid state to move. We're holding the
 * locks needed to execute the critical section.
 */
zfs_znode_move_impl(ozp, nzp);
mutex_exit(&vp->v_lock);
ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
/* Replace the old znode with the new one on the per-fs znode list. */
list_link_replace(&ozp->z_link_node, &nzp->z_link_node);
mutex_exit(&zfsvfs->z_znodes_lock);
ZFS_EXIT(zfsvfs);
return (KMEM_CBRC_YES);
}
void
zfs_znode_init(void)
{
@ -354,7 +171,6 @@ zfs_znode_init(void)
znode_cache = kmem_cache_create("zfs_znode_cache",
sizeof (znode_t), 0, zfs_znode_cache_constructor,
zfs_znode_cache_destructor, NULL, NULL, NULL, 0);
kmem_cache_set_move(znode_cache, zfs_znode_move);
}
void