Allow shared lookups for nullfs mounts, if the lower filesystem supports it.

There are two problems which must be addressed for shared lookups to
have a measurable effect on nullfs scalability:

1. When vfs_lookup() calls VOP_LOOKUP() for nullfs, which passes the
lookup operation to the lower filesystem, the resulting vnode is often
only shared-locked.  Then null_nodeget() cannot instantiate a covering
vnode for the lower vnode, since insmntque1() and null_hashins()
require an exclusive lock on the lower vnode.
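
This happens because the nullfs mount now advertises the lower mount's
lookup-sharing capabilities.  Condensed from nullfs_mount() in the
diff below:

	MNT_ILOCK(mp);
	/* Let vfs_lookup() use shared vnode locks on this mount. */
	mp->mnt_kern_flag |= lowerrootvp->v_mount->mnt_kern_flag &
	    (MNTK_MPSAFE | MNTK_SHARED_WRITES | MNTK_LOOKUP_SHARED |
	    MNTK_EXTENDED_SHARED);
	/* Dotdot lookups still take the exclusive lock. */
	mp->mnt_kern_flag |= MNTK_LOOKUP_EXCL_DOTDOT;
	MNT_IUNLOCK(mp);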

Change the assertion that the lower vnode is exclusively locked to
require only that it is locked.  If the null hash fails to find a
pre-existing nullfs vnode for the lower vnode and the lower vnode is
only shared-locked, its lock is upgraded.
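
The resulting flow at the top of null_nodeget(), condensed from the
diff below:

	/* Any lock is acceptable now, not only the exclusive one. */
	ASSERT_VOP_LOCKED(lowervp, "lowervp");

	*vpp = null_hashget(mp, lowervp);
	if (*vpp != NULL) {
		vrele(lowervp);
		return (0);
	}

	/* insmntque1() requires the exclusive lock on the lower vnode. */
	if (VOP_ISLOCKED(lowervp) != LK_EXCLUSIVE) {
		vn_lock(lowervp, LK_UPGRADE | LK_RETRY);
		if ((lowervp->v_iflag & VI_DOOMED) != 0) {
			/* Reclaimed while the lock was transiently dropped. */
			vput(lowervp);
			return (ENOENT);
		}
	}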

2. Nullfs reclaims its vnodes on deactivation.  This is due to nullfs'
inability to detect reclamation of the lower vnode.  Reclaiming the
nullfs vnode at deactivation time prevents its reference to the lower
vnode from becoming stale.

Change the nullfs VOP_INACTIVE to not reclaim the vnode; instead, use
the new VFS_RECLAIM_LOWERVP callback to get a notification and reclaim
the upper vnode together with the lower vnode.
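
The nullfs side of the hook is nullfs_reclaim_lowervp() in the diff
below; the lower filesystem's reclaim path walks the new mnt_uppers
list of the lower mount to deliver the notification.  A rough sketch
of the interaction (the dispatch helper name here is illustrative, the
real loop lives in the generic vgone path):

	/* nullfs: drop the upper vnode when its lower vnode goes away. */
	static void
	nullfs_reclaim_lowervp(struct mount *mp, struct vnode *lowervp)
	{
		struct vnode *vp;

		vp = null_hashget(mp, lowervp);
		if (vp == NULL)
			return;
		vgone(vp);	/* null_reclaim() vput()s lowervp ... */
		vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY); /* ... so relock it. */
	}

	/* Illustrative dispatch, assumed shape of the generic VFS code. */
	static void
	notify_lower_reclaim(struct vnode *lowervp)
	{
		struct mount *ump;

		TAILQ_FOREACH(ump, &lowervp->v_mount->mnt_uppers,
		    mnt_upper_link)
			VFS_RECLAIM_LOWERVP(ump, lowervp);
	}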

Note that the nullfs reclamation procedure calls vput() on the lowervp
vnode, temporarily unlocking the vnode being reclaimed.  This seems to
be fine for MPSAFE filesystems, but non-MPSAFE code often puts a
partially initialized vnode on some globally visible list, and may
later decide that the half-constructed vnode is not needed.  If a
nullfs mount is created above such a filesystem, other threads might
catch such an improperly initialized vnode.  Instead of trying to
handle this case, e.g. by recursing on the lower vnode lock in
nullfs_reclaim_lowervp(), I decided to rely on the upcoming removal of
support for non-MPSAFE filesystems.
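
For reference, the vput() in question sits in null_reclaim(); roughly
(a sketch, not the verbatim function):

	static int
	null_reclaim(struct vop_reclaim_args *ap)
	{
		struct vnode *vp, *lowervp;
		struct null_node *xp;

		vp = ap->a_vp;
		xp = VTONULL(vp);
		lowervp = xp->null_lowervp;

		null_hashrem(xp);		/* detach from the hash */
		vp->v_data = NULL;
		vp->v_vnlock = &vp->v_lock;	/* stop sharing the lower lock */
		vput(lowervp);			/* the window discussed above */
		free(xp, M_NULLFSNODE);
		return (0);
	}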

In collaboration with:	pho
MFC after:	3 weeks
Konstantin Belousov 2012-09-09 19:20:23 +00:00
parent bcd5bb8e57
commit d9e9650a36
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=240285
4 changed files with 68 additions and 47 deletions

View File

@@ -56,6 +56,7 @@ struct null_node {
int nullfs_init(struct vfsconf *vfsp);
int nullfs_uninit(struct vfsconf *vfsp);
int null_nodeget(struct mount *mp, struct vnode *target, struct vnode **vpp);
struct vnode *null_hashget(struct mount *mp, struct vnode *lowervp);
void null_hashrem(struct null_node *xp);
int null_bypass(struct vop_generic_args *ap);

View File

@@ -67,7 +67,6 @@ struct mtx null_hashmtx;
static MALLOC_DEFINE(M_NULLFSHASH, "nullfs_hash", "NULLFS hash table");
MALLOC_DEFINE(M_NULLFSNODE, "nullfs_node", "NULLFS vnode private part");
static struct vnode * null_hashget(struct mount *, struct vnode *);
static struct vnode * null_hashins(struct mount *, struct null_node *);
/*
@@ -98,7 +97,7 @@ nullfs_uninit(vfsp)
* Return a VREF'ed alias for lower vnode if already exists, else 0.
* Lower vnode should be locked on entry and will be left locked on exit.
*/
static struct vnode *
struct vnode *
null_hashget(mp, lowervp)
struct mount *mp;
struct vnode *lowervp;
@@ -209,20 +208,29 @@ null_nodeget(mp, lowervp, vpp)
struct vnode *vp;
int error;
/*
* The insmntque1() call below requires the exclusive lock on
* the nullfs vnode.
*/
ASSERT_VOP_ELOCKED(lowervp, "lowervp");
KASSERT(lowervp->v_usecount >= 1, ("Unreferenced vnode %p\n", lowervp));
ASSERT_VOP_LOCKED(lowervp, "lowervp");
KASSERT(lowervp->v_usecount >= 1, ("Unreferenced vnode %p", lowervp));
/* Lookup the hash firstly */
/* Lookup the hash firstly. */
*vpp = null_hashget(mp, lowervp);
if (*vpp != NULL) {
vrele(lowervp);
return (0);
}
/*
* The insmntque1() call below requires the exclusive lock on
* the nullfs vnode. Upgrade the lock now if hash failed to
* provide ready to use vnode.
*/
if (VOP_ISLOCKED(lowervp) != LK_EXCLUSIVE) {
vn_lock(lowervp, LK_UPGRADE | LK_RETRY);
if ((lowervp->v_iflag & VI_DOOMED) != 0) {
vput(lowervp);
return (ENOENT);
}
}
/*
* We do not serialize vnode creation, instead we will check for
* duplicates later, when adding new vnode to hash.
@@ -233,8 +241,7 @@ null_nodeget(mp, lowervp, vpp)
* might cause a bogus v_data pointer to get dereferenced
* elsewhere if MALLOC should block.
*/
xp = malloc(sizeof(struct null_node),
M_NULLFSNODE, M_WAITOK);
xp = malloc(sizeof(struct null_node), M_NULLFSNODE, M_WAITOK);
error = getnewvnode("null", mp, &null_vnodeops, &vp);
if (error) {

View File

@@ -65,6 +65,7 @@ static vfs_statfs_t nullfs_statfs;
static vfs_unmount_t nullfs_unmount;
static vfs_vget_t nullfs_vget;
static vfs_extattrctl_t nullfs_extattrctl;
static vfs_reclaim_lowervp_t nullfs_reclaim_lowervp;
/*
* Mount null layer
@@ -121,8 +122,10 @@ nullfs_mount(struct mount *mp)
*/
NDINIT(ndp, LOOKUP, FOLLOW|LOCKLEAF, UIO_SYSSPACE, target, curthread);
error = namei(ndp);
/*
* Re-lock vnode.
* XXXKIB This is deadlock-prone as well.
*/
if (isvnunlocked)
vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY);
@@ -146,7 +149,7 @@ nullfs_mount(struct mount *mp)
}
xmp = (struct null_mount *) malloc(sizeof(struct null_mount),
M_NULLFSMNT, M_WAITOK); /* XXX */
M_NULLFSMNT, M_WAITOK);
/*
* Save reference to underlying FS
@@ -186,10 +189,15 @@ nullfs_mount(struct mount *mp)
}
MNT_ILOCK(mp);
mp->mnt_kern_flag |= lowerrootvp->v_mount->mnt_kern_flag &
(MNTK_MPSAFE | MNTK_SHARED_WRITES);
(MNTK_MPSAFE | MNTK_SHARED_WRITES | MNTK_LOOKUP_SHARED |
MNTK_EXTENDED_SHARED);
mp->mnt_kern_flag |= MNTK_LOOKUP_EXCL_DOTDOT;
MNT_IUNLOCK(mp);
mp->mnt_data = xmp;
vfs_getnewfsid(mp);
MNT_ILOCK(xmp->nullm_vfs);
TAILQ_INSERT_TAIL(&xmp->nullm_vfs->mnt_uppers, mp, mnt_upper_link);
MNT_IUNLOCK(xmp->nullm_vfs);
vfs_mountedfrom(mp, target);
@@ -206,14 +214,16 @@ nullfs_unmount(mp, mntflags)
struct mount *mp;
int mntflags;
{
void *mntdata;
int error;
int flags = 0;
struct null_mount *mntdata;
struct mount *ump;
int error, flags;
NULLFSDEBUG("nullfs_unmount: mp = %p\n", (void *)mp);
if (mntflags & MNT_FORCE)
flags |= FORCECLOSE;
flags = FORCECLOSE;
else
flags = 0;
/* There is 1 extra root vnode reference (nullm_rootvp). */
error = vflush(mp, 1, flags, curthread);
@@ -224,9 +234,17 @@ nullfs_unmount(mp, mntflags)
* Finally, throw away the null_mount structure
*/
mntdata = mp->mnt_data;
ump = mntdata->nullm_vfs;
MNT_ILOCK(ump);
while ((ump->mnt_kern_flag & MNTK_VGONE_UPPER) != 0) {
ump->mnt_kern_flag |= MNTK_VGONE_WAITER;
msleep(&ump->mnt_uppers, &ump->mnt_mtx, 0, "vgnupw", 0);
}
TAILQ_REMOVE(&ump->mnt_uppers, mp, mnt_upper_link);
MNT_IUNLOCK(ump);
mp->mnt_data = NULL;
free(mntdata, M_NULLFSMNT);
return 0;
return (0);
}
static int
@@ -316,13 +334,10 @@ nullfs_vget(mp, ino, flags, vpp)
KASSERT((flags & LK_TYPE_MASK) != 0,
("nullfs_vget: no lock requested"));
flags &= ~LK_TYPE_MASK;
flags |= LK_EXCLUSIVE;
error = VFS_VGET(MOUNTTONULLMOUNT(mp)->nullm_vfs, ino, flags, vpp);
if (error)
if (error != 0)
return (error);
return (null_nodeget(mp, *vpp, vpp));
}
@@ -334,11 +349,11 @@ nullfs_fhtovp(mp, fidp, flags, vpp)
struct vnode **vpp;
{
int error;
error = VFS_FHTOVP(MOUNTTONULLMOUNT(mp)->nullm_vfs, fidp, LK_EXCLUSIVE,
vpp);
if (error)
return (error);
error = VFS_FHTOVP(MOUNTTONULLMOUNT(mp)->nullm_vfs, fidp, flags,
vpp);
if (error != 0)
return (error);
return (null_nodeget(mp, *vpp, vpp));
}
@@ -350,10 +365,22 @@ nullfs_extattrctl(mp, cmd, filename_vp, namespace, attrname)
int namespace;
const char *attrname;
{
return VFS_EXTATTRCTL(MOUNTTONULLMOUNT(mp)->nullm_vfs, cmd, filename_vp,
namespace, attrname);
return (VFS_EXTATTRCTL(MOUNTTONULLMOUNT(mp)->nullm_vfs, cmd,
filename_vp, namespace, attrname));
}
static void
nullfs_reclaim_lowervp(struct mount *mp, struct vnode *lowervp)
{
struct vnode *vp;
vp = null_hashget(mp, lowervp);
if (vp == NULL)
return;
vgone(vp);
vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY);
}
static struct vfsops null_vfsops = {
.vfs_extattrctl = nullfs_extattrctl,
@@ -367,6 +394,7 @@ static struct vfsops null_vfsops = {
.vfs_uninit = nullfs_uninit,
.vfs_unmount = nullfs_unmount,
.vfs_vget = nullfs_vget,
.vfs_reclaim_lowervp = nullfs_reclaim_lowervp,
};
VFS_SET(null_vfsops, nullfs, VFCF_LOOPBACK | VFCF_JAIL);

View File

@@ -665,33 +665,18 @@ null_unlock(struct vop_unlock_args *ap)
}
/*
* There is no way to tell that someone issued remove/rmdir operation
* on the underlying filesystem. For now we just have to release lowervp
* as soon as possible.
*
* Note, we can't release any resources nor remove vnode from hash before
* appropriate VXLOCK stuff is done because other process can find this
* vnode in hash during inactivation and may be sitting in vget() and waiting
* for null_inactive to unlock vnode. Thus we will do all those in VOP_RECLAIM.
* XXXKIB
*/
static int
null_inactive(struct vop_inactive_args *ap)
null_inactive(struct vop_inactive_args *ap __unused)
{
struct vnode *vp = ap->a_vp;
vp->v_object = NULL;
/*
* If this is the last reference, then free up the vnode
* so as not to tie up the lower vnodes.
*/
vrecycle(vp);
return (0);
}
/*
* Now, the VXLOCK is in force and we're free to destroy the null vnode.
* Now, the nullfs vnode and, due to the sharing lock, the lower
* vnode, are exclusively locked, and we shall destroy the null vnode.
*/
static int
null_reclaim(struct vop_reclaim_args *ap)