Make insmntque() externally visible and allow it to fail (e.g. during
late stages of unmount).  On failure, the vnode is recycled.

Add insmntque1(), to allow for file system specific cleanup when
recycling a vnode on failure.
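
As a minimal sketch of the shape such a cleanup callback takes (not part
of this commit; the "myfs" names and the M_MYFSNODE malloc type are
hypothetical), mirroring the standard destructor and the devfs/nullfs
destructors in the diff below:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/vnode.h>

static MALLOC_DEFINE(M_MYFSNODE, "myfsnode", "myfs per-vnode data");

/*
 * Called by insmntque1() when the vnode cannot be placed on the mount's
 * vnode list (e.g. during a late stage of unmount).  The vnode is assumed
 * to have been locked before the insmntque1() call, per the new convention.
 */
static void
myfs_insmntque_dtr(struct vnode *vp, void *arg)
{

        /* Detach the half-constructed per-file-system node. */
        vp->v_data = NULL;
        free(arg, M_MYFSNODE);

        /* Recycle the vnode; vput() drops the lock and the reference. */
        vgone(vp);
        vput(vp);
}

A file system would pass this as insmntque1(vp, mp, myfs_insmntque_dtr, np)
in place of a plain insmntque(vp, mp) call.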

Change getnewvnode() to no longer call insmntque().  Previously,
embryonic vnodes were put onto the list of vnodes belonging to a file
system, which is unsafe for a file system marked MPSAFE.

Change vfs_hash_insert() to no longer lock the vnode.  The caller now
has that responsibility.

Change most file systems to lock the vnode and call insmntque() or
insmntque1() after a new vnode has been sufficiently set up.  Handle
failed insmntque*() calls by propagating errors to callers, possibly
after some file system specific cleanup.
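
A rough sketch of the resulting pattern in a typical VFS_VGET()
implementation (illustrative only; "myfs", its node structure and vop
vector are hypothetical, and error handling is simplified):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>

static MALLOC_DEFINE(M_MYFSNODE, "myfsnode", "myfs per-vnode data");
static struct vop_vector myfs_vnodeops;     /* placeholder vops */

struct myfs_node {
        ino_t   mn_ino;
};

static int
myfs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{
        struct myfs_node *np;
        struct thread *td;
        struct vnode *vp;
        int error;

        td = curthread;
        error = vfs_hash_get(mp, ino, flags, td, vpp, NULL, NULL);
        if (error != 0 || *vpp != NULL)
                return (error);

        np = malloc(sizeof(*np), M_MYFSNODE, M_WAITOK | M_ZERO);
        error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
        if (error != 0) {
                free(np, M_MYFSNODE);
                *vpp = NULLVP;
                return (error);
        }
        vp->v_data = np;
        np->mn_ino = ino;

        /*
         * getnewvnode() no longer places the vnode on the mount's vnode
         * list; do it here with the vnode lock held.  On failure the
         * default destructor has already recycled vp, so only the
         * file system's own allocation remains to be released.
         */
        lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL, td);
        error = insmntque(vp, mp);
        if (error != 0) {
                free(np, M_MYFSNODE);
                *vpp = NULLVP;
                return (error);
        }

        /*
         * vfs_hash_insert() no longer locks the vnode; it relies on the
         * lock taken above.
         */
        error = vfs_hash_insert(vp, ino, flags, td, vpp, NULL, NULL);
        if (error != 0 || *vpp != NULL)
                return (error);

        *vpp = vp;
        return (0);
}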

Approved by:	re (kensmith)
Reviewed by:	kib
In collaboration with:	kib
Tor Egge 2007-03-13 01:50:27 +00:00
parent 06e83c7e86
commit 61b9d89ff0
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=167497
26 changed files with 229 additions and 21 deletions

View File

@@ -1797,6 +1797,9 @@ make_coda_node(CodaFid *fid, struct mount *vfsp, short type)
if (err) {
panic("coda: getnewvnode returned error %d\n", err);
}
err = insmntque1(vp, vfsp, NULL, NULL); /* XXX: Too early for mpsafe fs */
if (err != 0)
panic("coda: insmntque failed: error %d", err);
vp->v_data = cp;
vp->v_type = type;
cp->c_vnode = vp;

View File

@@ -663,8 +663,10 @@ cd9660_vget_internal(mp, ino, flags, vpp, relocated, isodir)
struct vnode *vp;
struct cdev *dev;
int error;
struct thread *td;
error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
td = curthread;
error = vfs_hash_get(mp, ino, flags, td, vpp, NULL, NULL);
if (error || *vpp != NULL)
return (error);
@@ -682,7 +684,14 @@ cd9660_vget_internal(mp, ino, flags, vpp, relocated, isodir)
ip->i_vnode = vp;
ip->i_number = ino;
error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL, td);
error = insmntque(vp, mp);
if (error != 0) {
free(ip, M_ISOFSNODE);
*vpp = NULLVP;
return (error);
}
error = vfs_hash_insert(vp, ino, flags, td, vpp, NULL, NULL);
if (error || *vpp != NULL)
return (error);

View File

@@ -1797,6 +1797,9 @@ make_coda_node(CodaFid *fid, struct mount *vfsp, short type)
if (err) {
panic("coda: getnewvnode returned error %d\n", err);
}
err = insmntque1(vp, vfsp, NULL, NULL); /* XXX: Too early for mpsafe fs */
if (err != 0)
panic("coda: insmntque failed: error %d", err);
vp->v_data = cp;
vp->v_type = type;
cp->c_vnode = vp;

View File

@@ -154,6 +154,20 @@ devfs_allocv_drop_refs(int drop_dm_lock, struct devfs_mount *dmp,
return (not_found);
}
static void
devfs_insmntque_dtr(struct vnode *vp, void *arg)
{
struct devfs_dirent *de;
de = (struct devfs_dirent *)arg;
mtx_lock(&devfs_de_interlock);
vp->v_data = NULL;
de->de_vnode = NULL;
mtx_unlock(&devfs_de_interlock);
vgone(vp);
vput(vp);
}
/*
* devfs_allocv shall be entered with dmp->dm_lock held, and it drops
* it on return.
@@ -230,13 +244,16 @@ devfs_allocv(struct devfs_dirent *de, struct mount *mp, struct vnode **vpp, stru
} else {
vp->v_type = VBAD;
}
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
mtx_lock(&devfs_de_interlock);
vp->v_data = de;
de->de_vnode = vp;
mtx_unlock(&devfs_de_interlock);
sx_xunlock(&dmp->dm_lock);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
sx_xlock(&dmp->dm_lock);
error = insmntque1(vp, mp, devfs_insmntque_dtr, de);
if (error != 0) {
(void) devfs_allocv_drop_refs(1, dmp, de);
return (error);
}
if (devfs_allocv_drop_refs(0, dmp, de)) {
vput(vp);
return (ENOENT);

View File

@@ -151,6 +151,13 @@ fdesc_allocvp(ftype, ix, mp, vpp, td)
fd->fd_type = ftype;
fd->fd_fd = -1;
fd->fd_ix = ix;
/* XXX: vnode should be locked here */
error = insmntque(*vpp, mp); /* XXX: Too early for mpsafe fs */
if (error != 0) {
free(fd, M_TEMP);
*vpp = NULLVP;
goto out;
}
LIST_INSERT_HEAD(fc, fd, fd_hash);
out:

View File

@@ -445,6 +445,7 @@ hpfs_vget(
struct hpfsnode *hp;
struct buf *bp;
int error;
struct thread *td;
dprintf(("hpfs_vget(0x%x): ",ino));
@@ -471,7 +472,7 @@ hpfs_vget(
MALLOC(hp, struct hpfsnode *, sizeof(struct hpfsnode),
M_HPFSNO, M_WAITOK);
error = getnewvnode("hpfs", hpmp->hpm_mp, &hpfs_vnodeops, &vp);
error = getnewvnode("hpfs", mp, &hpfs_vnodeops, &vp);
if (error) {
printf("hpfs_vget: can't get new vnode\n");
FREE(hp, M_HPFSNO);
@@ -498,7 +499,14 @@ hpfs_vget(
hp->h_mode = hpmp->hpm_mode;
hp->h_devvp = hpmp->hpm_devvp;
error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
td = curthread;
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL, td);
error = insmntque(vp, mp);
if (error != 0) {
free(hp, M_HPFSNO);
return (error);
}
error = vfs_hash_insert(vp, ino, flags, td, vpp, NULL, NULL);
if (error || *vpp != NULL)
return (error);

View File

@@ -107,6 +107,7 @@ deget(pmp, dirclust, diroffset, depp)
struct denode *ldep;
struct vnode *nvp, *xvp;
struct buf *bp;
struct thread *td;
#ifdef MSDOSFS_DEBUG
printf("deget(pmp %p, dirclust %lu, diroffset %lx, depp %p)\n",
@@ -172,7 +173,15 @@ deget(pmp, dirclust, diroffset, depp)
ldep->de_inode = inode;
fc_purge(ldep, 0); /* init the fat cache for this denode */
error = vfs_hash_insert(nvp, inode, LK_EXCLUSIVE, curthread, &xvp,
td = curthread;
lockmgr(nvp->v_vnlock, LK_EXCLUSIVE, NULL, td);
error = insmntque(nvp, mntp);
if (error != 0) {
FREE(ldep, M_MSDOSFSNODE);
*depp = NULL;
return (error);
}
error = vfs_hash_insert(nvp, inode, LK_EXCLUSIVE, td, &xvp,
de_vncmpf, &inode);
if (error) {
*depp = NULL;

View File

@@ -706,6 +706,13 @@ ntfs_vgetex(
ntfs_ntput(ip);
return (error);
}
/* XXX: Too early for mpsafe fs, lacks vnode lock */
error = insmntque(vp, ntmp->ntm_mountp);
if (error) {
ntfs_frele(fp);
ntfs_ntput(ip);
return (error);
}
dprintf(("ntfs_vget: vnode: %p for ntnode: %d\n", vp,ino));
fp->f_vp = vp;

View File

@@ -185,6 +185,18 @@ null_hashins(mp, xp)
return (NULLVP);
}
static void
null_insmntque_dtr(struct vnode *vp, void *xp)
{
vp->v_data = NULL;
vp->v_vnlock = &vp->v_lock;
FREE(xp, M_NULLFSNODE);
vp->v_op = &dead_vnodeops;
(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
vgone(vp);
vput(vp);
}
/*
* Make a new or get existing nullfs node.
* Vp is the alias vnode, lowervp is the lower vnode.
@@ -239,6 +251,9 @@ null_nodeget(mp, lowervp, vpp)
vp->v_vnlock = lowervp->v_vnlock;
if (vp->v_vnlock == NULL)
panic("null_nodeget: Passed a NULL vnlock.\n");
error = insmntque1(vp, mp, null_insmntque_dtr, xp);
if (error != 0)
return (error);
/*
* Atomically insert our new node into the hash or vget existing
* if someone else has beaten us to it.

View File

@@ -174,6 +174,12 @@ nwfs_allocvp(struct mount *mp, ncpfid fid, struct nw_entry_info *fap,
FREE(np, M_NWNODE);
return (error);
}
error = insmntque(vp, mp); /* XXX: Too early for mpsafe fs */
if (error != 0) {
FREE(np, M_NWNODE);
*vpp = NULL;
return (error);
}
vp->v_data = np;
np->n_vnode = vp;
np->n_mount = nmp;

View File

@@ -136,6 +136,13 @@ portal_mount(struct mount *mp, struct thread *td)
return (error);
}
error = insmntque(rvp, mp); /* XXX: Too early for mpsafe fs */
if (error != 0) {
FREE(fmp, M_PORTALFSMNT);
FREE(pn, M_TEMP);
fdrop(fp, td);
return (error);
}
rvp->v_data = pn;
rvp->v_type = VDIR;
rvp->v_vflag |= VV_ROOT;

View File

@@ -154,6 +154,11 @@ portal_lookup(ap)
*vpp = fvp;
vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, td);
error = insmntque(fvp, dvp->v_mount);
if (error != 0) {
*vpp = NULLVP;
return (error);
}
return (0);
bad:;

View File

@@ -180,6 +180,14 @@ pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
if ((pn->pn_flags & PFS_PROCDEP) != 0)
(*vpp)->v_vflag |= VV_PROCDEP;
pvd->pvd_vnode = *vpp;
(*vpp)->v_vnlock->lk_flags |= LK_CANRECURSE;
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, curthread);
error = insmntque(*vpp, mp);
if (error != 0) {
FREE(pvd, M_PFSVNCACHE);
*vpp = NULLVP;
return (error);
}
mtx_lock(&pfs_vncache_mutex);
pvd->pvd_prev = NULL;
pvd->pvd_next = pfs_vncache;
@@ -187,8 +195,6 @@ pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
pvd->pvd_next->pvd_prev = pvd;
pfs_vncache = pvd;
mtx_unlock(&pfs_vncache_mutex);
(*vpp)->v_vnlock->lk_flags |= LK_CANRECURSE;
vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE, curthread);
return (0);
}

View File

@@ -242,6 +242,11 @@ smbfs_node_alloc(struct mount *mp, struct vnode *dvp,
FREE(np, M_SMBNODE);
return error;
}
error = insmntque(vp, mp); /* XXX: Too early for mpsafe fs */
if (error != 0) {
FREE(np, M_SMBNODE);
return (error);
}
vp->v_type = fap->fa_attr & SMB_FA_DIR ? VDIR : VREG;
bzero(np, sizeof(*np));
vp->v_data = np;

View File

@@ -613,7 +613,13 @@ udf_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
unode->udfmp = udfmp;
vp->v_data = unode;
error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL, td);
error = insmntque(vp, mp);
if (error != 0) {
uma_zfree(udf_zone_node, unode);
return (error);
}
error = vfs_hash_insert(vp, ino, flags, td, vpp, NULL, NULL);
if (error || *vpp != NULL)
return (error);

View File

@@ -214,6 +214,12 @@ umap_node_alloc(mp, lowervp, vpp)
return (error);
}
vp = *vpp;
error = insmntque(vp, mp); /* XXX: Too early for mpsafe fs */
if (error != 0) {
FREE(xp, M_TEMP);
*vpp = NULLVP;
return (error);
}
vp->v_type = lowervp->v_type;
xp->umap_vnode = vp;

View File

@@ -243,6 +243,11 @@ unionfs_nodeget(struct mount *mp, struct vnode *uppervp,
FREE(unp, M_UNIONFSNODE);
return (error);
}
error = insmntque(vp, mp); /* XXX: Too early for mpsafe fs */
if (error != 0) {
FREE(unp, M_UNIONFSNODE);
return (error);
}
if (dvp != NULLVP)
vref(dvp);
if (uppervp != NULLVP)

View File

@@ -954,8 +954,10 @@ ext2_vget(mp, ino, flags, vpp)
struct cdev *dev;
int i, error;
int used_blocks;
struct thread *td;
error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
td = curthread;
error = vfs_hash_get(mp, ino, flags, td, vpp, NULL, NULL);
if (error || *vpp != NULL)
return (error);
@@ -982,7 +984,14 @@ ext2_vget(mp, ino, flags, vpp)
ip->i_e2fs = fs = ump->um_e2fs;
ip->i_number = ino;
error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL, td);
error = insmntque(vp, mp);
if (error != 0) {
free(ip, M_EXT2NODE);
*vpp = NULL;
return (error);
}
error = vfs_hash_insert(vp, ino, flags, td, vpp, NULL, NULL);
if (error || *vpp != NULL)
return (error);

View File

@@ -815,6 +815,14 @@ reiserfs_iget(
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, (struct mtx *)0, td);
#endif
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL, td);
error = insmntque(vp, mp);
if (error != 0) {
free(ip, M_REISERFSNODE);
*vpp = NULL;
reiserfs_log(LOG_DEBUG, "insmntque FAILED\n");
return (error);
}
error = vfs_hash_insert(vp, key->on_disk_key.k_objectid, flags,
td, vpp, NULL, NULL);
if (error || *vpp != NULL)

View File

@@ -391,6 +391,11 @@ xfs_vn_allocate(xfs_mount_t *mp, xfs_inode_t *ip, struct xfs_vnode **vpp)
vp->v_vnlock->lk_flags |= LK_CANRECURSE;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
error = insmntque(vp, XVFSTOMNT(XFS_MTOVFS(mp)));
if (error != 0) {
kmem_free(vdata, sizeof(*vdata));
return (error);
}
vp->v_data = (void *)vdata;
vdata->v_number= 0;

View File

@@ -717,6 +717,12 @@ mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn)
error = getnewvnode("mqueue", mp, &mqfs_vnodeops, vpp);
if (error)
return (error);
vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, curthread);
error = insmntque(*vpp, mp);
if (error != 0) {
*vpp = NULLVP;
return (error);
}
vd = uma_zalloc(mvdata_zone, M_WAITOK);
(*vpp)->v_data = vd;
vd->mv_vnode = *vpp;
@@ -744,7 +750,6 @@ mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn)
default:
panic("%s has unexpected type: %d", pn->mn_name, pn->mn_type);
}
vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE, curthread);
return (0);
}

View File

@@ -109,7 +109,6 @@ vfs_hash_insert(struct vnode *vp, u_int hash, int flags, struct thread *td, stru
struct vnode *vp2;
int error;
lockmgr(vp->v_vnlock, flags & LK_TYPE_MASK, NULL, td);
*vpp = NULL;
while (1) {
mtx_lock(&vfs_hash_mtx);

View File

@@ -90,7 +90,6 @@ __FBSDID("$FreeBSD$");
static MALLOC_DEFINE(M_NETADDR, "subr_export_host", "Export host address structure");
static void delmntque(struct vnode *vp);
static void insmntque(struct vnode *vp, struct mount *mp);
static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
int slpflag, int slptimeo);
static void syncer_shutdown(void *arg, int howto);
@@ -943,7 +942,6 @@ getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
printf("NULL mp in getnewvnode()\n");
#endif
if (mp != NULL) {
insmntque(vp, mp);
bo->bo_bsize = mp->mnt_stat.f_iosize;
if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
vp->v_vflag |= VV_NOKNOTE;
@@ -975,22 +973,56 @@ delmntque(struct vnode *vp)
MNT_IUNLOCK(mp);
}
static void
insmntque_stddtr(struct vnode *vp, void *dtr_arg)
{
struct thread *td;
td = curthread; /* XXX ? */
vp->v_data = NULL;
vp->v_op = &dead_vnodeops;
/* XXX non mp-safe fs may still call insmntque with vnode
unlocked */
if (!VOP_ISLOCKED(vp, td))
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
vgone(vp);
vput(vp);
}
/*
* Insert into list of vnodes for the new mount point, if available.
*/
static void
insmntque(struct vnode *vp, struct mount *mp)
int
insmntque1(struct vnode *vp, struct mount *mp,
void (*dtr)(struct vnode *, void *), void *dtr_arg)
{
vp->v_mount = mp;
KASSERT(vp->v_mount == NULL,
("insmntque: vnode already on per mount vnode list"));
VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
MNT_ILOCK(mp);
if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 &&
mp->mnt_nvnodelistsize == 0) {
MNT_IUNLOCK(mp);
if (dtr != NULL)
dtr(vp, dtr_arg);
return (EBUSY);
}
vp->v_mount = mp;
MNT_REF(mp);
TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
("neg mount point vnode list size"));
mp->mnt_nvnodelistsize++;
MNT_IUNLOCK(mp);
return (0);
}
int
insmntque(struct vnode *vp, struct mount *mp)
{
return (insmntque1(vp, mp, insmntque_stddtr, NULL));
}
/*
@@ -3015,6 +3047,9 @@ vfs_allocate_syncvnode(struct mount *mp)
return (error);
}
vp->v_type = VNON;
error = insmntque(vp, mp);
if (error != 0)
panic("vfs_allocate_syncvnode: insmntque failed");
/*
* Place the vnode onto the syncer worklist. We attempt to
* scatter them about on the list so that they will go off

View File

@@ -166,6 +166,17 @@ nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, int
np->n_fhp = &np->n_fh;
bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
np->n_fhsize = fhsize;
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL, td);
error = insmntque(vp, mntp);
if (error != 0) {
*npp = NULL;
if (np->n_fhsize > NFS_SMALLFH) {
FREE((caddr_t)np->n_fhp, M_NFSBIGFH);
}
mtx_destroy(&np->n_mtx);
uma_zfree(nfsnode_zone, np);
return (error);
}
error = vfs_hash_insert(vp, hash, flags,
td, &nvp, nfs_vncmpf, &ncmp);
if (error)

View File

@@ -572,6 +572,9 @@ void cvtstat(struct stat *st, struct ostat *ost);
void cvtnstat(struct stat *sb, struct nstat *nsb);
int getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
struct vnode **vpp);
int insmntque1(struct vnode *vp, struct mount *mp,
void (*dtr)(struct vnode *, void *), void *dtr_arg);
int insmntque(struct vnode *vp, struct mount *mp);
u_quad_t init_va_filerev(void);
int lease_check(struct vop_lease_args *ap);
int speedup_syncer(void);

View File

@@ -1327,6 +1327,7 @@ ffs_vget(mp, ino, flags, vpp)
struct vnode *vp;
struct cdev *dev;
int error;
struct thread *td;
error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
if (error || *vpp != NULL)
@@ -1391,7 +1392,15 @@ ffs_vget(mp, ino, flags, vpp)
}
#endif
error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
td = curthread;
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL, td);
error = insmntque(vp, mp);
if (error != 0) {
uma_zfree(uma_inode, ip);
*vpp = NULL;
return (error);
}
error = vfs_hash_insert(vp, ino, flags, td, vpp, NULL, NULL);
if (error || *vpp != NULL)
return (error);