Properly lock the vnode around vgone() calls.

Unlock the vnode in devfs_close() while calling into the driver's d_close()
routine.

devfs_revoke() changes by:	ups
Reviewed and bugfixes by:	tegge
Tested by:	mbr, Peter Holm
Approved by:	pjd (mentor)
MFC after:	1 week
Konstantin Belousov, 2006-10-18 11:17:14 +00:00
commit 828d6d12da (parent b912fe73ee)
Notes: svn2git 2020-12-20 02:59:44 +00:00
       svn path=/head/; revision=163481
4 changed files with 113 additions and 25 deletions
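To make the headline change concrete: vgone() expects the vnode to be
exclusively locked, and devfs may only sleep for that lock after letting go of
its own dm_lock. The helper below is a condensed, illustrative sketch of the
sequence the new devfs_delete() follows (devfs_devs.c hunk below, vp_locked == 0
case); the function name is invented for the example and the include list is
abbreviated.

/* Sketch only: condensed from the devfs_delete() change below. */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/vnode.h>

#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>

static void
devfs_doom_vnode(struct devfs_mount *dm, struct devfs_dirent *de,
    struct thread *td)
{
	struct vnode *vp;

	mtx_lock(&devfs_de_interlock);
	vp = de->de_vnode;
	if (vp == NULL) {
		mtx_unlock(&devfs_de_interlock);
		return;
	}
	VI_LOCK(vp);			/* pin vp via its interlock */
	mtx_unlock(&devfs_de_interlock);
	vholdl(vp);			/* keep vp alive while we sleep */
	sx_unlock(&dm->dm_lock);	/* cannot hold dm_lock across vn_lock() */
	vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY, td);
	vgone(vp);			/* vnode is now properly locked for revocation */
	VOP_UNLOCK(vp, 0, td);
	vdrop(vp);
	sx_xlock(&dm->dm_lock);		/* restore the lock the caller expects */
}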

sys/fs/devfs/devfs.h

@@ -173,7 +173,7 @@ void devfs_rules_apply(struct devfs_mount *dm, struct devfs_dirent *de);
void devfs_rules_cleanup (struct devfs_mount *dm);
int devfs_rules_ioctl(struct devfs_mount *dm, u_long cmd, caddr_t data, struct thread *td);
int devfs_allocv (struct devfs_dirent *de, struct mount *mp, struct vnode **vpp, struct thread *td);
-void devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de);
+void devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de, int vp_locked);
void devfs_dirent_free(struct devfs_dirent *de);
void devfs_populate (struct devfs_mount *dm);
void devfs_cleanup (struct devfs_mount *dm);

sys/fs/devfs/devfs_devs.c

@@ -53,7 +53,7 @@
* The one true (but secret) list of active devices in the system.
* Locked by dev_lock()/devmtx
*/
-static TAILQ_HEAD(,cdev_priv) cdevp_list = TAILQ_HEAD_INITIALIZER(cdevp_list);
+struct cdev_priv_list cdevp_list = TAILQ_HEAD_INITIALIZER(cdevp_list);
struct unrhdr *devfs_inos;
@@ -236,24 +236,42 @@ devfs_dirent_free(struct devfs_dirent *de)
free(de, M_DEVFS3);
}
/*
* The caller needs to hold the dm for the duration of the call since
* dm->dm_lock may be temporarily dropped.
*/
void
-devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de)
+devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de, int vp_locked)
{
struct vnode *vp;
struct thread *td;
KASSERT((de->de_flags & DE_DOOMED) == 0,
("devfs_delete doomed dirent"));
td = curthread;
de->de_flags |= DE_DOOMED;
mtx_lock(&devfs_de_interlock);
vp = de->de_vnode;
if (vp != NULL) {
VI_LOCK(vp);
mtx_unlock(&devfs_de_interlock);
vholdl(vp);
sx_unlock(&dm->dm_lock);
if (!vp_locked)
vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY, td);
else
VI_UNLOCK(vp);
vgone(vp);
if (!vp_locked)
VOP_UNLOCK(vp, 0, td);
vdrop(vp);
sx_xlock(&dm->dm_lock);
} else
mtx_unlock(&devfs_de_interlock);
if (de->de_symlink) {
free(de->de_symlink, M_DEVFS);
de->de_symlink = NULL;
}
if (de->de_vnode != NULL) {
vhold(de->de_vnode);
de->de_vnode->v_data = NULL;
vgone(de->de_vnode);
vdrop(de->de_vnode);
de->de_vnode = NULL;
}
#ifdef MAC
mac_destroy_devfsdirent(de);
#endif
@@ -267,7 +285,8 @@ devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de)
/*
* Called on unmount.
-* Recursively removes the entire tree
+* Recursively removes the entire tree.
+* The caller needs to hold the dm for the duration of the call.
*/
static void
@@ -282,13 +301,13 @@ devfs_purge(struct devfs_mount *dm, struct devfs_dirent *dd)
break;
TAILQ_REMOVE(&dd->de_dlist, de, de_list);
if (de->de_flags & (DE_DOT|DE_DOTDOT))
-devfs_delete(dm, de);
+devfs_delete(dm, de, 0);
else if (de->de_dirent->d_type == DT_DIR)
devfs_purge(dm, de);
else
-devfs_delete(dm, de);
+devfs_delete(dm, de, 0);
}
-devfs_delete(dm, dd);
+devfs_delete(dm, dd, 0);
}
/*
@@ -324,6 +343,9 @@ devfs_metoo(struct cdev_priv *cdp, struct devfs_mount *dm)
dev_unlock();
}
/*
* The caller needs to hold the dm for the duration of the call.
*/
static int
devfs_populate_loop(struct devfs_mount *dm, int cleanup)
{
@@ -349,7 +371,6 @@ devfs_populate_loop(struct devfs_mount *dm, int cleanup)
cdp->cdp_dirents[dm->dm_idx] != NULL) {
de = cdp->cdp_dirents[dm->dm_idx];
cdp->cdp_dirents[dm->dm_idx] = NULL;
-cdp->cdp_inuse--;
KASSERT(cdp == de->de_cdp,
("%s %d %s %p %p", __func__, __LINE__,
cdp->cdp_c.si_name, cdp, de->de_cdp));
@@ -359,7 +380,10 @@ devfs_populate_loop(struct devfs_mount *dm, int cleanup)
TAILQ_REMOVE(&de->de_dir->de_dlist, de, de_list);
de->de_cdp = NULL;
de->de_inode = 0;
-devfs_delete(dm, de);
+devfs_delete(dm, de, 0);
dev_lock();
cdp->cdp_inuse--;
dev_unlock();
return (1);
}
/*
@@ -447,6 +471,9 @@ devfs_populate_loop(struct devfs_mount *dm, int cleanup)
return (0);
}
/*
* The caller needs to hold the dm for the duration of the call.
*/
void
devfs_populate(struct devfs_mount *dm)
{
@@ -459,6 +486,9 @@ devfs_populate(struct devfs_mount *dm)
dm->dm_generation = devfs_generation;
}
/*
* The caller needs to hold the dm for the duration of the call.
*/
void
devfs_cleanup(struct devfs_mount *dm)
{

sys/fs/devfs/devfs_int.h

@@ -61,6 +61,8 @@ void devfs_destroy(struct cdev *dev);
extern struct unrhdr *devfs_inos;
extern struct mtx devmtx;
extern struct mtx devfs_de_interlock;
extern TAILQ_HEAD(cdev_priv_list, cdev_priv) cdevp_list;
#endif /* _KERNEL */

sys/fs/devfs/devfs_vnops.c

@@ -71,7 +71,7 @@ static struct fileops devfs_ops_f;
#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>
-static struct mtx devfs_de_interlock;
+struct mtx devfs_de_interlock;
MTX_SYSINIT(devfs_de_interlock, &devfs_de_interlock, "devfs interlock", MTX_DEF);
static int
@@ -286,7 +286,7 @@ devfs_close(struct vop_close_args *ap)
struct thread *td = ap->a_td;
struct cdev *dev = vp->v_rdev;
struct cdevsw *dsw;
-int error;
+int vp_locked, error;
/*
* Hack: a tty device that is a controlling terminal
@@ -334,7 +334,10 @@
dev_relthread(dev);
return (0);
}
vholdl(vp);
VI_UNLOCK(vp);
vp_locked = VOP_ISLOCKED(vp, td);
VOP_UNLOCK(vp, 0, td);
KASSERT(dev->si_refcount > 0,
("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev)));
if (!(dsw->d_flags & D_NEEDGIANT)) {
@@ -345,6 +348,8 @@
error = dsw->d_close(dev, ap->a_fflag, S_IFCHR, td);
}
dev_relthread(dev);
vn_lock(vp, vp_locked | LK_RETRY, td);
vdrop(vp);
return (error);
}
@@ -568,7 +573,14 @@ devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
return (error);
}
DEVFS_DMP_HOLD(dmp);
devfs_populate(dmp);
if (DEVFS_DMP_DROP(dmp)) {
*dm_unlock = 0;
sx_xunlock(&dmp->dm_lock);
devfs_unmount_final(dmp);
return (ENOENT);
}
dd = dvp->v_data;
de = devfs_find(dd, cnp->cn_nameptr, cnp->cn_namelen);
while (de == NULL) { /* While(...) so we can use break */
@@ -590,7 +602,14 @@ devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
if (cdev == NULL)
break;
DEVFS_DMP_HOLD(dmp);
devfs_populate(dmp);
if (DEVFS_DMP_DROP(dmp)) {
*dm_unlock = 0;
sx_xunlock(&dmp->dm_lock);
devfs_unmount_final(dmp);
return (ENOENT);
}
dev_lock();
dde = &cdev->si_priv->cdp_dirents[dmp->dm_idx];
@@ -875,7 +894,15 @@ devfs_readdir(struct vop_readdir_args *ap)
dmp = VFSTODEVFS(ap->a_vp->v_mount);
sx_xlock(&dmp->dm_lock);
DEVFS_DMP_HOLD(dmp);
devfs_populate(dmp);
if (DEVFS_DMP_DROP(dmp)) {
sx_xunlock(&dmp->dm_lock);
devfs_unmount_final(dmp);
if (tmp_ncookies != NULL)
ap->a_ncookies = tmp_ncookies;
return (EIO);
}
error = 0;
de = ap->a_vp->v_data;
off = 0;
@@ -927,7 +954,7 @@ devfs_reclaim(struct vop_reclaim_args *ap)
struct vnode *vp = ap->a_vp;
struct devfs_dirent *de;
struct cdev *dev;
mtx_lock(&devfs_de_interlock);
de = vp->v_data;
if (de != NULL) {
@@ -964,7 +991,7 @@ devfs_remove(struct vop_remove_args *ap)
de = vp->v_data;
if (de->de_cdp == NULL) {
TAILQ_REMOVE(&dd->de_dlist, de, de_list);
-devfs_delete(dmp, de);
+devfs_delete(dmp, de, 1);
} else {
de->de_flags |= DE_WHITEOUT;
}
@@ -991,6 +1018,17 @@ devfs_revoke(struct vop_revoke_args *ap)
dev = vp->v_rdev;
cdp = dev->si_priv;
dev_lock();
cdp->cdp_inuse++;
dev_unlock();
vhold(vp);
vgone(vp);
vdrop(vp);
VOP_UNLOCK(vp,0,curthread);
loop:
for (;;) {
mtx_lock(&devfs_de_interlock);
dev_lock();
@@ -1000,18 +1038,20 @@ devfs_revoke(struct vop_revoke_args *ap)
if (de == NULL)
continue;
vp2 = de->de_vnode;
if (vp2 != NULL) {
de->de_vnode = NULL;
dev_unlock();
VI_LOCK(vp2);
mtx_unlock(&devfs_de_interlock);
vholdl(vp2);
VI_UNLOCK(vp2);
if (vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK,
curthread))
goto loop;
vhold(vp2);
vgone(vp2);
vdrop(vp2);
vput(vp2);
break;
}
}
}
if (vp2 != NULL) {
continue;
@@ -1020,6 +1060,16 @@ devfs_revoke(struct vop_revoke_args *ap)
mtx_unlock(&devfs_de_interlock);
break;
}
dev_lock();
cdp->cdp_inuse--;
if (!(cdp->cdp_flags & CDP_ACTIVE) && cdp->cdp_inuse == 0) {
TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
dev_unlock();
dev_rel(&cdp->cdp_c);
} else
dev_unlock();
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
return (0);
}
@@ -1031,7 +1081,13 @@ devfs_rioctl(struct vop_ioctl_args *ap)
dmp = VFSTODEVFS(ap->a_vp->v_mount);
sx_xlock(&dmp->dm_lock);
DEVFS_DMP_HOLD(dmp);
devfs_populate(dmp);
if (DEVFS_DMP_DROP(dmp)) {
sx_xunlock(&dmp->dm_lock);
devfs_unmount_final(dmp);
return (ENOENT);
}
error = devfs_rules_ioctl(dmp, ap->a_command, ap->a_data, ap->a_td);
sx_xunlock(&dmp->dm_lock);
return (error);
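
For completeness, the devfs_close() hunks above reduce to the sequence below:
remember how the vnode was locked, hold it, drop the lock so a d_close()
routine that sleeps cannot stall the vnode, call the driver, then restore the
previous lock state. This is an illustrative sketch, not the full function:
the helper name and the dsw parameter are invented (the real code obtains dsw
via dev_refthread() earlier on), and the tty/controlling-terminal, last-close
accounting, and D_NEEDGIANT handling are omitted.

/* Sketch only: condensed from the devfs_close() change above. */
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/stat.h>
#include <sys/vnode.h>

static int
devfs_close_driver(struct vnode *vp, struct cdevsw *dsw, int fflag,
    struct thread *td)
{
	struct cdev *dev = vp->v_rdev;
	int vp_locked, error;

	vhold(vp);				/* real code uses vholdl(); the vnode */
						/* interlock is held at this point */
	vp_locked = VOP_ISLOCKED(vp, td);	/* remember shared vs. exclusive */
	VOP_UNLOCK(vp, 0, td);			/* d_close() may sleep */

	error = dsw->d_close(dev, fflag, S_IFCHR, td);

	dev_relthread(dev);			/* pairs with the earlier dev_refthread() */
	vn_lock(vp, vp_locked | LK_RETRY, td);	/* restore the previous lock state */
	vdrop(vp);
	return (error);
}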