Eliminate the zfsdev_state_lock entirely and replace it with the
spa_namespace_lock.

This fixes a lock-order reversal (LOR) between the spa_namespace_lock and
the spa_config lock, which could lead to a deadlock on vdev removal or
insertion.

Reported by:	gibbs, delphij
Tested by:	delphij
Approved by:	re (kib)
MFC after:	1 week
commit 7b1085ba55
parent dbb202b398
Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=224791
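Background on the failure mode, before the diff: a lock-order reversal (LOR) happens when two code paths take the same pair of locks in opposite orders, so each thread can end up waiting for the lock the other already holds. The sketch below is a generic, self-contained illustration using POSIX threads; the names (namespace_lock, open_path, remove_path, dev_state) are hypothetical stand-ins and not the FreeBSD/ZFS code. It shows the remedy this change applies: every path that touches the shared device state takes the same single lock, so only one acquisition order is possible and the reversal cannot occur.

    /*
     * Generic LOR illustration (hypothetical names; not the ZFS sources).
     * Build: cc lor.c -o lor -lpthread
     *
     * The problematic pattern this commit eliminates looks like:
     *   thread A: lock(namespace_lock); ... lock(state_lock);
     *   thread B: lock(state_lock);     ... lock(namespace_lock);
     * If A holds namespace_lock while B holds state_lock, both block forever.
     * Collapsing the two locks into one (as the diff does with
     * spa_namespace_lock) leaves only one possible ordering.
     */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t namespace_lock = PTHREAD_MUTEX_INITIALIZER;
    static int dev_state;   /* shared state guarded by namespace_lock */

    static void *open_path(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&namespace_lock);
        dev_state++;        /* "open" a device under the single lock */
        pthread_mutex_unlock(&namespace_lock);
        return (NULL);
    }

    static void *remove_path(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&namespace_lock);
        dev_state--;        /* "remove" a device under the same lock */
        pthread_mutex_unlock(&namespace_lock);
        return (NULL);
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, open_path, NULL);
        pthread_create(&b, NULL, remove_path, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("dev_state = %d; one lock means one ordering, no LOR\n",
            dev_state);
        return (0);
    }

In the diff that follows, the same idea appears as mutex_enter(&spa_namespace_lock)/mutex_exit(&spa_namespace_lock) and ASSERT(MUTEX_HELD(&spa_namespace_lock)) replacing every use of the old zfsdev_state_lock.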
@@ -353,7 +353,6 @@ extern void *zfsdev_get_soft_state(minor_t minor,
 extern minor_t zfsdev_minor_alloc(void);
 
 extern void *zfsdev_state;
-extern kmutex_t zfsdev_state_lock;
 
 #endif	/* _KERNEL */
 
@@ -410,7 +410,7 @@ vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift)
 	struct g_provider *pp;
 	struct g_consumer *cp;
 	size_t bufsize;
-	int error, lock;
+	int error;
 
 	/*
 	 * We must have a pathname, and it must be absolute.
@@ -422,12 +422,6 @@ vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift)
 
 	vd->vdev_tsd = NULL;
 
-	if (mutex_owned(&spa_namespace_lock)) {
-		mutex_exit(&spa_namespace_lock);
-		lock = 1;
-	} else {
-		lock = 0;
-	}
 	DROP_GIANT();
 	g_topology_lock();
 	error = 0;
@@ -459,11 +453,7 @@ vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift)
 	    !ISP2(cp->provider->sectorsize)) {
 		ZFS_LOG(1, "Provider %s has unsupported sectorsize.",
 		    vd->vdev_path);
-
-		g_topology_lock();
 		vdev_geom_detach(cp, 0);
-		g_topology_unlock();
-
 		error = EINVAL;
 		cp = NULL;
 	} else if (cp->acw == 0 && (spa_mode(vd->vdev_spa) & FWRITE) != 0) {
@@ -486,8 +476,6 @@ vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift)
 	}
 	g_topology_unlock();
 	PICKUP_GIANT();
-	if (lock)
-		mutex_enter(&spa_namespace_lock);
 	if (cp == NULL) {
 		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
 		return (error);
@@ -4813,7 +4813,7 @@ zfsdev_minor_alloc(void)
 	static minor_t last_minor;
 	minor_t m;
 
-	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
+	ASSERT(MUTEX_HELD(&spa_namespace_lock));
 
 	for (m = last_minor + 1; m != last_minor; m++) {
 		if (m > ZFSDEV_MAX_MINOR)
@@ -4833,7 +4833,7 @@ zfs_ctldev_init(struct cdev *devp)
 	minor_t minor;
 	zfs_soft_state_t *zs;
 
-	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
+	ASSERT(MUTEX_HELD(&spa_namespace_lock));
 
 	minor = zfsdev_minor_alloc();
 	if (minor == 0)
@@ -4854,7 +4854,7 @@ zfs_ctldev_init(struct cdev *devp)
 static void
 zfs_ctldev_destroy(zfs_onexit_t *zo, minor_t minor)
 {
-	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
+	ASSERT(MUTEX_HELD(&spa_namespace_lock));
 
 	zfs_onexit_destroy(zo);
 	ddi_soft_state_free(zfsdev_state, minor);
@@ -4884,9 +4884,9 @@ zfsdev_open(struct cdev *devp, int flag, int mode, struct thread *td)
 
 	/* This is the control device. Allocate a new minor if requested. */
 	if (flag & FEXCL) {
-		mutex_enter(&zfsdev_state_lock);
+		mutex_enter(&spa_namespace_lock);
 		error = zfs_ctldev_init(devp);
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 	}
 
 	return (error);
@@ -4901,14 +4901,14 @@ zfsdev_close(void *data)
 	if (minor == 0)
 		return;
 
-	mutex_enter(&zfsdev_state_lock);
+	mutex_enter(&spa_namespace_lock);
 	zo = zfsdev_get_soft_state(minor, ZSST_CTLDEV);
 	if (zo == NULL) {
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 		return;
 	}
 	zfs_ctldev_destroy(zo, minor);
-	mutex_exit(&zfsdev_state_lock);
+	mutex_exit(&spa_namespace_lock);
 }
 
 static int
@@ -94,12 +94,11 @@ static char *zvol_tag = "zvol_tag";
 #define	ZVOL_DUMPSIZE		"dumpsize"
 
 /*
- * This lock protects the zfsdev_state structure from being modified
- * while it's being used, e.g. an open that comes in before a create
- * finishes. It also protects temporary opens of the dataset so that,
+ * The spa_namespace_lock protects the zfsdev_state structure from being
+ * modified while it's being used, e.g. an open that comes in before a
+ * create finishes. It also protects temporary opens of the dataset so that,
  * e.g., an open doesn't get a spurious EBUSY.
  */
-kmutex_t zfsdev_state_lock;
 static uint32_t zvol_minors;
 
 typedef struct zvol_extent {
@@ -246,7 +245,7 @@ zvol_minor_lookup(const char *name)
 	struct g_geom *gp;
 	zvol_state_t *zv = NULL;
 
-	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
+	ASSERT(MUTEX_HELD(&spa_namespace_lock));
 
 	g_topology_lock();
 	LIST_FOREACH(gp, &zfs_zvol_class.geom, geom) {
@@ -462,11 +461,11 @@ zvol_name2minor(const char *name, minor_t *minor)
 {
 	zvol_state_t *zv;
 
-	mutex_enter(&zfsdev_state_lock);
+	mutex_enter(&spa_namespace_lock);
 	zv = zvol_minor_lookup(name);
 	if (minor && zv)
 		*minor = zv->zv_minor;
-	mutex_exit(&zfsdev_state_lock);
+	mutex_exit(&spa_namespace_lock);
 	return (zv ? 0 : -1);
 }
 #endif	/* sun */
@@ -485,10 +484,10 @@ zvol_create_minor(const char *name)
 
 	ZFS_LOG(1, "Creating ZVOL %s...", name);
 
-	mutex_enter(&zfsdev_state_lock);
+	mutex_enter(&spa_namespace_lock);
 
 	if (zvol_minor_lookup(name) != NULL) {
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 		return (EEXIST);
 	}
 
@@ -496,20 +495,20 @@ zvol_create_minor(const char *name)
 	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
 
 	if (error) {
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 		return (error);
 	}
 
 #ifdef sun
 	if ((minor = zfsdev_minor_alloc()) == 0) {
 		dmu_objset_disown(os, FTAG);
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 		return (ENXIO);
 	}
 
 	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
 		dmu_objset_disown(os, FTAG);
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 		return (EAGAIN);
 	}
 	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
@@ -521,7 +520,7 @@ zvol_create_minor(const char *name)
 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
 		ddi_soft_state_free(zfsdev_state, minor);
 		dmu_objset_disown(os, FTAG);
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 		return (EAGAIN);
 	}
 
@@ -532,7 +531,7 @@ zvol_create_minor(const char *name)
 		ddi_remove_minor_node(zfs_dip, chrbuf);
 		ddi_soft_state_free(zfsdev_state, minor);
 		dmu_objset_disown(os, FTAG);
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 		return (EAGAIN);
 	}
 
@@ -572,7 +571,7 @@ zvol_create_minor(const char *name)
 
 	zvol_minors++;
 
-	mutex_exit(&zfsdev_state_lock);
+	mutex_exit(&spa_namespace_lock);
 
 	zvol_geom_run(zv);
 
@@ -594,7 +593,7 @@ zvol_remove_zv(zvol_state_t *zv)
 	minor_t minor = zv->zv_minor;
 #endif
 
-	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
+	ASSERT(MUTEX_HELD(&spa_namespace_lock));
 	if (zv->zv_total_opens != 0)
 		return (EBUSY);
 
@@ -620,15 +619,15 @@ zvol_remove_minor(const char *name)
 	zvol_state_t *zv;
 	int rc;
 
-	mutex_enter(&zfsdev_state_lock);
+	mutex_enter(&spa_namespace_lock);
 	if ((zv = zvol_minor_lookup(name)) == NULL) {
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 		return (ENXIO);
 	}
 	g_topology_lock();
 	rc = zvol_remove_zv(zv);
 	g_topology_unlock();
-	mutex_exit(&zfsdev_state_lock);
+	mutex_exit(&spa_namespace_lock);
 	return (rc);
 }
 
@@ -730,7 +729,7 @@ zvol_update_volsize(objset_t *os, uint64_t volsize)
 	dmu_tx_t *tx;
 	int error;
 
-	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
+	ASSERT(MUTEX_HELD(&spa_namespace_lock));
 
 	tx = dmu_tx_create(os);
 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
@@ -761,7 +760,7 @@ zvol_remove_minors(const char *name)
 	namelen = strlen(name);
 
 	DROP_GIANT();
-	mutex_enter(&zfsdev_state_lock);
+	mutex_enter(&spa_namespace_lock);
 	g_topology_lock();
 
 	LIST_FOREACH_SAFE(gp, &zfs_zvol_class.geom, geom, gptmp) {
@@ -779,7 +778,7 @@ zvol_remove_minors(const char *name)
 	}
 
 	g_topology_unlock();
-	mutex_exit(&zfsdev_state_lock);
+	mutex_exit(&spa_namespace_lock);
 	PICKUP_GIANT();
 }
 
@@ -793,10 +792,10 @@ zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
 	uint64_t old_volsize = 0ULL;
 	uint64_t readonly;
 
-	mutex_enter(&zfsdev_state_lock);
+	mutex_enter(&spa_namespace_lock);
 	zv = zvol_minor_lookup(name);
 	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 		return (error);
 	}
 
@@ -863,7 +862,7 @@ zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
 out:
 	dmu_objset_rele(os, FTAG);
 
-	mutex_exit(&zfsdev_state_lock);
+	mutex_exit(&spa_namespace_lock);
 
 	return (error);
 }
@@ -875,18 +874,18 @@ zvol_open(struct g_provider *pp, int flag, int count)
 	zvol_state_t *zv;
 	int err = 0;
 
-	mutex_enter(&zfsdev_state_lock);
+	mutex_enter(&spa_namespace_lock);
 
 	zv = pp->private;
 	if (zv == NULL) {
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 		return (ENXIO);
 	}
 
 	if (zv->zv_total_opens == 0)
 		err = zvol_first_open(zv);
 	if (err) {
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 		return (err);
 	}
 	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
@@ -908,13 +907,13 @@ zvol_open(struct g_provider *pp, int flag, int count)
 #endif
 
 	zv->zv_total_opens += count;
-	mutex_exit(&zfsdev_state_lock);
+	mutex_exit(&spa_namespace_lock);
 
 	return (err);
 out:
 	if (zv->zv_total_opens == 0)
 		zvol_last_close(zv);
-	mutex_exit(&zfsdev_state_lock);
+	mutex_exit(&spa_namespace_lock);
 	return (err);
 }
 
@@ -925,11 +924,11 @@ zvol_close(struct g_provider *pp, int flag, int count)
 	zvol_state_t *zv;
 	int error = 0;
 
-	mutex_enter(&zfsdev_state_lock);
+	mutex_enter(&spa_namespace_lock);
 
 	zv = pp->private;
 	if (zv == NULL) {
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 		return (ENXIO);
 	}
 
@@ -952,7 +951,7 @@ zvol_close(struct g_provider *pp, int flag, int count)
 	if (zv->zv_total_opens == 0)
 		zvol_last_close(zv);
 
-	mutex_exit(&zfsdev_state_lock);
+	mutex_exit(&spa_namespace_lock);
 	return (error);
 }
 
@@ -1571,12 +1570,12 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
 	int error = 0;
 	rl_t *rl;
 
-	mutex_enter(&zfsdev_state_lock);
+	mutex_enter(&spa_namespace_lock);
 
 	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
 
 	if (zv == NULL) {
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 		return (ENXIO);
 	}
 	ASSERT(zv->zv_total_opens > 0);
@@ -1590,7 +1589,7 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
 		dki.dki_ctype = DKC_UNKNOWN;
 		dki.dki_unit = getminor(dev);
 		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
 			error = EFAULT;
 		return (error);
@@ -1600,7 +1599,7 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
 		dkm.dki_lbsize = 1U << zv->zv_min_bs;
 		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
 		dkm.dki_media_type = DK_UNKNOWN;
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
 			error = EFAULT;
 		return (error);
@@ -1610,14 +1609,14 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
 		uint64_t vs = zv->zv_volsize;
 		uint8_t bs = zv->zv_min_bs;
 
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 		error = zvol_getefi((void *)arg, flag, vs, bs);
 		return (error);
 	}
 
 	case DKIOCFLUSHWRITECACHE:
 		dkc = (struct dk_callback *)arg;
-		mutex_exit(&zfsdev_state_lock);
+		mutex_exit(&spa_namespace_lock);
 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
 		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
 			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
@@ -1643,10 +1642,10 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
 		}
 		if (wce) {
 			zv->zv_flags |= ZVOL_WCE;
-			mutex_exit(&zfsdev_state_lock);
+			mutex_exit(&spa_namespace_lock);
 		} else {
 			zv->zv_flags &= ~ZVOL_WCE;
-			mutex_exit(&zfsdev_state_lock);
+			mutex_exit(&spa_namespace_lock);
 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
 		}
 		return (0);
@@ -1682,7 +1681,7 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
 		break;
 
 	}
-	mutex_exit(&zfsdev_state_lock);
+	mutex_exit(&spa_namespace_lock);
 	return (error);
 }
 #endif	/* sun */
@@ -1698,14 +1697,12 @@ zvol_init(void)
 {
 	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
 	    1) == 0);
-	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
 	ZFS_LOG(1, "ZVOL Initialized.");
 }
 
 void
 zvol_fini(void)
 {
-	mutex_destroy(&zfsdev_state_lock);
 	ddi_soft_state_fini(&zfsdev_state);
 	ZFS_LOG(1, "ZVOL Deinitialized.");
 }
@@ -1720,7 +1717,7 @@ zvol_dump_init(zvol_state_t *zv, boolean_t resize)
 	nvlist_t *nv = NULL;
 	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
 
-	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
+	ASSERT(MUTEX_HELD(&spa_namespace_lock));
 	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
 	    DMU_OBJECT_END);
 	/* wait for dmu_free_long_range to actually free the blocks */
@@ -2230,7 +2227,7 @@ zvol_rename_minor(struct g_geom *gp, const char *newname)
 	struct g_provider *pp;
 	zvol_state_t *zv;
 
-	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
+	ASSERT(MUTEX_HELD(&spa_namespace_lock));
 	g_topology_assert();
 
 	pp = LIST_FIRST(&gp->provider);
@@ -2264,7 +2261,7 @@ zvol_rename_minors(const char *oldname, const char *newname)
 	newnamelen = strlen(newname);
 
 	DROP_GIANT();
-	mutex_enter(&zfsdev_state_lock);
+	mutex_enter(&spa_namespace_lock);
 	g_topology_lock();
 
 	LIST_FOREACH(gp, &zfs_zvol_class.geom, geom) {
@@ -2287,6 +2284,6 @@ zvol_rename_minors(const char *oldname, const char *newname)
 	}
 
 	g_topology_unlock();
-	mutex_exit(&zfsdev_state_lock);
+	mutex_exit(&spa_namespace_lock);
 	PICKUP_GIANT();
 }