devfs: introduce a per-dev lock to protect ->si_devsw

This allows bumping threadcount without taking the global devmtx lock.
In particular this eliminates contention on said lock while using bhyve
with multiple vms.

Reviewed by:	kib
Tested by:	markj
MFC after:	2 weeks
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D22548
This commit is contained in:
parent
7cd6d1bac3
commit
afeeba244b
@@ -138,6 +138,8 @@ devfs_alloc(int flags)
|
||||
if (cdp == NULL)
|
||||
return (NULL);
|
||||
|
||||
mtx_init(&cdp->cdp_threadlock, "devthrd", NULL, MTX_DEF);
|
||||
|
||||
cdp->cdp_dirents = &cdp->cdp_dirent0;
|
||||
|
||||
cdev = &cdp->cdp_c;
|
||||
@@ -180,6 +182,7 @@ devfs_free(struct cdev *cdev)
|
||||
devfs_free_cdp_inode(cdp->cdp_inode);
|
||||
if (cdp->cdp_maxdirent > 0)
|
||||
free(cdp->cdp_dirents, M_DEVFS2);
|
||||
mtx_destroy(&cdp->cdp_threadlock);
|
||||
free(cdp, M_CDEVP);
|
||||
}
|
||||
|
||||
|
@@ -70,6 +70,8 @@ struct cdev_priv {
|
||||
void *cdp_dtr_cb_arg;
|
||||
|
||||
LIST_HEAD(, cdev_privdata) cdp_fdpriv;
|
||||
|
||||
struct mtx cdp_threadlock;
|
||||
};
|
||||
|
||||
#define cdev2priv(c) __containerof(c, struct cdev_priv, cdp_c)
|
||||
|
@@ -186,16 +186,16 @@ dev_refthread(struct cdev *dev, int *ref)
|
||||
*ref = 0;
|
||||
return (dev->si_devsw);
|
||||
}
|
||||
dev_lock();
|
||||
cdp = cdev2priv(dev);
|
||||
mtx_lock(&cdp->cdp_threadlock);
|
||||
csw = dev->si_devsw;
|
||||
if (csw != NULL) {
|
||||
cdp = cdev2priv(dev);
|
||||
if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0)
|
||||
atomic_add_long(&dev->si_threadcount, 1);
|
||||
else
|
||||
csw = NULL;
|
||||
}
|
||||
dev_unlock();
|
||||
mtx_unlock(&cdp->cdp_threadlock);
|
||||
if (csw != NULL)
|
||||
*ref = 1;
|
||||
return (csw);
|
||||
@@ -223,19 +223,21 @@ devvn_refthread(struct vnode *vp, struct cdev **devp, int *ref)
|
||||
}
|
||||
|
||||
csw = NULL;
|
||||
dev_lock();
|
||||
VI_LOCK(vp);
|
||||
dev = vp->v_rdev;
|
||||
if (dev == NULL) {
|
||||
dev_unlock();
|
||||
VI_UNLOCK(vp);
|
||||
return (NULL);
|
||||
}
|
||||
cdp = cdev2priv(dev);
|
||||
mtx_lock(&cdp->cdp_threadlock);
|
||||
if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) {
|
||||
csw = dev->si_devsw;
|
||||
if (csw != NULL)
|
||||
atomic_add_long(&dev->si_threadcount, 1);
|
||||
}
|
||||
dev_unlock();
|
||||
mtx_unlock(&cdp->cdp_threadlock);
|
||||
VI_UNLOCK(vp);
|
||||
if (csw != NULL) {
|
||||
*devp = dev;
|
||||
*ref = 1;
|
||||
@@ -1136,20 +1138,26 @@ destroy_devl(struct cdev *dev)
|
||||
dev->si_flags &= ~SI_CLONELIST;
|
||||
}
|
||||
|
||||
mtx_lock(&cdp->cdp_threadlock);
|
||||
csw = dev->si_devsw;
|
||||
dev->si_devsw = NULL; /* already NULL for SI_ALIAS */
|
||||
while (csw != NULL && csw->d_purge != NULL && dev->si_threadcount) {
|
||||
csw->d_purge(dev);
|
||||
mtx_unlock(&cdp->cdp_threadlock);
|
||||
msleep(csw, &devmtx, PRIBIO, "devprg", hz/10);
|
||||
mtx_lock(&cdp->cdp_threadlock);
|
||||
if (dev->si_threadcount)
|
||||
printf("Still %lu threads in %s\n",
|
||||
dev->si_threadcount, devtoname(dev));
|
||||
}
|
||||
while (dev->si_threadcount != 0) {
|
||||
/* Use unique dummy wait ident */
|
||||
mtx_unlock(&cdp->cdp_threadlock);
|
||||
msleep(&csw, &devmtx, PRIBIO, "devdrn", hz / 10);
|
||||
mtx_lock(&cdp->cdp_threadlock);
|
||||
}
|
||||
|
||||
mtx_unlock(&cdp->cdp_threadlock);
|
||||
dev_unlock();
|
||||
if ((cdp->cdp_flags & CDP_UNREF_DTR) == 0) {
|
||||
/* avoid out of order notify events */
|
||||
|
@@ -602,6 +602,7 @@ static struct witness_order_list_entry order_lists[] = {
|
||||
{ "vm map (system)", &lock_class_mtx_sleep },
|
||||
{ "vnode interlock", &lock_class_mtx_sleep },
|
||||
{ "cdev", &lock_class_mtx_sleep },
|
||||
{ "devthrd", &lock_class_mtx_sleep },
|
||||
{ NULL, NULL },
|
||||
/*
|
||||
* VM
|
||||
|
Loading…
x
Reference in New Issue
Block a user