Add mutex and two flags to make orphan() call properly asynchronous:

- delay consumer closing and detaching on orphan() until all I/Os complete;
- prevent submission of new I/Os after orphan() has been called.
The previous implementation could destroy consumers that still had active
requests, and worked only because of a global workaround made at the GEOM level.
This commit is contained in:
mav 2011-11-02 09:24:59 +00:00
parent e799577150
commit f082fe73d7

View File

@ -31,7 +31,9 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/vnode.h>
#include <sys/mount.h> /* XXX Temporary for VFS_LOCK_GIANT */
@ -45,6 +47,13 @@ __FBSDID("$FreeBSD$");
*/
#include <sys/buf.h>
/*
 * Per-geom private data (stored in gp->softc).
 */
struct g_vfs_softc {
struct mtx sc_mtx;	/* Protects sc_active and sc_orphaned. */
struct bufobj *sc_bo;	/* Buffer object backing this consumer. */
int sc_active;	/* Number of I/Os in flight (submitted, not yet done). */
int sc_orphaned;	/* Set on orphan(); rejects new I/O submissions. */
};
static struct buf_ops __g_vfs_bufops = {
.bop_name = "GEOM_VFS",
.bop_write = bufwrite,
@ -65,22 +74,30 @@ static struct g_class g_vfs_class = {
DECLARE_GEOM_CLASS(g_vfs_class, g_vfs);
/*
 * Finally close and detach the consumer once no I/O remains in flight:
 * called directly from g_vfs_orphan() when the consumer is already idle,
 * or posted as an event from g_vfs_done() when the last active request
 * of an orphaned consumer completes.
 */
static void
g_vfs_destroy(void *arg, int flags __unused)
{
struct g_consumer *cp;
g_topology_assert();
cp = arg;
/* Drop any remaining access counts before detaching. */
if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
g_access(cp, -cp->acr, -cp->acw, -cp->ace);
g_detach(cp);
/* g_vfs_close() clears softc; if it already ran, wither the geom here. */
if (cp->geom->softc == NULL)
g_wither_geom(cp->geom, ENXIO);
}
static void
g_vfs_done(struct bio *bip)
{
struct g_consumer *cp;
struct g_vfs_softc *sc;
struct buf *bp;
int vfslocked;
/*
* Provider ('bio_to') could have withered away sometime
* between incrementing the 'nend' in g_io_deliver() and now,
* making 'bio_to' a dangling pointer. We cannot do that
* in g_wither_geom(), as it would require going over
* the 'g_bio_run_up' list, resetting the pointer.
*/
if (bip->bio_from->provider == NULL)
bip->bio_to = NULL;
int vfslocked, destroy;
cp = bip->bio_from;
sc = cp->geom->softc;
if (bip->bio_error) {
printf("g_vfs_done():");
g_print_bio(bip);
@ -93,6 +110,13 @@ g_vfs_done(struct bio *bip)
bp->b_ioflags |= BIO_ERROR;
bp->b_resid = bp->b_bcount - bip->bio_completed;
g_destroy_bio(bip);
mtx_lock(&sc->sc_mtx);
destroy = ((--sc->sc_active) == 0 && sc->sc_orphaned);
mtx_unlock(&sc->sc_mtx);
if (destroy)
g_post_event(g_vfs_destroy, cp, M_WAITOK, NULL);
vfslocked = VFS_LOCK_GIANT(((struct mount *)NULL));
bufdone(bp);
VFS_UNLOCK_GIANT(vfslocked);
@ -101,17 +125,20 @@ g_vfs_done(struct bio *bip)
void
g_vfs_strategy(struct bufobj *bo, struct buf *bp)
{
struct g_vfs_softc *sc;
struct g_consumer *cp;
struct bio *bip;
int vfslocked;
cp = bo->bo_private;
/* G_VALID_CONSUMER(cp); We likely lack topology lock */
sc = cp->geom->softc;
/*
* If the provider has orphaned us, just return EXIO.
*/
if (cp->provider == NULL) {
mtx_lock(&sc->sc_mtx);
if (sc->sc_orphaned) {
mtx_unlock(&sc->sc_mtx);
bp->b_error = ENXIO;
bp->b_ioflags |= BIO_ERROR;
vfslocked = VFS_LOCK_GIANT(((struct mount *)NULL));
@ -119,6 +146,8 @@ g_vfs_strategy(struct bufobj *bo, struct buf *bp)
VFS_UNLOCK_GIANT(vfslocked);
return;
}
sc->sc_active++;
mtx_unlock(&sc->sc_mtx);
bip = g_alloc_bio();
bip->bio_cmd = bp->b_iocmd;
@ -134,14 +163,20 @@ static void
g_vfs_orphan(struct g_consumer *cp)
{
struct g_geom *gp;
struct g_vfs_softc *sc;
int destroy;
g_topology_assert();
gp = cp->geom;
sc = gp->softc;
g_trace(G_T_TOPOLOGY, "g_vfs_orphan(%p(%s))", cp, gp->name);
if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
g_access(cp, -cp->acr, -cp->acw, -cp->ace);
g_detach(cp);
mtx_lock(&sc->sc_mtx);
sc->sc_orphaned = 1;
destroy = (sc->sc_active == 0);
mtx_unlock(&sc->sc_mtx);
if (destroy)
g_vfs_destroy(cp, 0);
/*
* Do not destroy the geom. Filesystem will do that during unmount.
@ -154,6 +189,7 @@ g_vfs_open(struct vnode *vp, struct g_consumer **cpp, const char *fsname, int wr
struct g_geom *gp;
struct g_provider *pp;
struct g_consumer *cp;
struct g_vfs_softc *sc;
struct bufobj *bo;
int vfslocked;
int error;
@ -169,6 +205,10 @@ g_vfs_open(struct vnode *vp, struct g_consumer **cpp, const char *fsname, int wr
if (pp == NULL)
return (ENOENT);
gp = g_new_geomf(&g_vfs_class, "%s.%s", fsname, pp->name);
sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
mtx_init(&sc->sc_mtx, "g_vfs", NULL, MTX_DEF);
sc->sc_bo = bo;
gp->softc = sc;
cp = g_new_consumer(gp);
g_attach(cp, pp);
error = g_access(cp, 1, wr, wr);
@ -184,7 +224,6 @@ g_vfs_open(struct vnode *vp, struct g_consumer **cpp, const char *fsname, int wr
bo->bo_ops = g_vfs_bufops;
bo->bo_private = cp;
bo->bo_bsize = pp->sectorsize;
gp->softc = bo;
return (error);
}
@ -193,13 +232,17 @@ void
g_vfs_close(struct g_consumer *cp)
{
struct g_geom *gp;
struct bufobj *bo;
struct g_vfs_softc *sc;
g_topology_assert();
gp = cp->geom;
bo = gp->softc;
bufobj_invalbuf(bo, V_SAVE, 0, 0);
bo->bo_private = cp->private;
sc = gp->softc;
bufobj_invalbuf(sc->sc_bo, V_SAVE, 0, 0);
sc->sc_bo->bo_private = cp->private;
gp->softc = NULL;
mtx_destroy(&sc->sc_mtx);
if (!sc->sc_orphaned || cp->provider == NULL)
g_wither_geom_close(gp, ENXIO);
g_free(sc);
}