Use the providergone method to cover the race between destroy and g_access().

Reviewed by:	markj
MFC after:	2 weeks
Sponsored by:	iXsystems, Inc.
This commit is contained in:
Alexander Motin 2016-11-13 03:56:26 +00:00
parent 6487a709f3
commit dc399583ba
2 changed files with 40 additions and 27 deletions

View File

@ -79,12 +79,12 @@ SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
static eventhandler_tag g_mirror_post_sync = NULL;
static int g_mirror_shutdown = 0;
static int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp,
struct g_geom *gp);
static g_ctl_destroy_geom_t g_mirror_destroy_geom;
static g_taste_t g_mirror_taste;
static g_init_t g_mirror_init;
static g_fini_t g_mirror_fini;
static g_provgone_t g_mirror_providergone;
static g_resize_t g_mirror_resize;
static void g_mirror_init(struct g_class *mp);
static void g_mirror_fini(struct g_class *mp);
struct g_class g_mirror_class = {
.name = G_MIRROR_CLASS_NAME,
@ -94,6 +94,7 @@ struct g_class g_mirror_class = {
.destroy_geom = g_mirror_destroy_geom,
.init = g_mirror_init,
.fini = g_mirror_fini,
.providergone = g_mirror_providergone,
.resize = g_mirror_resize
};
@ -507,6 +508,26 @@ g_mirror_destroy_disk(struct g_mirror_disk *disk)
}
}
/*
 * Final teardown of a mirror softc: destroy the mutexes and the sx lock
 * created in g_mirror_create(), then free the structure itself.  Reached
 * when the last softc reference (sc_refcnt) is dropped — from either
 * g_mirror_destroy_device() or g_mirror_providergone() — or directly on
 * the failed-create path before the provider was ever published.
 */
static void
g_mirror_free_device(struct g_mirror_softc *sc)
{
/* Locks must be destroyed before the memory backing them is freed. */
mtx_destroy(&sc->sc_queue_mtx);
mtx_destroy(&sc->sc_events_mtx);
mtx_destroy(&sc->sc_done_mtx);
sx_destroy(&sc->sc_lock);
free(sc, M_MIRROR);
}
/*
 * GEOM ->providergone method, invoked once the provider is fully gone and
 * no further g_access() calls can arrive for it.  Drop the softc reference
 * taken in g_mirror_launch_provider() when pp->private was set, and free
 * the softc if this was the last reference.  Deferring the final free to
 * this point is what closes the race between device destruction and a
 * late g_access() on the dying provider.
 * NOTE(review): assumes the topology lock serializes this against the
 * refcount drop in g_mirror_destroy_device() — confirm; sc_refcnt is not
 * manipulated atomically.
 */
static void
g_mirror_providergone(struct g_provider *pp)
{
struct g_mirror_softc *sc = pp->private;
if ((--sc->sc_refcnt) == 0)
g_mirror_free_device(sc);
}
static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
@ -549,12 +570,10 @@ g_mirror_destroy_device(struct g_mirror_softc *sc)
g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
g_wither_geom(gp, ENXIO);
g_topology_unlock();
mtx_destroy(&sc->sc_queue_mtx);
mtx_destroy(&sc->sc_events_mtx);
mtx_destroy(&sc->sc_done_mtx);
sx_xunlock(&sc->sc_lock);
sx_destroy(&sc->sc_lock);
if ((--sc->sc_refcnt) == 0)
g_mirror_free_device(sc);
g_topology_unlock();
}
static void
@ -868,7 +887,7 @@ g_mirror_flush_done(struct bio *bp)
struct bio *pbp;
pbp = bp->bio_parent;
sc = pbp->bio_to->geom->softc;
sc = pbp->bio_to->private;
mtx_lock(&sc->sc_done_mtx);
if (pbp->bio_error == 0)
pbp->bio_error = bp->bio_error;
@ -905,7 +924,7 @@ g_mirror_regular_request(struct bio *bp)
g_topology_assert_not();
pbp = bp->bio_parent;
sc = pbp->bio_to->geom->softc;
sc = pbp->bio_to->private;
bp->bio_from->index--;
if (bp->bio_cmd == BIO_WRITE)
sc->sc_writes--;
@ -1036,7 +1055,7 @@ g_mirror_candelete(struct bio *bp)
struct g_mirror_disk *disk;
int *val;
sc = bp->bio_to->geom->softc;
sc = bp->bio_to->private;
LIST_FOREACH(disk, &sc->sc_disks, d_next) {
if (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE)
break;
@ -1061,7 +1080,7 @@ g_mirror_kernel_dump(struct bio *bp)
* we will not be able to read the dump after the reboot if it will be
* connected and synchronized later. Can we do something better?
*/
sc = bp->bio_to->geom->softc;
sc = bp->bio_to->private;
disk = LIST_FIRST(&sc->sc_disks);
gkd = (struct g_kerneldump *)bp->bio_data;
@ -1121,7 +1140,7 @@ g_mirror_start(struct bio *bp)
{
struct g_mirror_softc *sc;
sc = bp->bio_to->geom->softc;
sc = bp->bio_to->private;
/*
* If sc == NULL or there are no valid disks, provider's error
* should be set and g_mirror_start() should not be called at all.
@ -1637,7 +1656,7 @@ g_mirror_register_request(struct bio *bp)
{
struct g_mirror_softc *sc;
sc = bp->bio_to->geom->softc;
sc = bp->bio_to->private;
switch (bp->bio_cmd) {
case BIO_READ:
switch (sc->sc_balance) {
@ -1804,7 +1823,6 @@ g_mirror_try_destroy(struct g_mirror_softc *sc)
} else {
g_topology_unlock();
g_mirror_destroy_device(sc);
free(sc, M_MIRROR);
}
return (1);
}
@ -2117,6 +2135,8 @@ g_mirror_launch_provider(struct g_mirror_softc *sc)
}
}
}
pp->private = sc;
sc->sc_refcnt++;
sc->sc_provider = pp;
g_error_provider(pp, 0);
g_topology_unlock();
@ -2898,9 +2918,7 @@ g_mirror_access(struct g_provider *pp, int acr, int acw, int ace)
G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
acw, ace);
sc = pp->geom->softc;
if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0)
return (0);
sc = pp->private;
KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
g_topology_unlock();
@ -2959,6 +2977,7 @@ g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md)
sc->sc_idle = 1;
sc->sc_last_write = time_uptime;
sc->sc_writes = 0;
sc->sc_refcnt = 1;
sx_init(&sc->sc_lock, "gmirror:lock");
bioq_init(&sc->sc_queue);
mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF);
@ -2989,12 +3008,8 @@ g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md)
G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.",
sc->sc_name);
g_destroy_geom(sc->sc_sync.ds_geom);
mtx_destroy(&sc->sc_done_mtx);
mtx_destroy(&sc->sc_events_mtx);
mtx_destroy(&sc->sc_queue_mtx);
sx_destroy(&sc->sc_lock);
g_destroy_geom(sc->sc_geom);
free(sc, M_MIRROR);
g_mirror_free_device(sc);
return (NULL);
}
@ -3017,8 +3032,6 @@ g_mirror_destroy(struct g_mirror_softc *sc, int how)
struct g_mirror_disk *disk;
g_topology_assert_not();
if (sc == NULL)
return (ENXIO);
sx_assert(&sc->sc_lock, SX_XLOCKED);
if (sc->sc_provider_open != 0 || SCHEDULER_STOPPED()) {
@ -3068,7 +3081,6 @@ g_mirror_destroy(struct g_mirror_softc *sc, int how)
G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
sx_xlock(&sc->sc_lock);
g_mirror_destroy_device(sc);
free(sc, M_MIRROR);
return (0);
}

View File

@ -207,6 +207,7 @@ struct g_mirror_softc {
int sc_idle; /* DIRTY flags removed. */
time_t sc_last_write;
u_int sc_writes;
u_int sc_refcnt; /* Number of softc references */
TAILQ_HEAD(, g_mirror_event) sc_events;
struct mtx sc_events_mtx;