Do not complete pending gmirror BIOs when tearing down the provider.

This will result in lock recursion and is more generally incorrect since
the completion handlers will just reinsert the BIOs into the queue we're
trying to drain.

Reviewed by:	imp, ngie
Approved by:	re (gjb)
MFC after:	3 weeks
Sponsored by:	EMC / Isilon Storage Division
Differential Revision:	https://reviews.freebsd.org/D6908
Commit author: Mark Johnston, 2016-06-22 21:00:28 +00:00
parent b38eb9eac8
commit be20fc2e90
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=302091

View File

@ -2121,8 +2121,21 @@ g_mirror_destroy_provider(struct g_mirror_softc *sc)
g_topology_lock();
g_error_provider(sc->sc_provider, ENXIO);
mtx_lock(&sc->sc_queue_mtx);
while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL)
g_io_deliver(bp, ENXIO);
while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
/*
* Abort any pending I/O that wasn't generated by us.
* Synchronization requests and requests destined for individual
* mirror components can be destroyed immediately.
*/
if (bp->bio_to == sc->sc_provider &&
bp->bio_from->geom != sc->sc_sync.ds_geom) {
g_io_deliver(bp, ENXIO);
} else {
if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
free(bp->bio_data, M_MIRROR);
g_destroy_bio(bp);
}
}
mtx_unlock(&sc->sc_queue_mtx);
G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
sc->sc_provider->name);