Fix GEOM_GATE orphanization.
Previous code closed and destroyed the direct read consumer even with I/O still in progress. This patch adds locking and request counting to postpone the close until the last of the running requests completes.

MFC after:	2 weeks
Sponsored by:	iXsystems, Inc.
commit 56bbe48921
parent 1c7ecd25bb
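In short, the patch gives the read consumer an in-flight request counter (cp->index) protected by the new sc_read_mtx mutex, and postpones the g_access()/g_detach()/g_destroy_consumer() teardown, now factored out into g_gate_detach(), until that counter drains to zero; the completion path defers the actual teardown through g_post_event(), since g_gate_detach() asserts the GEOM topology lock. Below is a minimal userland sketch of the same postponed-teardown pattern, not the kernel code itself; the names (struct consumer, start_io, io_done, drop_consumer) are illustrative only.

/*
 * Sketch: an in-flight counter plus an "attached" flag, both protected by
 * one mutex.  Whichever side observes the counter at zero after the
 * consumer has been detached performs the destruction, exactly once.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct consumer {
        pthread_mutex_t lock;      /* protects inflight and attached */
        unsigned        inflight;  /* requests issued but not yet completed */
        bool            attached;  /* still the active read consumer? */
};

static void
destroy_consumer(struct consumer *cp)
{
        /* Safe: no request can reference cp at this point. */
        pthread_mutex_destroy(&cp->lock);
        free(cp);
}

/* Before issuing a read: fail if the consumer is already being dropped. */
static bool
start_io(struct consumer *cp)
{
        bool ok;

        pthread_mutex_lock(&cp->lock);
        ok = cp->attached;
        if (ok)
                cp->inflight++;
        pthread_mutex_unlock(&cp->lock);
        return (ok);
}

/* On request completion: destroy cp if it was dropped and we were last. */
static void
io_done(struct consumer *cp)
{
        bool last;

        pthread_mutex_lock(&cp->lock);
        last = (--cp->inflight == 0 && !cp->attached);
        pthread_mutex_unlock(&cp->lock);
        if (last)
                destroy_consumer(cp);
}

/* On orphan/modify: detach now, destroy only if nothing is in flight. */
static void
drop_consumer(struct consumer *cp)
{
        bool idle;

        pthread_mutex_lock(&cp->lock);
        cp->attached = false;
        idle = (cp->inflight == 0);
        pthread_mutex_unlock(&cp->lock);
        if (idle)
                destroy_consumer(cp);
}

int
main(void)
{
        struct consumer *cp;

        cp = calloc(1, sizeof(*cp));
        if (cp == NULL)
                return (1);
        pthread_mutex_init(&cp->lock, NULL);
        cp->attached = true;

        (void)start_io(cp);     /* one request in flight */
        drop_consumer(cp);      /* detach requested; destruction is postponed */
        io_done(cp);            /* last completion actually frees cp */
        return (0);
}

The sketch mirrors the patch's design: requests bump the counter only while the consumer is still published, so once it is unpublished no new references can appear and the last completion can free it without racing a new request.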
--- a/sys/geom/gate/g_gate.c
+++ b/sys/geom/gate/g_gate.c
@@ -89,6 +89,19 @@ static struct g_gate_softc **g_gate_units;
 static u_int g_gate_nunits;
 static struct mtx g_gate_units_lock;
 
+static void
+g_gate_detach(void *arg, int flags __unused)
+{
+        struct g_consumer *cp = arg;
+
+        g_topology_assert();
+        G_GATE_DEBUG(1, "Destroying read consumer on provider %s orphan.",
+            cp->provider->name);
+        (void)g_access(cp, -1, 0, 0);
+        g_detach(cp);
+        g_destroy_consumer(cp);
+}
+
 static int
 g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
 {
@@ -140,6 +153,7 @@ g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
         g_gate_nunits--;
         mtx_unlock(&g_gate_units_lock);
         mtx_destroy(&sc->sc_queue_mtx);
+        mtx_destroy(&sc->sc_read_mtx);
         g_topology_lock();
         if ((cp = sc->sc_readcons) != NULL) {
                 sc->sc_readcons = NULL;
@@ -208,8 +222,11 @@ g_gate_queue_io(struct bio *bp)
 static void
 g_gate_done(struct bio *cbp)
 {
+        struct g_gate_softc *sc;
         struct bio *pbp;
+        struct g_consumer *cp;
 
+        cp = cbp->bio_from;
         pbp = cbp->bio_parent;
         if (cbp->bio_error == 0) {
                 pbp->bio_completed = cbp->bio_completed;
@@ -222,12 +239,20 @@ g_gate_done(struct bio *cbp)
                 pbp->bio_children--;
                 g_gate_queue_io(pbp);
         }
+
+        sc = cp->geom->softc;
+        mtx_lock(&sc->sc_read_mtx);
+        if (--cp->index == 0 && sc->sc_readcons != cp)
+                g_post_event(g_gate_detach, cp, M_NOWAIT, NULL);
+        mtx_unlock(&sc->sc_read_mtx);
 }
 
 static void
 g_gate_start(struct bio *pbp)
 {
         struct g_gate_softc *sc;
+        struct g_consumer *cp;
+        struct bio *cbp;
 
         sc = pbp->bio_to->geom->softc;
         if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
@@ -237,21 +262,26 @@ g_gate_start(struct bio *pbp)
         G_GATE_LOGREQ(2, pbp, "Request received.");
         switch (pbp->bio_cmd) {
         case BIO_READ:
-                if (sc->sc_readcons != NULL) {
-                        struct bio *cbp;
-
-                        cbp = g_clone_bio(pbp);
-                        if (cbp == NULL) {
-                                g_io_deliver(pbp, ENOMEM);
-                                return;
-                        }
-                        cbp->bio_done = g_gate_done;
-                        cbp->bio_offset = pbp->bio_offset + sc->sc_readoffset;
-                        cbp->bio_to = sc->sc_readcons->provider;
-                        g_io_request(cbp, sc->sc_readcons);
+                if (sc->sc_readcons == NULL)
+                        break;
+                cbp = g_clone_bio(pbp);
+                if (cbp == NULL) {
+                        g_io_deliver(pbp, ENOMEM);
                         return;
                 }
-                break;
+                mtx_lock(&sc->sc_read_mtx);
+                if ((cp = sc->sc_readcons) == NULL) {
+                        mtx_unlock(&sc->sc_read_mtx);
+                        g_destroy_bio(cbp);
+                        pbp->bio_children--;
+                        break;
+                }
+                cp->index++;
+                cbp->bio_offset = pbp->bio_offset + sc->sc_readoffset;
+                mtx_unlock(&sc->sc_read_mtx);
+                cbp->bio_done = g_gate_done;
+                g_io_request(cbp, cp);
+                return;
         case BIO_DELETE:
         case BIO_WRITE:
         case BIO_FLUSH:
@@ -377,20 +407,18 @@ g_gate_orphan(struct g_consumer *cp)
 {
         struct g_gate_softc *sc;
         struct g_geom *gp;
+        int done;
 
         g_topology_assert();
         gp = cp->geom;
         sc = gp->softc;
-        if (sc == NULL)
-                return;
-        KASSERT(cp == sc->sc_readcons, ("cp=%p sc_readcons=%p", cp,
-            sc->sc_readcons));
-        sc->sc_readcons = NULL;
-        G_GATE_DEBUG(1, "Destroying read consumer on provider %s orphan.",
-            cp->provider->name);
-        (void)g_access(cp, -1, 0, 0);
-        g_detach(cp);
-        g_destroy_consumer(cp);
+        mtx_lock(&sc->sc_read_mtx);
+        if (sc->sc_readcons == cp)
+                sc->sc_readcons = NULL;
+        done = (cp->index == 0);
+        mtx_unlock(&sc->sc_read_mtx);
+        if (done)
+                g_gate_detach(cp, 0);
 }
 
 static void
@@ -483,6 +511,7 @@ g_gate_create(struct g_gate_ctl_create *ggio)
         bioq_init(&sc->sc_inqueue);
         bioq_init(&sc->sc_outqueue);
         mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
+        mtx_init(&sc->sc_read_mtx, "gg:read", NULL, MTX_DEF);
         sc->sc_queue_count = 0;
         sc->sc_queue_size = ggio->gctl_maxcount;
         if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
@@ -596,6 +625,7 @@ fail2:
 fail1:
         mtx_unlock(&g_gate_units_lock);
         mtx_destroy(&sc->sc_queue_mtx);
+        mtx_destroy(&sc->sc_read_mtx);
         free(sc, M_GATE);
         return (error);
 }
@@ -605,7 +635,7 @@ g_gate_modify(struct g_gate_softc *sc, struct g_gate_ctl_modify *ggio)
 {
         struct g_provider *pp;
         struct g_consumer *cp;
-        int error;
+        int done, error;
 
         if ((ggio->gctl_modify & GG_MODIFY_MEDIASIZE) != 0) {
                 if (ggio->gctl_mediasize <= 0) {
@@ -628,13 +658,15 @@ g_gate_modify(struct g_gate_softc *sc, struct g_gate_ctl_modify *ggio)
 
         if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
                 g_topology_lock();
-                if (sc->sc_readcons != NULL) {
-                        cp = sc->sc_readcons;
+                mtx_lock(&sc->sc_read_mtx);
+                if ((cp = sc->sc_readcons) != NULL) {
                         sc->sc_readcons = NULL;
-                        (void)g_access(cp, -1, 0, 0);
-                        g_detach(cp);
-                        g_destroy_consumer(cp);
-                }
+                        done = (cp->index == 0);
+                        mtx_unlock(&sc->sc_read_mtx);
+                        if (done)
+                                g_gate_detach(cp, 0);
+                } else
+                        mtx_unlock(&sc->sc_read_mtx);
                 if (ggio->gctl_readprov[0] != '\0') {
                         pp = g_provider_by_name(ggio->gctl_readprov);
                         if (pp == NULL) {
--- a/sys/geom/gate/g_gate.h
+++ b/sys/geom/gate/g_gate.h
@@ -91,16 +91,15 @@ struct g_gate_softc {
         uint32_t                 sc_queue_count;    /* P: sc_queue_mtx */
         uint32_t                 sc_queue_size;     /* P: (read-only) */
         u_int                    sc_timeout;        /* P: (read-only) */
-        struct g_consumer       *sc_readcons;       /* P: XXX */
-        off_t                    sc_readoffset;     /* P: XXX */
+        struct g_consumer       *sc_readcons;       /* P: sc_read_mtx */
+        off_t                    sc_readoffset;     /* P: sc_read_mtx */
         struct callout           sc_callout;        /* P: (modified only
                                                            from callout
                                                            thread) */
-        uintptr_t                sc_seq;            /* P: (modified only
-                                                           from g_down
-                                                           thread) */
+        uintptr_t                sc_seq;            /* P: sc_queue_mtx */
         LIST_ENTRY(g_gate_softc) sc_next;           /* P: g_gate_list_mtx */
         char                     sc_info[G_GATE_INFOSIZE]; /* P: (read-only) */
+        struct mtx               sc_read_mtx;
 };
 
 #define G_GATE_DEBUG(lvl, ...) \