- Split the queue mutex into one for the event queue and one for the BIO queue,
  since the two are not really related, and to prepare for an additional queue
  that will be covered by the BIO queue mutex.
- Implement wrappers for fetching the next element from the event queue and for
  putting a new element into the BIO queue.
commit 2857e7c796 (parent c3148a6894)
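As a quick orientation before the diff, here is a minimal user-space sketch of the locking discipline this change introduces: one mutex protects the event queue, a separate one protects the BIO queue, and small wrapper functions hide the locking from callers. This is an illustrative pthread analogue only, not the kernel code; the names post_event, get_event, remove_event and post_bio are hypothetical stand-ins for the gv_* helpers added in the diff, and error handling is omitted.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct event {
	int type;
	TAILQ_ENTRY(event) events;
};

struct bio_req {
	long offset;
	TAILQ_ENTRY(bio_req) queue;
};

static TAILQ_HEAD(, event) equeue = TAILQ_HEAD_INITIALIZER(equeue);
static TAILQ_HEAD(, bio_req) bqueue = TAILQ_HEAD_INITIALIZER(bqueue);
static pthread_mutex_t equeue_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t bqueue_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Queue an event; only the event-queue mutex is taken. */
static void
post_event(int type)
{
	struct event *ev = calloc(1, sizeof(*ev));

	ev->type = type;
	pthread_mutex_lock(&equeue_mtx);
	TAILQ_INSERT_TAIL(&equeue, ev, events);
	pthread_mutex_unlock(&equeue_mtx);
}

/* Peek at the next event; it stays on the queue (like gv_get_event()). */
static struct event *
get_event(void)
{
	struct event *ev;

	pthread_mutex_lock(&equeue_mtx);
	ev = TAILQ_FIRST(&equeue);
	pthread_mutex_unlock(&equeue_mtx);
	return (ev);
}

/* Unlink a previously fetched event (like gv_remove_event()). */
static void
remove_event(struct event *ev)
{
	pthread_mutex_lock(&equeue_mtx);
	TAILQ_REMOVE(&equeue, ev, events);
	pthread_mutex_unlock(&equeue_mtx);
}

/* Queue an I/O request; only the BIO-queue mutex is taken (like gv_post_bio()). */
static void
post_bio(long offset)
{
	struct bio_req *bp = calloc(1, sizeof(*bp));

	bp->offset = offset;
	pthread_mutex_lock(&bqueue_mtx);
	TAILQ_INSERT_TAIL(&bqueue, bp, queue);
	pthread_mutex_unlock(&bqueue_mtx);
}

int
main(void)
{
	struct event *ev;
	struct bio_req *bp;

	post_event(1);
	post_bio(4096);

	/* Worker order mirrors gv_worker(): events first, then I/O. */
	while ((ev = get_event()) != NULL) {
		remove_event(ev);
		printf("event %d\n", ev->type);
		free(ev);
	}
	pthread_mutex_lock(&bqueue_mtx);
	while ((bp = TAILQ_FIRST(&bqueue)) != NULL) {
		TAILQ_REMOVE(&bqueue, bp, queue);
		printf("bio at offset %ld\n", bp->offset);
		free(bp);
	}
	pthread_mutex_unlock(&bqueue_mtx);
	return (0);
}

Keeping the fetch and the unlink as separate wrappers mirrors the diff: gv_worker() peeks at the queue head with gv_get_event() and only unlinks the entry with gv_remove_event() once it decides to handle it.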
@@ -80,6 +80,17 @@ gv_orphan(struct g_consumer *cp)
 	gv_post_event(sc, GV_EVENT_DRIVE_LOST, d, NULL, 0, 0);
 }
 
+void
+gv_post_bio(struct gv_softc *sc, struct bio *bp)
+{
+
+	KASSERT(sc != NULL, ("NULL sc"));
+	mtx_lock(&sc->bqueue_mtx);
+	bioq_disksort(sc->bqueue, bp);
+	wakeup(sc);
+	mtx_unlock(&sc->bqueue_mtx);
+}
+
 void
 gv_start(struct bio *bp)
 {
@@ -100,10 +111,7 @@ gv_start(struct bio *bp)
 		return;
 	}
 
-	mtx_lock(&sc->queue_mtx);
-	bioq_disksort(sc->bqueue, bp);
-	wakeup(sc);
-	mtx_unlock(&sc->queue_mtx);
+	gv_post_bio(sc, bp);
 }
 
 void
@@ -118,10 +126,7 @@ gv_done(struct bio *bp)
 	sc = gp->softc;
 	bp->bio_cflags |= GV_BIO_DONE;
 
-	mtx_lock(&sc->queue_mtx);
-	bioq_disksort(sc->bqueue, bp);
-	wakeup(sc);
-	mtx_unlock(&sc->queue_mtx);
+	gv_post_bio(sc, bp);
 }
 
 int
@@ -181,7 +186,8 @@ gv_init(struct g_class *mp)
 	LIST_INIT(&sc->volumes);
 	TAILQ_INIT(&sc->equeue);
 	mtx_init(&sc->config_mtx, "gv_config", NULL, MTX_DEF);
-	mtx_init(&sc->queue_mtx, "gv_queue", NULL, MTX_DEF);
+	mtx_init(&sc->equeue_mtx, "gv_equeue", NULL, MTX_DEF);
+	mtx_init(&sc->bqueue_mtx, "gv_bqueue", NULL, MTX_DEF);
 	kproc_create(gv_worker, sc, NULL, 0, 0, "gv_worker");
 }
 
@@ -637,13 +643,11 @@ gv_worker(void *arg)
 
 	sc = arg;
 	KASSERT(sc != NULL, ("NULL sc"));
-	mtx_lock(&sc->queue_mtx);
 	for (;;) {
 		/* Look at the events first... */
-		ev = TAILQ_FIRST(&sc->equeue);
+		ev = gv_get_event(sc);
 		if (ev != NULL) {
-			TAILQ_REMOVE(&sc->equeue, ev, events);
-			mtx_unlock(&sc->queue_mtx);
+			gv_remove_event(sc, ev);
 
 			switch (ev->type) {
 			case GV_EVENT_DRIVE_TASTED:
@@ -959,9 +963,11 @@ gv_worker(void *arg)
 			case GV_EVENT_THREAD_EXIT:
 				G_VINUM_DEBUG(2, "event 'thread exit'");
 				g_free(ev);
-				mtx_lock(&sc->queue_mtx);
+				mtx_lock(&sc->equeue_mtx);
+				mtx_lock(&sc->bqueue_mtx);
 				gv_cleanup(sc);
-				mtx_destroy(&sc->queue_mtx);
+				mtx_destroy(&sc->bqueue_mtx);
+				mtx_destroy(&sc->equeue_mtx);
 				g_free(sc->bqueue);
 				g_free(sc);
 				kproc_exit(ENXIO);
@@ -972,18 +978,18 @@ gv_worker(void *arg)
 			}
 
 			g_free(ev);
-
-			mtx_lock(&sc->queue_mtx);
 			continue;
 		}
 
 		/* ... then do I/O processing. */
+		mtx_lock(&sc->bqueue_mtx);
 		bp = bioq_takefirst(sc->bqueue);
 		if (bp == NULL) {
-			msleep(sc, &sc->queue_mtx, PRIBIO, "-", hz/10);
+			msleep(sc, &sc->bqueue_mtx, PRIBIO, "-", hz/10);
+			mtx_unlock(&sc->bqueue_mtx);
 			continue;
 		}
-		mtx_unlock(&sc->queue_mtx);
+		mtx_unlock(&sc->bqueue_mtx);
 
 		/* A bio that is coming up from an underlying device. */
 		if (bp->bio_cflags & GV_BIO_DONE) {
@@ -1009,8 +1015,6 @@ gv_worker(void *arg)
 		} else {
 			gv_volume_start(sc, bp);
 		}
-
-		mtx_lock(&sc->queue_mtx);
 	}
 }
 
@@ -122,9 +122,12 @@ int gv_detach_sd(struct gv_sd *, int);
 void	gv_worker(void *);
 void	gv_post_event(struct gv_softc *, int, void *, void *, intmax_t,
 	    intmax_t);
+struct gv_event	*gv_get_event(struct gv_softc *);
+void	gv_remove_event(struct gv_softc *, struct gv_event *);
 void	gv_drive_tasted(struct gv_softc *, struct g_provider *);
 void	gv_drive_lost(struct gv_softc *, struct gv_drive *);
 void	gv_setup_objects(struct gv_softc *);
+void	gv_post_bio(struct gv_softc *, struct bio *);
 void	gv_start(struct bio *);
 int	gv_access(struct g_provider *, int, int, int);
 void	gv_cleanup(struct gv_softc *);
@@ -52,10 +52,33 @@ gv_post_event(struct gv_softc *sc, int event, void *arg1, void *arg2,
 	ev->arg3 = arg3;
 	ev->arg4 = arg4;
 
-	mtx_lock(&sc->queue_mtx);
+	mtx_lock(&sc->equeue_mtx);
 	TAILQ_INSERT_TAIL(&sc->equeue, ev, events);
 	wakeup(sc);
-	mtx_unlock(&sc->queue_mtx);
+	mtx_unlock(&sc->equeue_mtx);
 }
 
+struct gv_event *
+gv_get_event(struct gv_softc *sc)
+{
+	struct gv_event *ev;
+
+	KASSERT(sc != NULL, ("NULL sc"));
+	mtx_lock(&sc->equeue_mtx);
+	ev = TAILQ_FIRST(&sc->equeue);
+	mtx_unlock(&sc->equeue_mtx);
+	return (ev);
+}
+
+void
+gv_remove_event(struct gv_softc *sc, struct gv_event *ev)
+{
+
+	KASSERT(sc != NULL, ("NULL sc"));
+	KASSERT(ev != NULL, ("NULL ev"));
+	mtx_lock(&sc->equeue_mtx);
+	TAILQ_REMOVE(&sc->equeue, ev, events);
+	mtx_unlock(&sc->equeue_mtx);
+}
+
 void
@@ -360,9 +360,7 @@ gv_plex_raid5_done(struct gv_plex *p, struct bio *bp)
 	/* Bring the waiting bios back into the game. */
 	pbp = bioq_takefirst(p->wqueue);
 	while (pbp != NULL) {
-		mtx_lock(&sc->queue_mtx);
-		bioq_disksort(sc->bqueue, pbp);
-		mtx_unlock(&sc->queue_mtx);
+		gv_post_bio(sc, pbp);
 		pbp = bioq_takefirst(p->wqueue);
 	}
 }
@@ -406,9 +404,7 @@ gv_plex_raid5_done(struct gv_plex *p, struct bio *bp)
 	/* Bring the waiting bios back into the game. */
 	pbp = bioq_takefirst(p->wqueue);
 	while (pbp != NULL) {
-		mtx_lock(&sc->queue_mtx);
-		bioq_disksort(sc->bqueue, pbp);
-		mtx_unlock(&sc->queue_mtx);
+		gv_post_bio(sc, pbp);
 		pbp = bioq_takefirst(p->wqueue);
 	}
 	g_free(wp);
@@ -581,9 +577,7 @@ gv_sync_request(struct gv_plex *from, struct gv_plex *to, off_t offset,
 	bp->bio_data = data;
 
 	/* Send down next. */
-	mtx_lock(&sc->queue_mtx);
-	bioq_disksort(sc->bqueue, bp);
-	mtx_unlock(&sc->queue_mtx);
+	gv_post_bio(sc, bp);
 	//gv_plex_start(from, bp);
 	return (0);
 }
@@ -696,9 +690,7 @@ gv_grow_request(struct gv_plex *p, off_t offset, off_t length, int type,
 	bp->bio_cflags |= GV_BIO_MALLOC;
 	bp->bio_data = data;
 
-	mtx_lock(&sc->queue_mtx);
-	bioq_disksort(sc->bqueue, bp);
-	mtx_unlock(&sc->queue_mtx);
+	gv_post_bio(sc, bp);
 	//gv_plex_start(p, bp);
 	return (0);
 }
@@ -921,9 +913,7 @@ gv_parity_request(struct gv_plex *p, int flags, off_t offset)
 
 		/* We still have more parity to build. */
 		bp->bio_offset = offset;
-		mtx_lock(&sc->queue_mtx);
-		bioq_disksort(sc->bqueue, bp);
-		mtx_unlock(&sc->queue_mtx);
+		gv_post_bio(sc, bp);
 		//gv_plex_start(p, bp); /* Send it down to the plex. */
 	}
 
@@ -231,7 +231,8 @@ struct gv_softc {
 	LIST_HEAD(,gv_volume)	volumes;	/* All volumes. */
 
 	TAILQ_HEAD(,gv_event)	equeue;		/* Event queue. */
-	struct mtx		queue_mtx;	/* Queue lock. */
+	struct mtx		equeue_mtx;	/* Event queue lock. */
+	struct mtx		bqueue_mtx;	/* BIO queue lock. */
 	struct mtx		config_mtx;	/* Configuration lock. */
 	struct bio_queue_head	*bqueue;	/* BIO queue. */
 	struct g_geom		*geom;		/* Pointer to our VINUM geom. */