Change 'load' balancing mode algorithm:

- Instead of measuring the execution time of the last request on each drive
and choosing the drive with the smallest time, track an averaged number of
requests running on each drive. This information is more accurate and
timely, and it distributes load between the drives in a more even and
predictable way (see the sketch after this list).
- For each drive, track the offset of the last submitted request. If a new
request's offset matches or is close to some drive's previous one, prefer
that drive. This significantly speeds up simultaneous sequential reads.
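
The combined policy can be sketched in plain C as below. This is a minimal
illustration of the idea, not the committed kernel code: the disk struct and
the pick_disk() and update_loads() helpers are hypothetical stand-ins for
struct g_mirror_disk and the logic in g_mirror_request_load().

#include <limits.h>
#include <stdint.h>

#define TRACK_SIZE	(1 * 1024 * 1024)	/* "close enough" distance: 1MB */
#define LOAD_SCALE	256			/* fixed-point scale for load */

/* Hypothetical stand-in for struct g_mirror_disk. */
struct disk {
	int	 active;	/* disk may take requests */
	unsigned load;		/* averaged queue length, times LOAD_SCALE */
	unsigned inflight;	/* requests currently running on this disk */
	int64_t	 last_offset;	/* offset just past the last submitted request */
};

/* Pick the active disk with the smallest effective priority value. */
static struct disk *
pick_disk(struct disk *disks, int ndisks, int64_t offset)
{
	struct disk *best_disk = NULL;
	int i, prio, best = INT_MAX;

	for (i = 0; i < ndisks; i++) {
		struct disk *dp = &disks[i];
		int64_t dist;

		if (!dp->active)
			continue;
		prio = (int)dp->load;
		dist = dp->last_offset - offset;
		if (dist < 0)
			dist = -dist;
		if (dist == 0)			/* head precisely in position */
			prio -= 2 * LOAD_SCALE;
		else if (dist < TRACK_SIZE)	/* head close to position */
			prio -= 1 * LOAD_SCALE;
		if (prio <= best) {
			best = prio;
			best_disk = dp;
		}
	}
	return (best_disk);
}

/*
 * After each dispatch, decay every disk's averaged load toward its
 * current queue depth: load = (inflight * LOAD_SCALE + 7 * load) / 8.
 */
static void
update_loads(struct disk *disks, int ndisks)
{
	int i;

	for (i = 0; i < ndisks; i++)
		disks[i].load = (disks[i].inflight * LOAD_SCALE +
		    disks[i].load * 7) / 8;
}

In the real code the per-consumer request counter (cp->index) plays the role
of inflight, and the load update runs over all disks on every dispatch.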

PR:		kern/113885
Reviewed by:	sobomax
commit 891852cc12
parent 9e9a895ea7
Author:	Alexander Motin
Date:	2009-12-03 21:47:51 +00:00
2 changed files with 26 additions and 30 deletions

--- a/sys/geom/mirror/g_mirror.c
+++ b/sys/geom/mirror/g_mirror.c

@@ -451,9 +451,6 @@ g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
 	disk->d_id = md->md_did;
 	disk->d_state = G_MIRROR_DISK_STATE_NONE;
 	disk->d_priority = md->md_priority;
-	disk->d_delay.sec = 0;
-	disk->d_delay.frac = 0;
-	binuptime(&disk->d_last_used);
 	disk->d_flags = md->md_dflags;
 	if (md->md_provider[0] != '\0')
 		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
@@ -862,16 +859,6 @@ bintime_cmp(struct bintime *bt1, struct bintime *bt2)
 	return (0);
 }
 
-static void
-g_mirror_update_delay(struct g_mirror_disk *disk, struct bio *bp)
-{
-
-	if (disk->d_softc->sc_balance != G_MIRROR_BALANCE_LOAD)
-		return;
-	binuptime(&disk->d_delay);
-	bintime_sub(&disk->d_delay, &bp->bio_t0);
-}
-
 static void
 g_mirror_done(struct bio *bp)
 {
@@ -904,8 +891,6 @@ g_mirror_regular_request(struct bio *bp)
 		g_topology_lock();
 		g_mirror_kill_consumer(sc, bp->bio_from);
 		g_topology_unlock();
-	} else {
-		g_mirror_update_delay(disk, bp);
 	}
 
 	pbp->bio_inbed++;
@@ -1465,30 +1450,35 @@ g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
 	g_io_request(cbp, cp);
 }
 
+#define TRACK_SIZE	(1 * 1024 * 1024)
+#define LOAD_SCALE	256
+#define ABS(x)		(((x) >= 0) ? (x) : (-(x)))
+
 static void
 g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
 {
 	struct g_mirror_disk *disk, *dp;
 	struct g_consumer *cp;
 	struct bio *cbp;
-	struct bintime curtime;
+	int prio, best;
 
-	binuptime(&curtime);
-	/*
-	 * Find a disk which the smallest load.
-	 */
+	/* Find a disk with the smallest load. */
 	disk = NULL;
+	best = INT_MAX;
 	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
 		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
 			continue;
-		/* If disk wasn't used for more than 2 sec, use it. */
-		if (curtime.sec - dp->d_last_used.sec >= 2) {
-			disk = dp;
-			break;
-		}
-		if (disk == NULL ||
-		    bintime_cmp(&dp->d_delay, &disk->d_delay) < 0) {
+		prio = dp->load;
+		/* If disk head is precisely in position - highly prefer it. */
+		if (dp->d_last_offset == bp->bio_offset)
+			prio -= 2 * LOAD_SCALE;
+		else
+		/* If disk head is close to position - prefer it. */
+		if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE)
+			prio -= 1 * LOAD_SCALE;
+		if (prio <= best) {
 			disk = dp;
+			best = prio;
 		}
 	}
 	KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name));
@@ -1505,12 +1495,18 @@ g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
 	cp = disk->d_consumer;
 	cbp->bio_done = g_mirror_done;
 	cbp->bio_to = cp->provider;
-	binuptime(&disk->d_last_used);
 	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
 	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
 	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
 	    cp->acw, cp->ace));
 	cp->index++;
+	/* Remember last head position */
+	disk->d_last_offset = bp->bio_offset + bp->bio_length;
+	/* Update loads. */
+	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
+		dp->load = (dp->d_consumer->index * LOAD_SCALE +
+		    dp->load * 7) / 8;
+	}
 	g_io_request(cbp, cp);
 }
 
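
A note on the load update above: it is an exponential moving average with
weight 1/8, kept in LOAD_SCALE fixed point. As illustrative arithmetic (not
from the commit), a disk whose queue just drained from one request (load 256)
to zero decays as 256 -> 224 -> 196 -> 171 -> 149 -> 130, so about five
dispatches roughly halve the remembered load, while a steadily busy disk
converges to its queue depth times LOAD_SCALE.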

--- a/sys/geom/mirror/g_mirror.h
+++ b/sys/geom/mirror/g_mirror.h

@@ -133,8 +133,8 @@ struct g_mirror_disk {
 	struct g_mirror_softc *d_softc;	/* Back-pointer to softc. */
 	int		 d_state;	/* Disk state. */
 	u_int		 d_priority;	/* Disk priority. */
-	struct bintime	 d_delay;	/* Disk delay. */
-	struct bintime	 d_last_used;	/* When disk was last used. */
+	u_int		 load;		/* Averaged queue length */
+	off_t		 d_last_offset;	/* Last read offset */
 	uint64_t	 d_flags;	/* Additional flags. */
 	u_int		 d_genid;	/* Disk's generation ID. */
 	struct g_mirror_disk_sync d_sync;/* Sync information. */
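
For context, this is the balance policy selected through gmirror(8); a
typical invocation (the mirror name gm0 is illustrative) would be:

	gmirror configure -b load gm0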