Use atomic for start_count in devstat_start_transaction().

Combined with the earlier nstart/nend removal, it allows removing several locks
from the request path of GEOM and a few other places.  It would be cool if we had
more SMP-friendly statistics, but this helps too.

Sponsored by:	iXsystems, Inc.
This commit is contained in:
Alexander Motin 2019-12-30 03:13:38 +00:00
parent 9367fb301c
commit 024932aae9
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=356200
7 changed files with 12 additions and 57 deletions

View File

@ -580,9 +580,7 @@ ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
DPRINTF("entered\n");
binuptime(&beio->ds_t0);
mtx_lock(&be_lun->io_lock);
devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
mtx_unlock(&be_lun->io_lock);
(void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
@ -663,9 +661,7 @@ ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
}
binuptime(&beio->ds_t0);
mtx_lock(&be_lun->io_lock);
devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
mtx_unlock(&be_lun->io_lock);
if (beio->bio_cmd == BIO_READ) {
vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
@ -894,9 +890,7 @@ ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun,
}
binuptime(&beio->ds_t0);
mtx_lock(&be_lun->io_lock);
devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
mtx_unlock(&be_lun->io_lock);
csw = devvn_refthread(be_lun->vn, &dev, &ref);
if (csw) {
@ -1034,9 +1028,7 @@ ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
beio->send_complete = 1;
binuptime(&beio->ds_t0);
mtx_lock(&be_lun->io_lock);
devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
mtx_unlock(&be_lun->io_lock);
csw = devvn_refthread(be_lun->vn, &dev, &ref);
if (csw) {
@ -1107,9 +1099,7 @@ ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun,
DPRINTF("entered\n");
binuptime(&beio->ds_t0);
mtx_lock(&be_lun->io_lock);
devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
mtx_unlock(&be_lun->io_lock);
if (beio->io_offset == -1) {
beio->io_len = 0;
@ -1186,11 +1176,9 @@ ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
beio->num_bios_sent++;
}
}
binuptime(&beio->ds_t0);
mtx_lock(&be_lun->io_lock);
devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
beio->send_complete = 1;
mtx_unlock(&be_lun->io_lock);
binuptime(&beio->ds_t0);
devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
/*
* Fire off all allocated requests!

View File

@ -247,7 +247,6 @@ struct md_s {
LIST_ENTRY(md_s) list;
struct bio_queue_head bio_queue;
struct mtx queue_mtx;
struct mtx stat_mtx;
struct cdev *dev;
enum md_types type;
off_t mediasize;
@ -477,9 +476,7 @@ g_md_start(struct bio *bp)
sc = bp->bio_to->geom->softc;
if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
mtx_lock(&sc->stat_mtx);
devstat_start_transaction_bio(sc->devstat, bp);
mtx_unlock(&sc->stat_mtx);
}
mtx_lock(&sc->queue_mtx);
bioq_disksort(&sc->bio_queue, bp);
@ -1274,7 +1271,6 @@ mdnew(int unit, int *errp, enum md_types type)
sc->type = type;
bioq_init(&sc->bio_queue);
mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF);
sc->unit = unit;
sprintf(sc->name, "md%d", unit);
LIST_INSERT_HEAD(&md_softc_list, sc, list);
@ -1282,7 +1278,6 @@ mdnew(int unit, int *errp, enum md_types type)
if (error == 0)
return (sc);
LIST_REMOVE(sc, list);
mtx_destroy(&sc->stat_mtx);
mtx_destroy(&sc->queue_mtx);
free_unr(md_uh, sc->unit);
free(sc, M_MD);
@ -1512,7 +1507,6 @@ mddestroy(struct md_s *sc, struct thread *td)
while (!(sc->flags & MD_EXITING))
msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
mtx_unlock(&sc->queue_mtx);
mtx_destroy(&sc->stat_mtx);
mtx_destroy(&sc->queue_mtx);
if (sc->vnode != NULL) {
vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);

View File

@ -413,9 +413,7 @@ nvdimm_spa_g_start(struct bio *bp)
sc = bp->bio_to->geom->softc;
if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
mtx_lock(&sc->spa_g_stat_mtx);
devstat_start_transaction_bio(sc->spa_g_devstat, bp);
mtx_unlock(&sc->spa_g_stat_mtx);
}
mtx_lock(&sc->spa_g_mtx);
bioq_disksort(&sc->spa_g_queue, bp);
@ -544,14 +542,12 @@ nvdimm_spa_g_create(struct nvdimm_spa_dev *dev, const char *name)
sc->dev = dev;
bioq_init(&sc->spa_g_queue);
mtx_init(&sc->spa_g_mtx, "spag", NULL, MTX_DEF);
mtx_init(&sc->spa_g_stat_mtx, "spagst", NULL, MTX_DEF);
sc->spa_g_proc_run = true;
sc->spa_g_proc_exiting = false;
error = kproc_create(nvdimm_spa_g_thread, sc, &sc->spa_g_proc, 0, 0,
"g_spa");
if (error != 0) {
mtx_destroy(&sc->spa_g_mtx);
mtx_destroy(&sc->spa_g_stat_mtx);
free(sc, M_NVDIMM);
printf("NVDIMM %s cannot create geom worker, error %d\n", name,
error);
@ -621,7 +617,6 @@ nvdimm_spa_g_destroy_geom(struct gctl_req *req, struct g_class *cp,
sc->spa_g_devstat = NULL;
}
mtx_destroy(&sc->spa_g_mtx);
mtx_destroy(&sc->spa_g_stat_mtx);
free(sc, M_NVDIMM);
return (0);
}

View File

@ -129,7 +129,6 @@ struct g_spa {
struct g_provider *spa_p;
struct bio_queue_head spa_g_queue;
struct mtx spa_g_mtx;
struct mtx spa_g_stat_mtx;
struct devstat *spa_g_devstat;
struct proc *spa_g_proc;
bool spa_g_proc_run;

View File

@ -64,13 +64,12 @@ __FBSDID("$FreeBSD$");
#include <machine/bus.h>
struct g_disk_softc {
struct mtx done_mtx;
struct disk *dp;
struct sysctl_ctx_list sysctl_ctx;
struct sysctl_oid *sysctl_tree;
char led[64];
uint32_t state;
struct mtx start_mtx;
struct mtx done_mtx;
};
static g_access_t g_disk_access;
@ -473,9 +472,7 @@ g_disk_start(struct bio *bp)
bp2->bio_pblkno = bp2->bio_offset / dp->d_sectorsize;
bp2->bio_bcount = bp2->bio_length;
bp2->bio_disk = dp;
mtx_lock(&sc->start_mtx);
devstat_start_transaction_bio(dp->d_devstat, bp2);
mtx_unlock(&sc->start_mtx);
dp->d_strategy(bp2);
if (bp3 == NULL)
@ -559,9 +556,7 @@ g_disk_start(struct bio *bp)
}
bp2->bio_done = g_disk_done;
bp2->bio_disk = dp;
mtx_lock(&sc->start_mtx);
devstat_start_transaction_bio(dp->d_devstat, bp2);
mtx_unlock(&sc->start_mtx);
dp->d_strategy(bp2);
break;
default:
@ -709,7 +704,6 @@ g_disk_create(void *arg, int flag)
mtx_pool_unlock(mtxpool_sleep, dp);
sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
mtx_init(&sc->start_mtx, "g_disk_start", NULL, MTX_DEF);
mtx_init(&sc->done_mtx, "g_disk_done", NULL, MTX_DEF);
sc->dp = dp;
gp = g_new_geomf(&g_disk_class, "%s%d", dp->d_name, dp->d_unit);
@ -795,7 +789,6 @@ g_disk_providergone(struct g_provider *pp)
pp->private = NULL;
pp->geom->softc = NULL;
mtx_destroy(&sc->done_mtx);
mtx_destroy(&sc->start_mtx);
g_free(sc);
}

View File

@ -490,7 +490,6 @@ void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
struct g_provider *pp;
struct mtx *mtxp;
int direct, error, first;
uint8_t cmd;
@ -545,11 +544,19 @@ g_io_request(struct bio *bp, struct g_consumer *cp)
KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
("Bio already on queue bp=%p", bp));
if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
binuptime(&bp->bio_t0);
else
getbinuptime(&bp->bio_t0);
if (g_collectstats & G_STATS_CONSUMERS)
devstat_start_transaction(cp->stat, &bp->bio_t0);
if (g_collectstats & G_STATS_PROVIDERS)
devstat_start_transaction(pp->stat, &bp->bio_t0);
#ifdef INVARIANTS
atomic_add_int(&cp->nstart, 1);
#endif
#ifdef GET_STACK_USAGE
direct = (cp->flags & G_CF_DIRECT_SEND) != 0 &&
@ -569,22 +576,6 @@ g_io_request(struct bio *bp, struct g_consumer *cp)
direct = 0;
#endif
/*
* The statistics collection is lockless, as such, but we
* can not update one instance of the statistics from more
* than one thread at a time, so grab the lock first.
*/
mtxp = mtx_pool_find(mtxpool_sleep, pp);
mtx_lock(mtxp);
if (g_collectstats & G_STATS_PROVIDERS)
devstat_start_transaction(pp->stat, &bp->bio_t0);
if (g_collectstats & G_STATS_CONSUMERS)
devstat_start_transaction(cp->stat, &bp->bio_t0);
#ifdef INVARIANTS
cp->nstart++;
#endif
mtx_unlock(mtxp);
if (direct) {
error = g_io_check(bp);
if (error >= 0) {

View File

@ -227,8 +227,6 @@ void
devstat_start_transaction(struct devstat *ds, const struct bintime *now)
{
mtx_assert(&devstat_mutex, MA_NOTOWNED);
/* sanity check */
if (ds == NULL)
return;
@ -239,13 +237,12 @@ devstat_start_transaction(struct devstat *ds, const struct bintime *now)
* to busy. The start time is really the start of the latest busy
* period.
*/
if (ds->start_count == ds->end_count) {
if (atomic_fetchadd_int(&ds->start_count, 1) == ds->end_count) {
if (now != NULL)
ds->busy_from = *now;
else
binuptime(&ds->busy_from);
}
ds->start_count++;
atomic_add_rel_int(&ds->sequence0, 1);
DTRACE_DEVSTAT_START();
}
@ -254,8 +251,6 @@ void
devstat_start_transaction_bio(struct devstat *ds, struct bio *bp)
{
mtx_assert(&devstat_mutex, MA_NOTOWNED);
/* sanity check */
if (ds == NULL)
return;