Keep statistics on the number of BIO_READ, BIO_WRITE, BIO_DELETE and BIO_FLUSH
requests, as well as the number of activemap updates. The BIO_WRITE and
activemap-update counters are especially interesting: if the two are too close
to each other, the workload needs a larger number of dirty extents. The
activemap should be updated as rarely as possible.

MFC after:	1 week
commit 42a14e17b5
parent bef2f8f5d1
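
To make the heuristic in the commit message concrete, here is a minimal,
illustrative C sketch (not part of this change) that compares the stat_write
and stat_activemap_update counters exported below. The
report_activemap_pressure() helper, the example values and the 0.5 threshold
are hypothetical; they only show how the two numbers relate.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper: given the BIO_WRITE and activemap-update counters
 * (e.g. as reported by "hastctl status"), print how often a write also
 * forced an activemap update.  The 0.5 threshold is an arbitrary example.
 */
static void
report_activemap_pressure(uint64_t stat_write, uint64_t stat_activemap_update)
{
        double ratio;

        if (stat_write == 0)
                return;
        ratio = (double)stat_activemap_update / (double)stat_write;
        printf("activemap updates per write: %.2f\n", ratio);
        if (ratio > 0.5)
                printf("consider allowing more dirty extents (keepdirty)\n");
}

int
main(void)
{
        /* Example values only. */
        report_activemap_pressure(100000, 60000);
        return (0);
}

A ratio close to 1.0 means that nearly every BIO_WRITE lands in a clean extent
and triggers an on-disk activemap flush; that is exactly the case where a
larger keepdirty / dirty-extent count pays off.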
@@ -341,6 +341,17 @@ control_status(struct nv *nv)
                 printf(" dirty: %ju (%NB)\n",
                     (uintmax_t)nv_get_uint64(nv, "dirty%u", ii),
                     (intmax_t)nv_get_uint64(nv, "dirty%u", ii));
+                printf(" statistics:\n");
+                printf(" reads: %ju\n",
+                    (uint64_t)nv_get_uint64(nv, "stat_read%u", ii));
+                printf(" writes: %ju\n",
+                    (uint64_t)nv_get_uint64(nv, "stat_write%u", ii));
+                printf(" deletes: %ju\n",
+                    (uint64_t)nv_get_uint64(nv, "stat_delete%u", ii));
+                printf(" flushes: %ju\n",
+                    (uint64_t)nv_get_uint64(nv, "stat_flush%u", ii));
+                printf(" activemap updates: %ju\n",
+                    (uint64_t)nv_get_uint64(nv, "stat_activemap_update%u", ii));
         }
         return (ret);
 }
@@ -199,6 +199,16 @@ control_status_worker(struct hast_resource *res, struct nv *nvout,
             "extentsize%u", no);
         nv_add_uint32(nvout, nv_get_uint32(cnvin, "keepdirty"),
             "keepdirty%u", no);
+        nv_add_uint64(nvout, nv_get_uint64(cnvin, "stat_read"),
+            "stat_read%u", no);
+        nv_add_uint64(nvout, nv_get_uint64(cnvin, "stat_write"),
+            "stat_write%u", no);
+        nv_add_uint64(nvout, nv_get_uint64(cnvin, "stat_delete"),
+            "stat_delete%u", no);
+        nv_add_uint64(nvout, nv_get_uint64(cnvin, "stat_flush"),
+            "stat_flush%u", no);
+        nv_add_uint64(nvout, nv_get_uint64(cnvin, "stat_activemap_update"),
+            "stat_activemap_update%u", no);
 end:
         if (cnvin != NULL)
                 nv_free(cnvin);
@@ -446,6 +456,13 @@ ctrl_thread(void *arg)
                                 nv_add_uint32(nvout, (uint32_t)0, "keepdirty");
                                 nv_add_uint64(nvout, (uint64_t)0, "dirty");
                         }
+                        nv_add_uint64(nvout, res->hr_stat_read, "stat_read");
+                        nv_add_uint64(nvout, res->hr_stat_write, "stat_write");
+                        nv_add_uint64(nvout, res->hr_stat_delete,
+                            "stat_delete");
+                        nv_add_uint64(nvout, res->hr_stat_flush, "stat_flush");
+                        nv_add_uint64(nvout, res->hr_stat_activemap_update,
+                            "stat_activemap_update");
                         nv_add_int16(nvout, 0, "error");
                         break;
                 case CONTROL_RELOAD:
@@ -218,6 +218,17 @@ struct hast_resource {
         /* Locked used to synchronize access to hr_amp. */
         pthread_mutex_t hr_amp_lock;
 
+        /* Number of BIO_READ requests. */
+        uint64_t hr_stat_read;
+        /* Number of BIO_WRITE requests. */
+        uint64_t hr_stat_write;
+        /* Number of BIO_DELETE requests. */
+        uint64_t hr_stat_delete;
+        /* Number of BIO_FLUSH requests. */
+        uint64_t hr_stat_flush;
+        /* Number of activemap updates. */
+        uint64_t hr_stat_activemap_update;
+
         /* Next resource. */
         TAILQ_ENTRY(hast_resource) hr_next;
 };
@@ -1117,6 +1117,7 @@ ggate_recv_thread(void *arg)
                  */
                 switch (ggio->gctl_cmd) {
                 case BIO_READ:
+                        res->hr_stat_read++;
                         pjdlog_debug(2,
                             "ggate_recv: (%p) Moving request to the send queue.",
                             hio);
@@ -1145,6 +1146,7 @@ ggate_recv_thread(void *arg)
                         QUEUE_INSERT1(hio, send, ncomp);
                         break;
                 case BIO_WRITE:
+                        res->hr_stat_write++;
                         if (res->hr_resuid == 0) {
                                 /*
                                  * This is first write, initialize localcnt and
@@ -1183,12 +1185,21 @@ ggate_recv_thread(void *arg)
                         mtx_lock(&res->hr_amp_lock);
                         if (activemap_write_start(res->hr_amp,
                             ggio->gctl_offset, ggio->gctl_length)) {
+                                res->hr_stat_activemap_update++;
                                 (void)hast_activemap_flush(res);
                         }
                         mtx_unlock(&res->hr_amp_lock);
                         /* FALLTHROUGH */
                 case BIO_DELETE:
                 case BIO_FLUSH:
+                        switch (ggio->gctl_cmd) {
+                        case BIO_DELETE:
+                                res->hr_stat_delete++;
+                                break;
+                        case BIO_FLUSH:
+                                res->hr_stat_flush++;
+                                break;
+                        }
                         pjdlog_debug(2,
                             "ggate_recv: (%p) Moving request to the send queues.",
                             hio);
@@ -612,6 +612,20 @@ recv_thread(void *arg)
                         QUEUE_INSERT(send, hio);
                         continue;
                 }
+                switch (hio->hio_cmd) {
+                case HIO_READ:
+                        res->hr_stat_read++;
+                        break;
+                case HIO_WRITE:
+                        res->hr_stat_write++;
+                        break;
+                case HIO_DELETE:
+                        res->hr_stat_delete++;
+                        break;
+                case HIO_FLUSH:
+                        res->hr_stat_flush++;
+                        break;
+                }
                 reqlog(LOG_DEBUG, 2, -1, hio,
                     "recv: (%p) Got request header: ", hio);
                 if (hio->hio_cmd == HIO_KEEPALIVE) {