Option to save latency measurements to a file.

Jacob Leverich 2013-07-01 10:08:16 -07:00
parent ebd96ada3c
commit d7a1ada9d3
3 changed files with 25 additions and 1 deletion
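
The new --save option writes every sampled request to the named file, one line per request, as two floating-point columns: the start time relative to boot_time and the latency returned by Operation::time() (see the fprintf added to main() in the third diff below). As a rough sketch of how such a file might be post-processed, the following stand-alone program computes the mean of the latency column; the invocation, the samples.txt filename, and the unit of the second column are assumptions, not something this commit specifies.

// Hypothetical post-processing of a file produced by a run such as
// "mutilate --server 127.0.0.1 --save samples.txt" (invocation and filename
// are illustrative only). Each line is expected to hold two doubles:
// start time relative to boot_time, then the latency from Operation::time().
#include <cstdio>

int main() {
  FILE *f = fopen("samples.txt", "r");
  if (f == NULL) { perror("fopen samples.txt"); return 1; }

  double start, latency, total = 0.0;
  long count = 0;
  while (fscanf(f, "%lf %lf", &start, &latency) == 2) {
    total += latency;
    count++;
  }
  fclose(f);

  if (count > 0)
    printf("%ld samples, mean latency %f\n", count, total / count);
  return 0;
}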

View File

@@ -8,6 +8,7 @@
#include <vector>
#include "mutilate.h"
#include "Operation.h"
#define _POW 1.1
@@ -16,6 +17,8 @@ class LogHistogramSampler {
public:
  std::vector<uint64_t> bins;
  std::vector<Operation> samples;
  double sum;
  double sum_sq;
@@ -28,6 +31,7 @@ public:
  void sample(const Operation &op) {
    sample(op.time());
    if (args.save_given) samples.push_back(op);
  }
  void sample(double s) {
@@ -97,6 +101,8 @@ public:
    sum += h.sum;
    sum_sq += h.sum_sq;
    for (auto i: h.samples) samples.push_back(i);
  }
};
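
The diff above makes the sampler hold on to whole Operation objects, not just their latency, whenever --save was given, and extends accumulate() so merged samplers also merge their retained samples. Below is a minimal, compilable sketch of that idea under stated assumptions (a cut-down Operation, 200 bins, a keep_raw flag standing in for args.save_given, and guessed bin math); it is an illustration, not the class's actual implementation.

// Minimal sketch of a log-scale histogram that optionally also retains raw
// samples (keep_raw plays the role of args.save_given). Bin math and sizes
// are assumptions for illustration, not mutilate's actual internals.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Operation {
  double start_time, end_time;
  double time() const { return (end_time - start_time) * 1e6; } // usecs (assumed)
};

struct LogSampler {
  std::vector<uint64_t> bins;
  std::vector<Operation> samples;   // raw samples, kept only on request
  bool keep_raw;

  explicit LogSampler(bool keep) : bins(200, 0), keep_raw(keep) {}

  void sample(const Operation &op) {
    double us = std::max(op.time(), 1.0);
    size_t bin = std::min(bins.size() - 1,
                          (size_t) (std::log(us) / std::log(1.1))); // growth factor like _POW
    bins[bin]++;
    if (keep_raw) samples.push_back(op);   // analogue of the new push_back
  }

  // Merge another connection's sampler, including its retained Operations.
  void accumulate(const LogSampler &h) {
    for (size_t i = 0; i < bins.size(); i++) bins[i] += h.bins[i];
    for (const Operation &op : h.samples) samples.push_back(op);
  }
};

int main() {
  LogSampler s(true);
  s.sample(Operation{0.0, 0.000250});      // a 250us request
  printf("retained %zu raw samples\n", s.samples.size());
  return 0;
}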

View File

@@ -38,7 +38,6 @@ option "depth" d "Maximum depth to pipeline requests." int default="1"
option "roundrobin" R "Assign threads to servers in round-robin fashion. \
By default, each thread connects to every server."
option "cork" - "Minimum timer interval, in usecs. (experimental)" int
option "iadist" i "Inter-arrival distribution (distribution). Note: \
The distribution will automatically be adjusted to match the QPS given \
by --qps." string default="exponential"
@@ -55,6 +54,7 @@ option "no_nodelay" - "Don't use TCP_NODELAY."
option "warmup" w "Warmup time before starting measurement." int
option "wait" W "Time to wait after startup to start measurement." int
option "save" - "Record latency samples to given file." string
option "search" - "Search for the QPS where N-order statistic < Xus. \
(i.e. --search 95:1000 means find the QPS where 95% of requests are \

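These option lines are gengetopt declarations; gengetopt expands each option "NAME" into NAME_arg and NAME_given members of the generated arguments struct, which is why the code in the next diff can test args.save_given and read args.save_arg. A simplified sketch of the relevant fields follows (the real generated header contains far more, and the exact comments are assumed):

/* Approximate shape of what gengetopt generates for the new option; the
   surrounding struct is heavily abbreviated. */
struct gengetopt_args_info {
  /* ...fields for the other options... */
  char *save_arg;           /* value passed to --save (the output filename) */
  unsigned int save_given;  /* nonzero if --save appeared on the command line */
  /* ... */
};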
View File

@@ -610,6 +610,18 @@ int main(int argc, char **argv) {
printf("TX %10" PRIu64 " bytes : %6.1f MB/s\n",
stats.tx_bytes,
(double) stats.tx_bytes / 1024 / 1024 / (stats.stop - stats.start));
if (args.save_given) {
printf("Saving latency samples to %s.\n", args.save_arg);
FILE *file;
if ((file = fopen(args.save_arg, "w")) == NULL)
DIE("--save: failed to open %s: %s", args.save_arg, strerror(errno));
for (auto i: stats.get_sampler.samples) {
fprintf(file, "%f %f\n", i.start_time - boot_time, i.time());
}
}
}
// if (args.threads_arg > 1)
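
In the saved file, each start timestamp is offset by boot_time, so the first column presumably reads as seconds since the run began rather than absolute wall-clock time. A small sketch of the kind of gettimeofday-based helper such timestamps would come from (an assumption about mutilate's get_time(), not part of this commit):

// Assumed behavior of a get_time()-style helper: wall-clock time as a double
// in seconds, so "i.start_time - boot_time" yields seconds since startup.
#include <sys/time.h>
#include <cstdio>

static double get_time_sketch() {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return tv.tv_sec + tv.tv_usec / 1000000.0;
}

int main() {
  double boot_time = get_time_sketch();   // captured once at startup
  // ... run the benchmark ...
  double start_time = get_time_sketch();  // per-request timestamp
  printf("%f seconds since startup\n", start_time - boot_time);
  return 0;
}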
@@ -963,6 +975,9 @@ void do_mutilate(const vector<string>& servers, options_t& options,
  }
#endif
  if (master && !args.scan_given && !args.search_given)
    V("started at %f", get_time());
  start = get_time();
  for (Connection *conn: connections) {
    conn->start_time = start;
@@ -992,6 +1007,9 @@ void do_mutilate(const vector<string>& servers, options_t& options,
    else break;
  }
  if (master && !args.scan_given && !args.search_given)
    V("stopped at %f options.time = %d", get_time(), options.time);
  // Tear-down and accumulate stats.
  for (Connection *conn: connections) {
    stats.accumulate(conn->stats);