#include <cstring>
#include <csignal>
#include <cerrno>
#include <cstdio>
#include <iostream>
#include <cstdint>
#include <sys/param.h>
#include <vector>
#include <thread>
#include <set>
#include <list>
#include <unordered_map>
#include <sys/cpuset.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/event.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <semaphore.h>
#include <unistd.h>
#include <netdb.h>
#include <pthread.h>
#include <pthread_np.h>
#include <bsock/bsock.h>

#include "openssl/ssl.h"
#include "openssl/err.h"
#include "util.h"
#include "generator.h"
#include "logger.h"
#include "mod.h"
#include "msg.h"
#include "io.h"
#include "dmsg.h"
static constexpr int MAX_MOD_ARGS = 32;
static constexpr int MAX_MOD_ARG_LEN = 128;
static constexpr int MAX_SLAVES = 32;
static constexpr int NEVENT = 64;
static constexpr int CTRL_PORT = 15367;
static constexpr int CTRL_TIMEOUT = 3;
static constexpr int CTRL_BACKLOG = 4096;
static constexpr int BSOCK_BUF_SZ = 4096;
static constexpr int KQ_TIMER_MAGIC = 0x3355;
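/*
 * Per-worker-thread state: the worker's kqueue, the control pipe the main
 * thread uses to signal it, its start semaphore, the per-thread module
 * context, and the connections it owns.
 */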
struct dsmbr_thread_ctx {
    int tid;
    int ctrl_pipe_r;
    int ctrl_pipe_w;
    int kqfd;
    pthread_t thrd;
    sem_t start_sem;
    void *m_ctx;
    std::vector<struct dsmbr_conn *> conns;
};
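/*
 * Bookkeeping for a single request/response pair: payload sizes, send and
 * receive timestamps (microseconds), and the measurement epoch it belongs to.
 */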
struct dsmbr_request_record {
    uint64_t recv_size;
    uint64_t send_size;
    uint64_t send_ts;
    uint64_t recv_ts;
    uint64_t epoch;
};
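/*
 * One benchmark connection: its inter-arrival generator, optional TLS state,
 * the buffered socket (bsock) wrapping the fd, the per-connection module
 * context, requests currently in flight, and completed samples.
 */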
struct dsmbr_conn {
    Generator *ia_gen;
    SSL *ssl;
    char *ssl_readbuf;
    int conn_fd;
    struct bsock *bsock;
    struct ppd_bsock_io_ssl_ctx bsock_ctx;
    int timer_expired;
    void *m_ctx;
    uint64_t next_send;
    std::vector<struct dsmbr_request_record *> stats;
    std::list<struct dsmbr_request_record *> req_in_flight;
};
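/*
 * Master-side record of one slave (remote load generator) and the summary
 * statistics it reports back at the end of a run.
 */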
struct dsmbr_slave {
    int conn_fd;

    /* stats */
    uint64_t runtime;
    uint64_t avg_pkt_sz;
    uint64_t pkt_cnt;
};
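/*
 * Global configuration and shared state: values parsed from the command
 * line, master/slave bookkeeping, the TLS context, the loaded module, and
 * the worker threads.
 */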
struct dsmbr_options {
    int verbose;
    char server_ip[64];
    int server_port;
    int conn_per_thread;
    int target_qps;
    int depth;
    int warmup;
    int duration;
    char ia_dist[128];
    char output_file[128];
    cpuset_t cpuset;
    char module_path[128];
    char *mod_argk[MAX_MOD_ARGS];
    char *mod_argv[MAX_MOD_ARGS];
    int mod_argc;
    int enable_tls;

    // master mode
    char *slave_ips[MAX_SLAVES];
    int num_slaves;

    // global states
    int is_master;
    int is_slave;
    int slave_sockfds[MAX_SLAVES];
    int slave_ctrl_sock;
    SSL_CTX *ssl_ctx;

    ppd_mod_info *m_info;
    void *m_global_ctx;
    std::vector<struct dsmbr_thread_ctx *> workers;
};
static struct dsmbr_options options = {
    .verbose = 0,
    .server_ip = "127.0.0.1",
    .conn_per_thread = 1,
    .target_qps = 0,
    .depth = 1,
    .warmup = 0,
    .duration = 5,
    .ia_dist = "fb_ia",
    .output_file = "samples.txt",
    .cpuset = CPUSET_T_INITIALIZER(1),
    .module_path = {0},
    .mod_argc = 0,
    .enable_tls = 0,
    .slave_ips = {nullptr},
    .num_slaves = 0,
    .is_master = 0,
    .is_slave = 0,
    .slave_sockfds = {-1},
    .slave_ctrl_sock = -1,
    .ssl_ctx = nullptr,
    .m_info = nullptr,
    .m_global_ctx = nullptr
};
static void
dsmbr_dump_options()
{
    V("Configuration:\n"
      "        connections per thread: %d\n"
      "        num threads: %d\n"
      "        target QPS: %d\n"
      "        depth: %d\n"
      "        warmup: %d\n"
      "        duration: %d\n"
      "        master mode: %d\n"
      "        slave mode: %d\n"
      "        output file: %s\n"
      "        server ip: %s\n"
      "        server port: %d\n"
      "        interarrival dist: %s\n"
      "        TLS: %d\n"
      "        module path: %s\n"
      "        module args: %d\n",
      options.conn_per_thread,
      CPU_COUNT(&options.cpuset),
      options.target_qps,
      options.depth,
      options.warmup,
      options.duration,
      options.is_master,
      options.is_slave,
      options.output_file,
      options.server_ip,
      options.server_port,
      options.ia_dist,
      options.enable_tls,
      options.module_path,
      options.mod_argc);
}
static SSL *
dsmbr_tls_handshake_client(int conn_fd)
{
    SSL *ssl;
    int r;

    ssl = SSL_new(options.ssl_ctx);
    if (ssl == NULL) {
        E("SSL_new() failed: %ld\n", ERR_get_error());
    }

    if (SSL_set_fd(ssl, conn_fd) == 0) {
        E("SSL_set_fd() failed: %ld\n", ERR_get_error());
    }

    if ((r = SSL_connect(ssl)) <= 0) {
        E("SSL_connect() failed: %ld\n", ERR_get_error());
    }

    return ssl;
}
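/*
 * Build the process-wide TLS client context (pinned to TLS 1.2) and stash it
 * in options.ssl_ctx. Must run before any dsmbr_conn_create() call that
 * performs a TLS handshake.
 */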
static void
dsmbr_create_tls_context()
{
    SSL_CTX *ctx;

    ctx = SSL_CTX_new(TLS_client_method());
    if (!ctx) {
        E("SSL_CTX_new() failed: %ld\n", ERR_get_error());
    }

    SSL_CTX_set_min_proto_version(ctx, TLS1_2_VERSION);
    SSL_CTX_set_max_proto_version(ctx, TLS1_2_VERSION);

    options.ssl_ctx = ctx;
}
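/*
 * Open one TCP connection to the target server, optionally wrap it in TLS,
 * attach a bsock ring buffer for I/O, set up the inter-arrival generator and
 * per-connection module context, and register the fd with the worker's
 * kqueue. The per-connection rate is target_qps split evenly across all
 * connections (conn_per_thread * number of worker cores).
 */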
static struct dsmbr_conn *
dsmbr_conn_create(struct dsmbr_thread_ctx *thrd_ctx)
{
    struct sockaddr_in server_addr;

    server_addr.sin_family = AF_INET;
    server_addr.sin_port = htons(options.server_port);
    server_addr.sin_addr.s_addr = inet_addr(options.server_ip);

    int enable = 1;
    struct timeval tv = { .tv_sec = CTRL_TIMEOUT, .tv_usec = 0 };

    int conn_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
    if (conn_fd == -1) {
        E("socket() error: %d\n", errno);
    }
    if (setsockopt(conn_fd, SOL_SOCKET, SO_RCVTIMEO, (const char *)&tv, sizeof(tv)) != 0) {
        E("setsockopt() rcvtimeo: %d\n", errno);
    }
    if (setsockopt(conn_fd, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(enable)) != 0) {
        E("setsockopt() reuseaddr: %d\n", errno);
    }
    if (setsockopt(conn_fd, SOL_SOCKET, SO_REUSEPORT, &enable, sizeof(enable)) != 0) {
        E("setsockopt() reuseport: %d\n", errno);
    }
    if (setsockopt(conn_fd, IPPROTO_TCP, TCP_NODELAY, &enable, sizeof(enable)) != 0) {
        E("setsockopt() nodelay: %d\n", errno);
    }
    if (connect(conn_fd, (struct sockaddr *)&server_addr, sizeof(server_addr)) != 0) {
        E("connect() failed: %d\n", errno);
    }

    V("Established client connection %d...\n", conn_fd);

    struct dsmbr_conn *conn = new struct dsmbr_conn;
    conn->conn_fd = conn_fd;

    struct bsock_ringbuf_io io;
    void *bsock_ctx = nullptr;
    if (options.enable_tls) {
        V("Initiating TLS handshake on connection %d...\n", conn_fd);
        conn->ssl = dsmbr_tls_handshake_client(conn_fd);
        io = ppd_bsock_io_ssl();
        conn->ssl_readbuf = new char[BSOCK_BUF_SZ];
        conn->bsock_ctx.ssl_readbuf = conn->ssl_readbuf;
        conn->bsock_ctx.ssl_readbuf_len = BSOCK_BUF_SZ;
        conn->bsock_ctx.ssl = conn->ssl;
        bsock_ctx = &conn->bsock_ctx;
    } else {
        io = bsock_io_posix();
        conn->ssl_readbuf = nullptr;
        conn->ssl = nullptr;
        bsock_ctx = (void *)(uintptr_t)conn_fd;
    }

    conn->bsock = bsock_create(bsock_ctx, &io, BSOCK_BUF_SZ, BSOCK_BUF_SZ);

    conn->next_send = 0;
    conn->timer_expired = 0;
    conn->ia_gen = createGenerator(options.ia_dist);
    if (conn->ia_gen == NULL) {
        E("Failed to create generator \"%s\"\n", options.ia_dist);
    }
    conn->ia_gen->set_lambda((double)options.target_qps /
        (double)(options.conn_per_thread * CPU_COUNT(&options.cpuset)));

    int status;
    if ((status = options.m_info->conn_create_cb(options.m_global_ctx, thrd_ctx->m_ctx, &conn->m_ctx)) != 0) {
        E("Failed to create conn m_ctx: %d\n", status);
    }

    struct kevent kev;
    EV_SET(&kev, conn->conn_fd, EVFILT_READ, EV_ADD, 0, 0, conn);
    if (kevent(thrd_ctx->kqfd, &kev, 1, NULL, 0, NULL) == -1) {
        E("kevent() failed: %d\n", errno);
    }

    thrd_ctx->conns.push_back(conn);
    return conn;
}
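/*
 * Tear down a connection created by dsmbr_conn_create(): deregister it from
 * the worker's kqueue, shut down TLS if enabled, release the bsock and the
 * fd, and drop it from the worker's connection list.
 */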
static void
dsmbr_conn_free(struct dsmbr_thread_ctx *ctx, struct dsmbr_conn *conn)
{
    struct kevent kev;
    EV_SET(&kev, conn->conn_fd, EVFILT_READ, EV_DELETE, 0, 0, conn);
    if (kevent(ctx->kqfd, &kev, 1, NULL, 0, NULL) == -1) {
        E("kevent() failed: %d\n", errno);
    }

    if (conn->ssl != nullptr) {
        SSL_shutdown(conn->ssl);
        SSL_free(conn->ssl);
        delete[] conn->ssl_readbuf;
    }

    bsock_free(conn->bsock);
    close(conn->conn_fd);

    for (auto it = ctx->conns.begin(); it != ctx->conns.end(); ) {
        if (conn == *it) {
            it = ctx->conns.erase(it);
        } else {
            ++it;
        }
    }

    delete conn->ia_gen;
    delete conn;
}
/*
 * Handle one kevent delivered to a worker thread. Every event registered by
 * this program carries the owning connection in udata. Two kinds of events
 * land here: the per-connection one-shot inter-arrival timer (ident ==
 * KQ_TIMER_MAGIC) and the connection fd becoming readable.
 *
 * The request write and the response read go through the loaded module and
 * the bsock interfaces defined elsewhere (mod.h / io.h); they are marked as
 * placeholders below.
 */
void
dsmbr_handle_event(struct dsmbr_thread_ctx *tinfo, struct kevent *kev)
{
    struct dsmbr_conn *conn = (struct dsmbr_conn *)kev->udata;
    int kev_fd = (int)kev->ident;
    struct kevent tmpev;

    if (kev_fd != conn->conn_fd && kev_fd != KQ_TIMER_MAGIC) {
        E("kevent() returned unknown fd %d\n", kev_fd);
    }

    if (kev_fd == KQ_TIMER_MAGIC) {
        conn->timer_expired = 1;

        /* honor the depth limit: skip this tick if too many are in flight */
        if ((int)conn->req_in_flight.size() < options.depth) {
            struct dsmbr_request_record *rec = new struct dsmbr_request_record;
            rec->send_ts = get_time_us();
            rec->recv_ts = 0;
            rec->send_size = 0;
            rec->recv_size = 0;
            rec->epoch = 0;
            conn->req_in_flight.push_back(rec);

            /* placeholder: the loaded module builds and writes the request
             * to conn->bsock here */
        }

        /* draw the next inter-arrival gap (seconds -> microseconds) and
         * re-arm the one-shot timer */
        conn->next_send = (uint64_t)(conn->ia_gen->generate() * 1000000.0);
        EV_SET(&tmpev, KQ_TIMER_MAGIC, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
            NOTE_USECONDS, conn->next_send, conn);
        if (kevent(tinfo->kqfd, &tmpev, 1, nullptr, 0, nullptr) != 0) {
            E("Error arming timer for connection %d. ERR %d\n",
                conn->conn_fd, errno);
        }

        return;
    }

    /* otherwise the connection fd is readable: a response arrived */
    if (conn->req_in_flight.empty()) {
        E("Connection %d received more responses than requests sent.\n",
            conn->conn_fd);
    }

    /* placeholder: the loaded module reads and parses the response from
     * conn->bsock here */

    struct dsmbr_request_record *rec = conn->req_in_flight.front();
    conn->req_in_flight.pop_front();
    rec->recv_ts = get_time_us();
    conn->stats.push_back(rec);

    V("Conn %d: LAT: %ld us\n", conn->conn_fd,
        (long)(rec->recv_ts - rec->send_ts));
}
void *
dsmbr_worker_main(void *ctx)
{
    struct dsmbr_thread_ctx *tinfo = (struct dsmbr_thread_ctx *)ctx;

    /* establish this worker's connections (assumption: each worker opens
     * options.conn_per_thread of them itself, before the prepare barrier) */
    for (int i = 0; i < options.conn_per_thread; i++) {
        dsmbr_conn_create(tinfo);
    }

    V("Thread %d has established %ld connections.\n", tinfo->tid, tinfo->conns.size());
    pthread_barrier_wait(&prepare_barrier);

    V("Thread %d waiting for START...\n", tinfo->tid);
    pthread_barrier_wait(&ok_barrier);

    V("Thread %d running...\n", tinfo->tid);
    /* send the initial packet now */
    for (uint32_t i = 0; i < tinfo->conns.size(); i++) {
        tinfo->conns.at(i)->next_send = get_time_us();
    }

    bool stop = false;
    while (!stop) {
        struct kevent kevs[NEVENT];
        int nev = kevent(tinfo->kqfd, NULL, 0, kevs, NEVENT, NULL);

        if (nev < 0) {
            E("Thread %d kevent failed. ERR %d\n", tinfo->tid, errno);
        }

        for (int i = 0; i < nev; i++) {
            struct kevent *ev = &kevs[i];

            /* a write to the control pipe tells this worker to exit */
            if ((int)ev->ident == tinfo->ctrl_pipe_r) {
                stop = true;
                break;
            }

            struct dsmbr_conn *conn = (struct dsmbr_conn *)ev->udata;

            if (ev->flags & EV_EOF) {
                E("Connection %d dropped due to EV_EOF. ERR: %d\n",
                    conn->conn_fd, ev->fflags);
            }

            dsmbr_handle_event(tinfo, ev);
        }
    }

    while (!tinfo->conns.empty()) {
        dsmbr_conn_free(tinfo, tinfo->conns.back());
    }

    close(tinfo->kqfd);
    V("Thread %d exiting...\n", tinfo->tid);

    return NULL;
}
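/*
 * Allocate a worker context, create its kqueue and control pipe (the read
 * end is watched on the kqueue so the main thread can wake the worker), and
 * spawn the worker thread pinned to the given core.
 */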
static struct dsmbr_thread_ctx *
dsmbr_create_worker(int tid, int core)
{
    struct dsmbr_thread_ctx *tinfo = new struct dsmbr_thread_ctx;
    tinfo->tid = tid;

    int kqfd = kqueue();
    if (kqfd < 0) {
        E("kqueue() failed: %d\n", errno);
    }
    tinfo->kqfd = kqfd;

    int pipes[2];
    if (pipe(pipes) != 0) {
        E("pipe() failed: %d\n", errno);
    }
    tinfo->ctrl_pipe_r = pipes[0];
    tinfo->ctrl_pipe_w = pipes[1];

    struct kevent kev;
    EV_SET(&kev, tinfo->ctrl_pipe_r, EVFILT_READ, EV_ADD, 0, 0, NULL);
    if (kevent(kqfd, &kev, 1, NULL, 0, NULL) == -1) {
        E("kevent() failed: %d\n", errno);
    }

    if (sem_init(&tinfo->start_sem, 0, 0) != 0) {
        E("sem_init() failed: %d\n", errno);
    }

    pthread_attr_t attr;
    int status;
    if ((status = pthread_attr_init(&attr)) != 0) {
        E("pthread_attr_init() failed: %d\n", status);
    }

    cpuset_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(core, &cpuset);

    status = pthread_attr_setaffinity_np(&attr, sizeof(cpuset), &cpuset);
    if (status != 0) {
        E("pthread_attr_setaffinity_np() failed: %d\n", status);
    }

    status = pthread_create(&tinfo->thrd, &attr, dsmbr_worker_main, tinfo);
    if (status != 0) {
        E("pthread_create() failed: %d\n", status);
    }

    return tinfo;
}
static void
dsmbr_create_workers()
{
    int tid = 0;
    int core;

    CPU_FOREACH_ISSET(core, &options.cpuset) {
        options.workers.push_back(dsmbr_create_worker(tid, core));
        V("Created thread %d on core %d\n", tid, core);
        tid++;
    }
}
static std::string
get_ip_from_hostname(std::string hostname)
{
    static char rt[100] = {0};
    struct in_addr **addr;
    struct hostent *he;

    if ((he = gethostbyname(hostname.c_str())) == NULL) {
        E("Hostname %s cannot be resolved.\n", hostname.c_str());
    }

    addr = (struct in_addr **)he->h_addr_list;
    for (int i = 0; addr[i] != NULL; i++) {
        strncpy(rt, inet_ntoa(*addr[i]), 99);
        return rt;
    }

    return rt;
}
static void
establish_slave_conns()
{
    /* create a connection to the clients */
    struct kevent kev[MAX_SLAVES]; /* one read filter per client; MAX_SLAVES assumed as the upper bound */

    for (uint32_t i = 0; i < client_ips.size(); i++) {
        struct sockaddr_in csock_addr;
        int c_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
        csock_addr.sin_family = AF_INET;
        csock_addr.sin_addr.s_addr = inet_addr(client_ips[i]);
        csock_addr.sin_port = htons(DEFAULT_CLIENT_CTL_PORT);

        V("Connecting to client %s...\n", client_ips.at(i));

        if (connect(c_fd, (struct sockaddr *)&csock_addr, sizeof(csock_addr)) != 0) {
            E("Connect failed. ERR %d\n", errno);
        }

        if (writebuf(c_fd, &options, sizeof(options)) < 0) {
            E("Write to client. ERR %d\n", errno);
        }

        client_fds.push_back(c_fd);
        V("Client connected %d/%lu.\n", i + 1, client_ips.size());

        EV_SET(&kev[i], c_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);

        options.global_conn_start_idx += options.client_conn * options.client_thread_count;
    }

    V("Registering client fds to mtkq...\n");
    /* add to main thread's kq */
    if (kevent(mt_kq, kev, client_ips.size(), NULL, 0, NULL) == -1) {
        E("Failed to add some clients to mtkq. ERR %d\n", errno);
    }
}
static int
dsmbr_slave_ctrl_sock_create()
{
    struct sockaddr_in server_addr;
    int status;
    const int enable = 1;

    server_addr.sin_family = AF_INET;
    server_addr.sin_port = htons(CTRL_PORT);
    server_addr.sin_addr.s_addr = INADDR_ANY;

    int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
    if (fd < 0) {
        E("socket() returned %d", errno);
    }

    if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &enable, sizeof(enable)) < 0) {
        E("setsockopt() NODELAY %d", errno);
    }

    status = bind(fd, (struct sockaddr *)&server_addr, sizeof(server_addr));
    if (status < 0) {
        E("bind() returned %d", errno);
    }

    status = listen(fd, CTRL_BACKLOG);
    if (status < 0) {
        E("listen() returned %d", errno);
    }

    return fd;
}
static void client_send_stats(int qps, int send_sz, int recv_sz)
{
    struct kevent kev;
    int msg = 0;
    ppd_slave_resp resp;
    char buf[1024];

    resp.set_qps(qps);
    resp.set_send_sz(send_sz);
    resp.set_resp_sz(recv_sz);
    resp.SerializeToArray(buf + sizeof(long), 1024 - sizeof(long));
    long sz = htonl(resp.ByteSizeLong());
    memcpy(buf, &sz, sizeof(long));

    /* clients need to send qps */
    V("Sending master stats, qps %d, send %d, resp %d...\n", qps, send_sz, recv_sz);
    if (writebuf(master_fd, buf, resp.ByteSizeLong() + sizeof(long)) < 0) {
        E("Error writing stats to master\n");
    }

    V("Waiting for master ACK...\n");

    if (kevent(mt_kq, NULL, 0, &kev, 1, NULL) != 1) {
        E("kevent wait for master ack %d\n", errno);
    }

    if (readbuf((int)kev.ident, &msg, sizeof(int)) < 0) {
        E("Failed to receive ack from master\n");
    }

    if (msg != MSG_TEST_QPS_ACK) {
        E("Invalid ack message\n");
    }
}
static void wait_master_prepare()
{
    V("Waiting for master's PREPARE...\n");

    struct sockaddr_in csock_addr;
    int listen_fd;
    int enable = 1;

    csock_addr.sin_family = AF_INET;
    csock_addr.sin_addr.s_addr = htonl(INADDR_ANY);
    csock_addr.sin_port = htons(DEFAULT_CLIENT_CTL_PORT);
    listen_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);

    if (listen_fd < 0) {
        E("socket");
    }

    if (setsockopt(listen_fd, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(enable)) < 0) {
        E("setsockopt reuseaddr");
    }

    if (setsockopt(listen_fd, SOL_SOCKET, SO_REUSEPORT, &enable, sizeof(enable)) < 0) {
        E("setsockopt reuseport");
    }

    if (bind(listen_fd, (struct sockaddr *)&csock_addr, sizeof(csock_addr)) < 0) {
        E("bind");
    }

    if (listen(listen_fd, 10) < 0) {
        E("ctl listen");
    }

    master_fd = accept(listen_fd, NULL, NULL);
    if (master_fd == -1) {
        E("Failed to accept master. ERR %d\n", errno);
    }

    close(listen_fd);

    if (readbuf(master_fd, &options, sizeof(options)) < 0) {
        E("Failed to receive options from master. ERR %d\n", errno);
    }

    V("Registering master fd to mtkq...\n");
    struct kevent kev;

    EV_SET(&kev, master_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);

    if (kevent(mt_kq, &kev, 1, NULL, 0, NULL) == -1) {
        E("Failed to register master fd to mtkq. ERR %d\n", errno);
    }

    /* set the correct mode */
    options.master_mode = 0;
    options.client_mode = 1;
    options.output_name = NULL;
}
static void usage()
{
    fprintf(stdout, "Usage:\n"
                    " -s: server addr.\n"
                    " -p: server port.\n"
                    " -q: target qps.\n"
                    " -c: worker thread cpulist.\n"
                    " -C: connections per worker thread.\n"
                    " -o: output file.\n"
                    " -h: show help.\n"
                    " -v: verbose mode.\n"
                    " -T: warm up time.\n"
                    " -t: duration.\n"
                    " -i: interarrival distribution.\n"
                    " -M: module path.\n"
                    " -O: module args of format key=value.\n"
                    " -a: client addrs (master mode).\n"
                    " -A: client mode.\n\n");
}
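/*
 * Example invocation (hypothetical values, flags as listed in usage() above):
 *   dsmbr -s server.example.com -p 9898 -q 100000 -c 0,1,2,3 -C 4 \
 *         -T 5 -t 30 -i fb_ia -o samples.txt
 */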
/*
 * protocol:
 *
 * master -> client SYNC
 * client and master all establish connections to the server
 * client -> master ACK
 * master -> client START (client runs forever)
 * client -> master START_ACK
 * master RUNS for X seconds
 * master -> client STOP
 * client -> master STATS
 */
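/*
 * On the wire, stats replies are framed as a length prefix (a long in
 * network byte order) followed by a serialized ppd_slave_resp protobuf; see
 * client_send_stats() above. Control words such as MSG_TEST_STOP and
 * MSG_TEST_QPS_ACK are sent as bare ints.
 */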
int
main(int argc, char *argv[])
{
    int ch;
    FILE *resp_fp_csv;

    while ((ch = getopt(argc, argv, "q:s:C:p:o:t:c:hvW:w:T:Aa:Q:i:S:l:O:")) != -1) {
        switch (ch) {
        case 'q':
            options.target_qps = atoi(optarg);
            if (options.target_qps < 0) {
                E("Target QPS must be positive\n");
            }
            break;
        case 'Q':
            options.master_qps = atoi(optarg);
            if (options.master_qps < 0) {
                E("Master QPS must be positive\n");
            }
            break;
        case 's': {
            string ip = get_ip_from_hostname(optarg);
            strncpy(options.server_ip, ip.c_str(), INET_ADDRSTRLEN);
            break;
        }
        case 'p':
            options.server_port = atoi(optarg);
            if (options.server_port <= 0) {
                E("Server port must be positive\n");
            }
            break;
        case 'o':
            options.output_name = optarg;
            break;
        case 't':
            options.client_thread_count = atoi(optarg);
            if (options.client_thread_count <= 0) {
                E("Client threads must be positive\n");
            }
            break;
        case 'T':
            options.master_thread_count = atoi(optarg);
            if (options.master_thread_count <= 0) {
                E("Master threads must be positive\n");
            }
            break;
        case 'c':
            options.client_conn = atoi(optarg);
            if (options.client_conn <= 0) {
                E("Client connections must be positive\n");
            }
            break;
        case 'l': {
            options.workload_type = static_cast<WORKLOAD_TYPE>(atoi(optarg));
            break;
        }
        case 'C':
            options.master_conn = atoi(optarg);
            if (options.master_conn <= 0) {
                E("Master connections must be positive\n");
            }
            break;
        case 'S': {
            string ip = get_ip_from_hostname(optarg);
            options.master_server_ip_given = 1;
            strncpy(options.master_server_ip, ip.c_str(), INET_ADDRSTRLEN);
            break;
        }
        case 'O': {
            strncpy(options.gen_params[options.num_gen_params], optarg, MAX_GEN_PARAMS_LEN);
            options.num_gen_params++;
            break;
        }
        case 'h':
        case '?':
            usage();
            exit(0);
        case 'v':
            options.verbose = 1;
            W("Verbose mode can cause SUBSTANTIAL latency fluctuations in some(XFCE) terminals!\n");
            break;
        case 'a': {
            if (options.client_mode == 1) {
                E("Cannot be both master and client\n");
            }
            string ip = get_ip_from_hostname(optarg);
            char *rip = new char[INET_ADDRSTRLEN + 1];
            strncpy(rip, ip.c_str(), INET_ADDRSTRLEN);
            client_ips.push_back(rip);
            options.master_mode = 1;
            break;
        }
        case 'W':
            options.warmup = atoi(optarg);
            if (options.warmup < 0) {
                E("Warmup must be positive\n");
            }
            break;
        case 'w':
            options.duration = atoi(optarg);
            if (options.duration <= 0) {
                E("Test duration must be positive\n");
            }
            break;
        case 'A': {
            if (options.master_mode == 1) {
                E("Cannot be both master and client\n");
            }
            options.client_mode = 1;
            break;
        }
        case 'i': {
            strncpy(options.generator_name, optarg, MAX_GEN_LEN);
            break;
        }
        default:
            E("Unrecognized option -%c\n\n", ch);
        }
    }
    if (!options.client_mode) {
        resp_fp_csv = fopen(options.output_name, "w");
        if (resp_fp_csv == NULL) {
            E("Cannot open file %s for writing %d.", options.output_name, errno);
            exit(1);
        }
    }
    /* create main thread's kqueue */
    int mt_kq = kqueue();

    struct dsmbr_ctrl_msg ctrl_msg;
    int ret = 0;
    int slave_sock = -1;
    int master_conn = -1;
    std::vector<struct dsmbr_slave *> slaves;

    if (options.master_mode) {
        /* initiate connections to clients */

    } else if (options.client_mode) {
        slave_sock = dsmbr_slave_ctrl_sock_create();

        V("Waiting for master connection...\n");
        master_conn = accept(slave_sock, NULL, NULL);
        if (master_conn < 0) {
            E("Failed to accept master connection: %d\n", errno);
        }

        ret = ppd_readbuf(master_conn, &ctrl_msg, sizeof(ctrl_msg));
        if (ret != 0) {
            E("Failed to read control message from master: %d\n", errno);
        }
        /* TODO: validate ctrl_msg against the expected SYNC/PREPARE opcode
         * defined in dmsg.h before proceeding. */
    }
    /* here EVERYONE is on the same page */
    dsmbr_dump_options();
    parse_rgen_params();
    vector<std::thread *> threads;
    vector<int> send_pipes;
    vector<int> recv_pipes;
    send_pipes.resize(options.client_thread_count);
    recv_pipes.resize(options.client_thread_count);

    /* do our setup according to the options */
    pthread_barrier_init(&prepare_barrier, NULL, options.client_thread_count + 1);
    pthread_barrier_init(&ok_barrier, NULL, options.client_thread_count + 1);

    V("Creating %d threads...\n", options.client_thread_count);

    /* create worker threads */
    vector<struct datapt *> *data = new vector<struct datapt *>[options.client_thread_count];
    for (int i = 0; i < options.client_thread_count; i++) {
        int pipes[2];
        if (pipe(&pipes[0]) == -1) {
            E("Cannot create pipes. ERR %d\n", errno);
        }

        struct perf_counter *perf = new struct perf_counter;
        perf->cnt = 0;
        send_pipes[i] = pipes[0];
        recv_pipes[i] = pipes[1];

        thrd_perf_counters.push_back(perf);
        std::thread *thrd = new std::thread(worker_main, i, pipes[1], &data[i]);
        threads.push_back(thrd);

        pthread_t handle = thrd->native_handle();
        cpuset_t cpuset;
        CPU_ZERO(&cpuset);
        CPU_SET(i * 2, &cpuset);
        V("Setting thread %d's affinity to %d.\n", i, i * 2);
        if (pthread_setaffinity_np(handle, sizeof(cpuset_t), &cpuset) != 0) {
            E("Failed to set affinity for thread %d\n", i);
        }
    }

    V("Waiting for thread connection establishment.\n");
    pthread_barrier_wait(&prepare_barrier);

    if (options.master_mode) {
        V("Waiting for clients...\n");
        /* wait for ok messages from the clients */
        wait_clients_ok();
    } else if (options.client_mode) {
        V("Sending OK to master...\n");
        /* send ok messages to the master */
        send_master_ok();
    }
    /* create main thread timer loop */
    struct kevent kev;
#define TIMER_FD (-1)
    EV_SET(&kev, TIMER_FD, EVFILT_TIMER, EV_ADD, NOTE_SECONDS, 1, NULL);

    if (kevent(mt_kq, &kev, 1, NULL, 0, NULL) == -1) {
        E("Cannot create kevent timer. ERR %d\n", errno);
    }

    /* kick off worker threads */
    pthread_barrier_wait(&ok_barrier);
    /* now we are free to start the experiment */
    V("Main thread running.\n");

    while (1) {
        /* client mode runs forever unless the master sends us STOP */
        if ((int)cur_time >= options.duration + options.warmup && !options.client_mode) {
            break;
        }

        if (kevent(mt_kq, NULL, 0, &kev, 1, NULL) != 1) {
            E("Error in main event loop. ERR %d\n", errno);
        }

        if ((int)kev.ident == TIMER_FD) {
            cur_time++;
        } else {
            /* it's from either master or client */
            if (kev.flags & EV_EOF) {
                E("Client or master %d disconnected\n", (int)kev.ident);
            }

            int msg;

            if (readbuf(kev.ident, &msg, sizeof(msg)) == -1) {
                E("Failed to read from master_fd or message invalid. ERR %d\n", errno);
            }

            if (msg == MSG_TEST_STOP) {
                V("Received STOP from master\n");
                break;
            } else {
                E("Unexpected message from master: %d\n", msg);
            }
        }
    }
V("Signaling threads to exit...\n");
|
|
|
|
for (int i = 0; i < options.client_thread_count; i++) {
|
|
|
|
if (write(send_pipes[i], "e", sizeof(char)) == -1) {
|
|
|
|
E("Couldn't write to thread pipe %d. ERR %d\n", send_pipes[i], errno);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (int i = 0; i < options.client_thread_count; i++) {
|
|
|
|
threads.at(i)->join();
|
|
|
|
delete(threads.at(i));
|
|
|
|
delete thrd_perf_counters[i];
|
|
|
|
close(send_pipes[i]);
|
|
|
|
close(recv_pipes[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
int qps = 0;
|
|
|
|
|
|
|
|
for(int i = 0; i < options.client_thread_count; i++) {
|
|
|
|
qps += thrd_perf_counters[i]->cnt;
|
|
|
|
}
|
|
|
|
V("Total requests: %d\n", qps);
|
|
|
|
|
|
|
|
if (options.client_mode) {
|
|
|
|
client_send_stats(qps, send_sz, recv_sz);
|
|
|
|
close(master_fd);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct slave_stats stats;
|
|
|
|
stats.qps = 0;
|
|
|
|
stats.recv_sz = 0;
|
|
|
|
stats.send_sz = 0;
|
|
|
|
if (options.master_mode) {
|
|
|
|
V("Shutting down clients...\n");
|
|
|
|
client_stop(&stats);
|
|
|
|
qps += stats.qps;
|
|
|
|
}
|
|
|
|
|
|
|
|
V("Aggregated %d operations over %d seconds\n", qps, options.duration);
|
|
|
|
qps = qps / (options.duration);
|
|
|
|
|
|
|
|
if (!options.client_mode) {
|
|
|
|
/* stop the measurement */
|
|
|
|
V("Saving results...\n");
|
|
|
|
for (int i = 0; i < options.client_thread_count; i++) {
|
|
|
|
fprintf(resp_fp_csv, "%d,%d,%d\n", qps, stats.send_sz, stats.recv_sz);
|
|
|
|
for (uint32_t j = 0; j < data[i].size(); j++) {
|
|
|
|
struct datapt *pt = data[i].at(j);
|
|
|
|
fprintf(resp_fp_csv, "%ld\n", pt->lat);
|
|
|
|
delete pt;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
delete[] data;
|
|
|
|
|
|
|
|
fclose(resp_fp_csv);
|
|
|
|
}
|
|
|
|
|
|
|
|
close(mt_kq);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|