quackerd 2021-02-23 13:12:27 -05:00
parent 162d41a4cc
commit b85777e6f0
10 changed files with 123 additions and 70 deletions

View File

@@ -77,12 +77,12 @@ struct options_t {
unsigned int s_rxqid { 0 };
unsigned int s_txqid { 0 };
// for qps calculation
std::atomic<uint32_t> s_total_pkts { 0 };
std::atomic<uint32_t> s_recved_pkts { 0 };
std::atomic<uint32_t> s_pkt_loss { 0 };
std::atomic<uint64_t> s_start_time { 0 };
std::atomic<uint64_t> s_end_time { 0 };
std::atomic<uint32_t> s_slave_qps { 0 };
std::atomic<uint32_t> s_slave_total { 0 };
std::atomic<uint32_t> s_slave_recved { 0 };
std::atomic<uint32_t> s_slave_loss { 0 };
uint32_t s_state { STATE_WAIT };
@@ -539,6 +539,7 @@ pkt_loop()
options.s_last_datapt->srv_sw_tx,
options.s_last_datapt->srv_hw_rx,
options.s_last_datapt->srv_sw_rx);
options.s_recved_pkts.fetch_add(1);
options.s_last_datapt = nullptr;
}
@@ -568,7 +569,6 @@ pkt_loop()
options.s_last_datapt->epoch = epoch;
options.s_last_datapt->valid =
options.s_record.load();
options.s_total_pkts.fetch_add(1);
read_tx = false;
recv_resp = false;
@@ -632,14 +632,15 @@ locore_main(void *tif __rte_unused)
auto pld_qps = (struct pkt_payload_qps *)
pkt_hdr->payload;
uint32_t qps = rte_be_to_cpu_32(pld_qps->qps);
uint32_t total = rte_be_to_cpu_32(pld_qps->total_pkts);
uint32_t loss = rte_be_to_cpu_32(pld_qps->total_loss);
uint32_t recved = rte_be_to_cpu_32(
pld_qps->recved_pkts);
uint32_t loss = rte_be_to_cpu_32(pld_qps->lost_pkts);
ntr(NTR_DEP_USER1, NTR_LEVEL_DEBUG,
"locore_main: received qps %d from client %d\n",
qps, i);
options.s_slave_qps.fetch_add(qps);
options.s_slave_loss.fetch_add(loss);
options.s_slave_total.fetch_add(total);
options.s_slave_recved.fetch_add(recved);
rte_pktmbuf_free(mbufs[i]);
}
}
@@ -990,18 +991,17 @@ main(int argc, char *argv[])
rte_exit(EXIT_FAILURE, "failed to wait for job completion\n");
// calculate QPS
uint32_t qps = ((double)(options.s_total_pkts.load() -
options.s_pkt_loss.load())) /
uint32_t qps = (double)options.s_recved_pkts.load() /
(((double)(options.s_end_time.load() -
options.s_start_time.load()) /
(double)S2NS));
qps += options.s_slave_qps.load();
// dump stats
log_file << qps << ',' << options.s_total_pkts.load() << ','
log_file << qps << ',' << options.s_recved_pkts.load() << ','
<< options.s_pkt_loss.load() << ','
<< options.s_slave_total.load() << ','
<< options.s_slave_loss.load() << ',' << std::endl;
<< options.s_slave_recved.load() << ','
<< options.s_slave_loss.load() << std::endl;
for (auto it : options.s_data) {
if (it->valid) {
@@ -1010,12 +1010,14 @@ main(int argc, char *argv[])
<< it->srv_sw_rx << ',' << it->srv_sw_tx << ','
<< it->srv_hw_rx << ',' << it->srv_hw_tx
<< std::endl;
printf("Writing ... datapt %p", it);
}
delete it;
}
log_file.close();
ntr(NTR_DEP_USER1, NTR_LEVEL_INFO, "qps = %d, total = %d, loss = %d, slave total = %d, slave loss = %d\n",
qps, options.s_total_pkts.load(), options.s_pkt_loss.load(), options.s_slave_total.load(), options.s_slave_loss.load());
ntr(NTR_DEP_USER1, NTR_LEVEL_INFO, "qps = %d, recved = %d, loss = %d, slave recved = %d, slave loss = %d\n",
qps, options.s_recved_pkts.load(), options.s_pkt_loss.load(), options.s_slave_recved.load(), options.s_slave_loss.load());
// clean up
rte_eth_dev_stop(portid);
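For reference, the headline QPS logged above is now computed from packets actually received rather than packets sent minus losses: received packets divided by the elapsed wall time, plus the QPS aggregated from the slaves. A minimal standalone sketch of that arithmetic in C++ (plain parameters stand in for the atomics in options; the function name is illustrative):

#include <cstdint>

// Nanoseconds per second, matching the S2NS constant used in the hunk above.
constexpr uint64_t S2NS = 1'000'000'000ULL;

// QPS = packets received / elapsed seconds, plus the QPS reported by the slaves.
static uint32_t
calc_master_qps(uint32_t recved_pkts, uint64_t start_ns, uint64_t end_ns, uint32_t slave_qps)
{
    double elapsed_s = (double)(end_ns - start_ns) / (double)S2NS;
    return (uint32_t)((double)recved_pkts / elapsed_s) + slave_qps;
}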

View File

@@ -49,7 +49,7 @@ class memload_generator {
constexpr static uint32_t TRANSACTION_CNT =
0x8; // how many transactions per cycle
constexpr static uint32_t TRANSACTION_SZ =
0x8; // how large each transaction is
sizeof(uint64_t); // how large each transaction is
std::vector<struct thread_info *> thr_infos;
std::atomic<int> state;

View File

@@ -160,8 +160,8 @@ constexpr static uint16_t PKT_TYPE_FIN = 7;
constexpr static uint16_t PKT_TYPE_FIN_ACK = 8;
struct pkt_payload_qps {
uint32_t qps;
uint32_t total_pkts;
uint32_t total_loss;
uint32_t recved_pkts;
uint32_t lost_pkts;
};
constexpr static uint16_t NUM_PKT_TYPES = PKT_TYPE_FIN_ACK + 1;
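These renamed counters travel in network byte order: the client stores them with rte_cpu_to_be_32 and the master reads them back with rte_be_to_cpu_32, as the other hunks in this commit show. A minimal sketch of the sender side under that assumption (the helper name is illustrative):

#include <cstdint>
#include <rte_byteorder.h>

struct pkt_payload_qps {
    uint32_t qps;
    uint32_t recved_pkts;
    uint32_t lost_pkts;
};

// Convert host-order stats to big-endian before the payload goes on the wire.
static void
fill_qps_payload(struct pkt_payload_qps *pld, uint32_t qps, uint32_t recved, uint32_t lost)
{
    pld->qps = rte_cpu_to_be_32(qps);
    pld->recved_pkts = rte_cpu_to_be_32(recved);
    pld->lost_pkts = rte_cpu_to_be_32(lost);
}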

View File

@@ -20,8 +20,8 @@
constexpr static unsigned int MBUF_MAX_COUNT = 65536;
constexpr static unsigned int MBUF_CACHE_SIZE = 512;
constexpr static unsigned int RX_RING_SIZE = 1024;
constexpr static unsigned int TX_RING_SIZE = 1024;
constexpr static unsigned int RX_RING_SIZE = 2048;
constexpr static unsigned int TX_RING_SIZE = 2048;
constexpr static unsigned int BURST_SIZE = 32;
constexpr static size_t MEMPOOL_NAME_BUF_LEN = 64;
@@ -697,7 +697,7 @@ main(int argc, char *argv[])
tinfo->node_id = 0; // XXX: hack
options.s_thr_info.push_back(tinfo);
ntr(NTR_DEP_USER1, NTR_LEVEL_INFO,
"main: thread %d assigned to cpu %d, node %d", tinfo->tid,
"main: thread %d assigned to cpu %d, node %d\n", tinfo->tid,
tinfo->lcore_id, tinfo->node_id);
}

View File

@@ -258,6 +258,8 @@ memload_generator::worker_thrd(void *_tinfo)
next_ts = next_ts + tinfo->ia_gen->generate() * S2NS;
for (uint i = 0; i < TRANSACTION_CNT; i++) {
// memcpy((uint64_t *)(char *)tinfo->region + i,
// &now, sizeof(now));
_mm_stream_si64(
(long long *)((char *)tinfo->region +
i * TRANSACTION_SZ),
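Changing TRANSACTION_SZ from the literal 0x8 to sizeof(uint64_t) ties the stride to the width of the non-temporal store used here: _mm_stream_si64 writes one 64-bit value per call. A minimal sketch of that inner loop under the same assumption (the region and value names are illustrative):

#include <cstdint>
#include <x86intrin.h>

constexpr uint32_t TRANSACTION_CNT = 0x8;             // transactions per cycle
constexpr uint32_t TRANSACTION_SZ = sizeof(uint64_t); // bytes per transaction

// Issue TRANSACTION_CNT non-temporal 64-bit stores starting at region.
static void
stream_transactions(void *region, uint64_t value)
{
    for (uint32_t i = 0; i < TRANSACTION_CNT; i++) {
        _mm_stream_si64(
            (long long *)((char *)region + i * TRANSACTION_SZ),
            (long long)value);
    }
}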

View File

@@ -60,7 +60,7 @@ struct thread_info {
unsigned int rxqid { 0 };
unsigned int txqid { 0 };
// this field is read by the stat collecting thread
std::atomic<int> total_pkts { 0 };
std::atomic<int> recved_pkts { 0 };
std::atomic<int> lost_pkts { 0 };
Generator *ia_gen { nullptr };
@@ -113,18 +113,18 @@ static struct options_t options;
static inline void
calc_stats(
uint64_t now, uint32_t *qps, uint32_t *total_pkt, uint32_t *total_loss)
uint64_t now, uint32_t *qps, uint32_t *recved_pkt, uint32_t *total_loss)
{
uint32_t tot = 0;
uint32_t recv = 0;
uint32_t loss = 0;
for (auto i : options.s_thr_info) {
tot += i->total_pkts.load();
recv += i->recved_pkts.load();
loss += i->lost_pkts.load();
}
if (total_pkt != nullptr) {
*total_pkt = tot;
if (recved_pkt != nullptr) {
*recved_pkt = recv;
}
if (total_loss != nullptr) {
@@ -132,7 +132,7 @@ calc_stats(
}
if (qps != nullptr) {
*qps = (uint32_t)((double)(tot - loss) /
*qps = (uint32_t)((double)(recv) /
((double)(now - options.s_ts_begin.load()) / (double)S2NS));
}
}
@@ -350,11 +350,11 @@ pkt_loop(struct thread_info *tinfo)
}
uint32_t qps;
uint32_t total_pkt;
uint32_t total_recv;
uint32_t total_loss;
calc_stats(now, &qps,
&total_pkt, &total_loss);
&total_recv, &total_loss);
struct pkt_hdr *pkt_hdr;
if (alloc_pkt_hdr(
@@ -372,11 +372,11 @@ pkt_loop(struct thread_info *tinfo)
pkt_hdr->payload;
pld_qps->qps = rte_cpu_to_be_32(
qps);
pld_qps->total_loss =
pld_qps->recved_pkts =
rte_cpu_to_be_32(
total_loss);
pld_qps->total_pkts =
rte_cpu_to_be_32(total_pkt);
total_recv);
pld_qps->lost_pkts =
rte_cpu_to_be_32(total_loss);
const uint16_t nb_tx =
rte_eth_tx_burst(
@@ -448,6 +448,7 @@ pkt_loop(struct thread_info *tinfo)
}
delete it->second;
sent_epochs.erase(it);
tinfo->recved_pkts.fetch_add(1);
} else {
// we recved an epoch we never sent
ntr(NTR_DEP_USER1, NTR_LEVEL_DEBUG,
@@ -503,8 +504,6 @@ pkt_loop(struct thread_info *tinfo)
einfo->ts = now;
sent_epochs.insert({ epoch, einfo });
tinfo->total_pkts.fetch_add(1);
ntr(NTR_DEP_USER1, NTR_LEVEL_DEBUG,
"pkt_loop <thread %d>: sending packet %p with epoch 0x%x\n",
tinfo->id, (void *)tx_bufs[total_send], epoch);
@@ -919,10 +918,10 @@ main(int argc, char *argv[])
}
uint32_t qps;
uint32_t total_pkts;
uint32_t total_recv;
uint32_t total_loss;
calc_stats(nm_get_uptime_ns(), &qps, &total_pkts, &total_loss);
ntr(NTR_DEP_USER1, NTR_LEVEL_INFO, "qps = %d, total = %d, loss = %d\n", qps, total_pkts, total_loss);
calc_stats(nm_get_uptime_ns(), &qps, &total_recv, &total_loss);
ntr(NTR_DEP_USER1, NTR_LEVEL_INFO, "qps = %d, recv = %d, loss = %d\n", qps, total_recv, total_loss);
for (auto each : options.s_thr_info) {
delete each->load_gen;

View File

@@ -34,8 +34,8 @@ def process_dir(rootdir):
return ret
marker_map = ["o", "P", "s", "v", "*", "+", "^", "1", "2", "d", "X"]
color_map = ["xkcd:black", "xkcd:red", "xkcd:blue", "xkcd:green", "xkcd:cyan", "xkcd:yellow"]
marker_map = ["o", "P", "s", "v", "*", "+", "^", "1", "2", "d", "X", "o", "P", "s", "v", "*", "+", "^", "1", "2", "d", "X"]
color_map = ["xkcd:black", "xkcd:red", "xkcd:blue", "xkcd:green", "xkcd:cyan", "xkcd:yellow", "xkcd:purple", "xkcd:orange", "xkcd:salmon", "xkcd:lightgreen", "xkcd:indigo", "xkcd:brown"]
parser_idx_labels = ["srv_hw", "srv_sw", "clt_hw", "clt_sw"]
def add_curve(eax, label : str, qps_arr : [], lat_arr : [], marker : str, color : str):

View File

@@ -42,9 +42,9 @@ class khat_parser:
if len(cells) != 5:
raise Exception("Invalid headline:" + line)
self.qps = int(cells[0])
self.master_total = int(cells[1])
self.master_recv = int(cells[1])
self.master_loss = int(cells[2])
self.slave_total = int(cells[3])
self.slave_recv = int(cells[3])
self.slave_loss = int(cells[4])
first = False
continue

View File

@@ -23,7 +23,7 @@ tc_test_id = 0
def init(odir = "./results.d/"):
global tc_output_dir
tc_output_dir = odir + "_" + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
tc_output_dir = odir
tc_output_dir = os.path.expanduser(tc_output_dir)
os.system("mkdir -p " + tc_output_dir)
global tc_logfile

View File

@@ -13,13 +13,41 @@ import libpar as par
import libtc as tc
step_inc_pct = 100
init_step = 200000 #
start_step = 200000
term_qps = 85000000000
init_step = 600000 #
start_step = 600000
term_qps = 50000000
term_pct = 1
inc_pct = 50
server_port = 23444
# memgen
enable_memgen = True
memgen_mask = [
"0xFFFFFF000000",
"0xFFFFFF000000",
"0xFFFFFF000000",
"0xFFFFFF000000",
"0xFFFFFF000000",
"0x000000FFFFFF",
"0x000000FFFFFF",
"0x000000FFFFFF",
"0x000000FFFFFF",
"0x000000FFFFFF",
]
memgen_target = [
"0xA",
"0xA",
"0xA",
"0xA",
"0xA",
"0xA000000",
"0xA000000",
"0xA000000",
"0xA000000",
"0xA000000",
]
# paths
test_dir = "/numam.d/build"
@@ -28,29 +56,39 @@ root_dir = os.path.join(file_dir,"..")
sample_filename = "sample.txt"
affinity = [
"0xA0", # all first socket
"0x0A000000", # all 2nd socket
"0x20",
"0xA0",
"0xAA0",
"0xAAAA0",
"0xAAAAAA",
"0x2000000",
"0xA000000",
"0xAA000000",
"0xAAAA000000",
"0xAAAAAA000000",
]
master = ["skylake2.rcs.uwaterloo.ca"]
master_spec = ["192.168.123.10@3c:15:fb:c9:f3:36"]
master_cpumask = "0x4" # 1 thread
master_cpumask = "0x8" # 1 thread
server = ["skylake3.rcs.uwaterloo.ca"]
server_spec = ["192.168.123.9@3c:15:fb:c9:f3:4b"]
clients = ["skylake6.rcs.uwaterloo.ca", "skylake7.rcs.uwaterloo.ca" ] #, "skylake8.rcs.uwaterloo.ca"]
client_spec = ["192.168.123.11@3c:15:fb:62:9b:2f", "192.168.123.12@3c:15:fb:c9:f3:44"] #, "192.168.123.13@3c:15:fb:62:9c:be"]
client_cpumask = "0xAAAAAAAAAAAA"
clients = ["skylake6.rcs.uwaterloo.ca", "skylake7.rcs.uwaterloo.ca"] # "skylake8.rcs.uwaterloo.ca"]
client_spec = ["192.168.123.11@3c:15:fb:62:9b:2f", "192.168.123.12@3c:15:fb:c9:f3:44"] # "192.168.123.13@3c:15:fb:62:9c:be"]
client_cpumask = "0xAAAAAA"
rage_quit = 1000 #1s
client_rage_quit = 1000 #1s
warmup = 5
duration = 25
duration = 10
cooldown = 0
cacheline = 0
SSH_PARAM = "-o StrictHostKeyChecking=no -p77"
SSH_USER = "oscar"
master_qps = 100
master_pkt_loss = 1000
master_pkt_loss_failure = -1
hostfile = None
lockstat = False
@@ -79,15 +117,18 @@ def get_client_str():
def calc_client_ld(ld : int):
return 0 if ld == 0 else ((ld - master_qps) / len(clients))
def run_exp(affinity : str, ld : int):
def run_exp(affinity : str, ld : int, aff_idx : int):
while True:
server_cmd = "sudo " + test_dir + "/khat --log-level lib.eal:err -- -A " + affinity + \
" -H " + server_spec[0]
if enable_memgen:
server_cmd += " -m -b 0 -X " + memgen_target[aff_idx] + " -x " + memgen_mask[aff_idx]
if client_only:
ssrv = None
tc.log_print(server_cmd)
else:
# start server
tc.log_print("Starting server...")
server_cmd = "sudo " + test_dir + "/khat --log-level lib.eal:err -- -A " + affinity + \
" -H " + server_spec[0]
tc.log_print(server_cmd)
ssrv = tc.remote_exec(server, server_cmd, blocking=False)
@@ -101,8 +142,8 @@ def run_exp(affinity : str, ld : int):
" -q " + str(calc_client_ld(ld)) + \
" -H " + client_spec[i] + \
" -s " + server_spec[0] + \
" -r " + str(rage_quit) + \
" -D 0 -L 100 "
" -r " + str(client_rage_quit) + \
" -D 0 -l 100 "
tc.log_print(client_cmd)
sclt.append(tc.remote_exec([clients[i]], client_cmd, blocking=False)[0])
@@ -116,7 +157,8 @@ def run_exp(affinity : str, ld : int):
" -T " + str(warmup) + \
" -i exponential" + \
" -q " + str(master_qps) + \
" -r " + str(rage_quit) + \
" -l " + str(master_pkt_loss) + \
" -L " + str(master_pkt_loss_failure) + \
" -A " + master_cpumask + \
" -H " + master_spec[0] + \
get_client_str()
@@ -127,6 +169,7 @@ def run_exp(affinity : str, ld : int):
# launch stderr monitoring thread
exclude = None
tc.errthr_create(sp, exclude)
if not client_only:
tc.errthr_create(ssrv, exclude)
tc.errthr_create(sclt, exclude)
tc.errthr_start()
@@ -168,7 +211,7 @@ def keep_results():
tc.log_print(mvcmd)
sp.check_call(mvcmd, shell=True)
tc.log_print("=== Summary - qps: " + str(parser.qps) + " master loss: " + str(float(parser.master_loss) / float(parser.master_total)) + " slave loss: " + str(float(parser.slave_loss) / float(parser.slave_total)) * 100.0 + "%" )
tc.log_print("=== Summary - qps: " + str(parser.qps) + " master loss: " + str(float(parser.master_loss) / float(parser.master_recv + parser.master_loss) * 100.00) + " slave loss: " + str(float(parser.slave_loss) / float(parser.slave_recv + parser.slave_loss) * 100.0) + "%" )
tc.log_print("=== Server HW:")
tc.log_print(par.mutilate_data.build_mut_output(parser.srv_hwlat, [parser.qps]) + "\n")
tc.log_print("=== Server SW:")
@@ -188,22 +231,26 @@ def main():
tc.set_ssh_param(SSH_PARAM)
tc.set_ssh_user(SSH_USER)
output_dirname = "run"
options = getopt.getopt(sys.argv[1:], 'h:sldcp')[0]
options = getopt.getopt(sys.argv[1:], 'h:so:c')[0]
for opt, arg in options:
if opt in ('-h'):
hostfile = arg
elif opt in ('-s'):
stop_all()
return
elif opt in ('-o'):
output_dirname = arg
elif opt in ('-c'):
client_only=True
tc.init("~/results.d/numam/")
tc.init("~/results.d/numam/" + output_dirname + "_" + datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
tc.log_print("Configuration:\n" + \
"Hostfile: " + ("None" if hostfile == None else hostfile) + "\n" \
"Client only: " + str(client_only) + "\n")
"hostfile: " + ("None" if hostfile == None else hostfile) + "\n" \
"client only: " + str(client_only) + "\n" + \
"output: " + output_dirname)
if hostfile != None:
hosts = tc.parse_hostfile(hostfile)
@@ -222,14 +269,17 @@ def main():
tc.begin(eaff)
tc.log_print("============ Affinity: " + str(eaff) + " Load: MAX" + " ============")
run_exp(eaff, 0)
run_exp(eaff, 0, i)
keep_results()
stop_all()
if client_only:
break
while True:
tc.log_print("============ Affinity: " + str(eaff) + " Load: " + str(cur_load) + " ============")
run_exp(eaff, cur_load)
run_exp(eaff, cur_load, i)
qps = keep_results()