app: rework autotest.py

Signed-off-by: Intel
This commit is contained in:
Intel 2012-12-20 00:00:00 +01:00 committed by Thomas Monjalon
parent a25904dbbb
commit e2cc79b75d
6 changed files with 1097 additions and 1002 deletions

View File

@ -32,632 +32,52 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Script that uses qemu controlled by python-pexpect to check that
# all autotests are working in the baremetal environment.
# Script that uses either test app or qemu controlled by python-pexpect
# NOTE(review): the two import lines below come from different revisions of
# this script (the diff extraction kept both); only one set should survive.
import sys, pexpect, time, os, re
import sys, autotest_data, autotest_runner
# command-line layout (new flow): argv[1] = test app or iso image,
# argv[2] = build directory, argv[3] = target triple
directory = sys.argv[2]
target = sys.argv[3]
log_file = "%s.txt"%(target)
# baremetal targets boot the iso under qemu; other targets run the test
# binary directly with coremask 0xf and 4 memory channels
if "baremetal" in target:
cmdline = "qemu-system-x86_64 -cdrom %s.iso -boot d "%(sys.argv[1])
cmdline += "-m 2000 -smp 4 -nographic -net nic,model=e1000"
platform = "QEMU x86_64"
else:
cmdline = "%s -c f -n 4"%(sys.argv[1])
try:
# best-effort platform description; absence of the file is not fatal
platform = open("/root/rte_platform_model.txt").read()
except:
platform = "unknown"
print cmdline
def usage():
    """Print command-line usage for the autotest script to stdout."""
    # A single parenthesized print keeps the whole message on one line and
    # parses under both Python 2 and 3 (the original used a trailing-comma
    # print pair to the same effect).
    print("Usage: autotest.py [test app|test iso image] "
          "[target] [whitelist|-blacklist]")
# NOTE(review): the diff extraction left this span inconsistent -- the
# argv-count guard below is trapped INSIDE the report_hdr triple-quoted
# string, so it never executes.  No comment can be inserted between the
# quotes without changing the string's runtime value.
report_hdr=""".. <COPYRIGHT_TAG>
if len(sys.argv) < 3:
usage()
sys.exit(1)
"""
# NOTE(review): 'target' was already assigned from sys.argv[3] above; this
# reassignment from sys.argv[2] belongs to the other revision of the script.
target = sys.argv[2]
test_whitelist=None
test_blacklist=None
class SubTest:
    """A single named autotest: how to launch it and how to report it."""

    def __init__(self, title, function, command=None, timeout=10, genreport=None):
        """Store the subtest description.

        title     -- human-readable subtest name
        function  -- callable(child, command=..., timeout=...) driving the test
        command   -- command line sent to the test app (None for boot tests)
        timeout   -- seconds to wait for the subtest to finish
        genreport -- optional callable returning extra report text
        """
        (self.title, self.function, self.command,
         self.timeout, self.genreport) = (title, function, command,
                                          timeout, genreport)
# get blacklist/whitelist
# A leading '-' on the comma-separated list turns it into a blacklist;
# otherwise it is a whitelist.  NOTE(review): sys.argv[3] also feeds
# 'target' above in this merged listing -- one of the two is stale.
if len(sys.argv) > 3:
testlist = sys.argv[3].split(',')
# titles are compared lowercase
testlist = [test.lower() for test in testlist]
if testlist[0].startswith('-'):
# NOTE: lstrip removes every leading '-', not just the first one
testlist[0] = testlist[0].lstrip('-')
test_blacklist = testlist
else:
test_whitelist = testlist
class AutoTest:
"""This class contains all methods needed to launch several
automatic tests, archive test results, log, and generate a nice
test report in restructured text"""
# NOTE(review): these are class attributes, so test_list/report_list are
# shared by every AutoTest instance; fine for the single instance created
# in this script, but a latent aliasing bug otherwise.
title = "new"
mainlog = None
logbuf = None
literal = 0
test_list = []
report_list = []
child = None
def __init__(self, pexpectchild, filename, mode):
"Init the Autotest class"
# 'file' is the Python 2 builtin; the instance registers itself as the
# pexpect logfile so write()/flush() below capture all child output
self.mainlog = file(filename, mode)
self.child = pexpectchild
pexpectchild.logfile = self
def register(self, filename, title, subtest_list):
"Register a test with a list of subtests"
test = {}
test["filename"] = filename
test["title"] = title
test["subtest_list"] = subtest_list
self.test_list.append(test)
def start(self):
"start the tests, and fill the internal report_list field"
for t in self.test_list:
report = {}
report["date"] = time.asctime()
report["title"] = t["title"]
report["filename"] = t["filename"]
report["subreport_list"] = []
report["fails"] = 0
report["success"] = 0
# NOTE(review): "subreport_list" is initialized twice (harmless)
report["subreport_list"] = []
for st in t["subtest_list"]:
# skip subtests filtered out by the whitelist/blacklist, if any
if test_whitelist is not None and st.title not in test_whitelist:
continue
if test_blacklist is not None and st.title in test_blacklist:
continue
subreport = {}
# reportbuf collects the pexpect log for this subtest only;
# it is appended to by write() below
self.reportbuf = ""
subreport["title"] = st.title
subreport["func"] = st.function
subreport["command"] = st.command
subreport["timeout"] = st.timeout
subreport["genreport"] = st.genreport
# launch subtest
print "%s (%s): "%(subreport["title"], subreport["command"]),
sys.stdout.flush()
start = time.time()
res = subreport["func"](self.child,
command = subreport["command"],
timeout = subreport["timeout"])
# NOTE(review): 't' is rebound here, shadowing the test dict of
# the outer loop for the rest of this iteration (confusing but
# harmless, as the dict is not used again below)
t = int(time.time() - start)
subreport["time"] = "%dmn%d"%(t/60, t%60)
subreport["result"] = res[0] # 0 or -1
subreport["result_str"] = res[1] # cause of fail
subreport["logs"] = self.reportbuf
print "%s [%s]"%(subreport["result_str"], subreport["time"])
if subreport["result"] == 0:
report["success"] += 1
else:
report["fails"] += 1
report["subreport_list"].append(subreport)
self.report_list.append(report)
def gen_report(self):
# Write one restructured-text report file per registered test.
for report in self.report_list:
# main report header and stats
self.literal = 0
reportlog = file(report["filename"], "w")
reportlog.write(report_hdr)
reportlog.write(report["title"] + "\n")
# re.sub(".", "=", s) replaces every character, producing an '='
# underline exactly as long as the title
reportlog.write(re.sub(".", "=", report["title"]) + "\n\n")
reportlog.write("Autogenerated test report:\n\n" )
reportlog.write("- date: **%s**\n"%(report["date"]))
reportlog.write("- target: **%s**\n"%(target))
reportlog.write("- success: **%d**\n"%(report["success"]))
reportlog.write("- fails: **%d**\n"%(report["fails"]))
reportlog.write("- platform: **%s**\n\n"%(platform))
# summary
reportlog.write(".. csv-table:: Test results summary\n")
reportlog.write(' :header: "Name", "Result"\n\n')
for subreport in report["subreport_list"]:
if subreport["result"] == 0:
res_str = "Success"
else:
res_str = "Failure"
reportlog.write(' "%s", "%s"\n'%(subreport["title"], res_str))
reportlog.write('\n')
# subreports
for subreport in report["subreport_list"]:
# print subtitle
reportlog.write(subreport["title"] + "\n")
reportlog.write(re.sub(".", "-", subreport["title"]) + "\n\n")
# print logs as an indented literal block
reportlog.write("::\n \n ")
s = subreport["logs"].replace("\n", "\n ")
reportlog.write(s)
# print result
reportlog.write("\n\n")
reportlog.write("**" + subreport["result_str"] + "**\n\n")
# custom genreport
if subreport["genreport"] != None:
s = subreport["genreport"]()
reportlog.write(s)
reportlog.close()
# displayed on console
print
print "-------------------------"
print
if report["fails"] == 0:
print "All test OK"
else:
print "%s test(s) failed"%(report["fails"])
# file API, to store logs from pexpect
def write(self, buf):
# strip carriage returns before archiving to the main log and to the
# per-subtest buffer (reportbuf is (re)set in start(); calling write()
# before start() would raise AttributeError)
s = buf[:]
s = s.replace("\r", "")
self.mainlog.write(s)
self.reportbuf += s
def flush(self):
self.mainlog.flush()
def close(self):
self.mainlog.close()
# Poll for the "RTE>>" command prompt, nudging the child with a blank
# line between one-second attempts.  Returns 0 on success, -1 after
# three failed tries.
def wait_prompt(child):
    attempts = 3
    while attempts > 0:
        matched = child.expect(["RTE>>", pexpect.TIMEOUT], timeout = 1)
        child.sendline("")
        if matched == 0:
            return 0
        attempts -= 1
    print("Cannot find prompt")
    return -1
# Wait (up to two minutes) for the target to boot to the "RTE>>"
# prompt, then fall back to wait_prompt() probing.  Returns 0 on
# success, -1 when no prompt ever shows up.
def wait_boot(child):
    if child.expect(["RTE>>", pexpect.TIMEOUT], timeout = 120) == 0:
        return 0
    if wait_prompt(child) != -1:
        return 0
    print("Target did not boot, failed")
    return -1
# Ask the target's CLI to exit once a prompt is available.
# Returns a (status, message) pair like the autotest helpers do.
def quit(child):
    if wait_boot(child) == 0:
        child.sendline("quit")
        return 0, "Success"
    return -1, "Cannot find prompt"
# Generic driver for autotests that need no user interaction: send the
# command over the command-line interface and wait for the final
# "Test OK" / "Test Failed" verdict.
def default_autotest(child, command, timeout=10):
    if wait_prompt(child) != 0:
        return -1, "Failed: cannot find prompt"
    child.sendline(command)
    outcome = child.expect(["Test OK", "Test Failed",
                            pexpect.TIMEOUT], timeout = timeout)
    if outcome == 0:
        return 0, "Success"
    if outcome == 1:
        return -1, "Failed"
    return -1, "Failed [Timeout]"
# Pseudo-test that only waits for the target to finish booting.
def boot_autotest(child, **kargs):
    return (0, "Success") if wait_boot(child) == 0 else (-1, "Cannot find prompt")
# Test memory dump: at least one memory zone with a non-zero length
# must be displayed before the final verdict.
def memory_autotest(child, command, **kargs):
    if wait_prompt(child) != 0:
        return -1, "Failed: cannot find prompt"
    child.sendline(command)
    zone_re = "phys:0x[0-9a-f]*, len:0x([0-9a-f]*), virt:0x[0-9a-f]*, socket_id:[0-9]*"
    if child.expect([zone_re, pexpect.TIMEOUT], timeout = 180) != 0:
        return -1, "Failed: timeout"
    # length is printed in hexadecimal
    zone_len = int(child.match.groups()[0], 16)
    if zone_len <= 0:
        return -1, "Failed: bad size"
    verdict = child.expect(["Test OK", "Test Failed",
                            pexpect.TIMEOUT], timeout = 10)
    if verdict == 1:
        return -1, "Failed: C code returned an error"
    if verdict == 2:
        return -1, "Failed: timeout"
    return 0, "Success"
# Test some libc functions including scanf. This requires an
# interaction with the user (simulated in expect), so we cannot use
# default_autotest() here.
def string_autotest(child, command, **kargs):
    """Drive the interactive scanf step (type "123456" when prompted),
    then wait for the final verdict."""
    if wait_prompt(child) != 0:
        return -1, "Failed: cannot find prompt"
    child.sendline(command)
    index = child.expect(["Now, test scanf, enter this number",
                          pexpect.TIMEOUT], timeout = 10)
    if index != 0:
        return -1, "Failed: timeout"
    child.sendline("123456")
    index = child.expect(["number=123456", pexpect.TIMEOUT], timeout = 10)
    if index != 0:
        return -1, "Failed: timeout (2)"
    index = child.expect(["Test OK", "Test Failed",
                          pexpect.TIMEOUT], timeout = 10)
    # bug fix: a timeout here used to be misreported as a C error
    if index == 1:
        return -1, "Failed: C code returned an error"
    if index == 2:
        return -1, "Failed: timeout (3)"
    return 0, "Success"
# Test spinlock. This requires to check the order of displayed lines:
# we cannot use default_autotest() here.
def spinlock_autotest(child, command, **kargs):
# 'i' and 'ir' track the highest core id seen so far for the plain and
# recursive lock phases; "Hello" lines must arrive in ascending order
i = 0
ir = 0
if wait_prompt(child) != 0:
return -1, "Failed: cannot find prompt"
child.sendline(command)
while True:
index = child.expect(["Test OK",
"Test Failed",
"Hello from core ([0-9]*) !",
"Hello from within recursive locks from ([0-9]*) !",
pexpect.TIMEOUT], timeout = 20)
# ok
if index == 0:
break
# message, check ordering
elif index == 2:
if int(child.match.groups()[0]) < i:
return -1, "Failed: bad order"
i = int(child.match.groups()[0])
elif index == 3:
if int(child.match.groups()[0]) < ir:
return -1, "Failed: bad order"
ir = int(child.match.groups()[0])
# fail ("Test Failed" or timeout)
else:
return -1, "Failed: timeout or error"
return 0, "Success"
# Test rwlock. This requires to check the order of displayed lines:
# we cannot use default_autotest() here.
def rwlock_autotest(child, command, **kargs):
# 'i' tracks the highest core id seen; it becomes the 0xffff sentinel
# once the "Global write lock" line (which must come last) is seen
i = 0
if wait_prompt(child) != 0:
return -1, "Failed: cannot find prompt"
child.sendline(command)
while True:
index = child.expect(["Test OK",
"Test Failed",
"Hello from core ([0-9]*) !",
"Global write lock taken on master core ([0-9]*)",
pexpect.TIMEOUT], timeout = 10)
# ok -- but the write-lock line must have been seen before "Test OK"
if index == 0:
if i != 0xffff:
return -1, "Failed: a message is missing"
break
# message, check ordering
elif index == 2:
if int(child.match.groups()[0]) < i:
return -1, "Failed: bad order"
i = int(child.match.groups()[0])
# must be the last message, check ordering
elif index == 3:
i = 0xffff
# fail ("Test Failed" or timeout)
else:
return -1, "Failed: timeout or error"
return 0, "Success"
# Test logs. This requires to check the order of displayed lines:
# we cannot use default_autotest() here.
def logs_autotest(child, command, **kargs):
    """Check that the twelve expected log lines appear in order, then
    check the final verdict (the previous version ignored it and
    returned Success even on "Test Failed" or a timeout)."""
    if wait_prompt(child) != 0:
        return -1, "Failed: cannot find prompt"
    child.sendline(command)
    # expected output, in exact order
    log_list = [
        "TESTAPP1: this is a debug level message",
        "TESTAPP1: this is a info level message",
        "TESTAPP1: this is a warning level message",
        "TESTAPP2: this is a info level message",
        "TESTAPP2: this is a warning level message",
        "TESTAPP1: this is a debug level message",
        "TESTAPP1: this is a debug level message",
        "TESTAPP1: this is a info level message",
        "TESTAPP1: this is a warning level message",
        "TESTAPP2: this is a info level message",
        "TESTAPP2: this is a warning level message",
        "TESTAPP1: this is a debug level message",
    ]
    for log_msg in log_list:
        index = child.expect([log_msg,
                              "Test OK",
                              "Test Failed",
                              pexpect.TIMEOUT], timeout = 10)
        # not ok: out-of-order verdict or timeout
        if index != 0:
            return -1, "Failed: timeout or error"
    index = child.expect(["Test OK",
                          "Test Failed",
                          pexpect.TIMEOUT], timeout = 10)
    # bug fix: the final verdict used to be discarded
    if index != 0:
        return -1, "Failed: timeout or error"
    return 0, "Success"
# Test timers. This requires to check the order of displayed lines:
# we cannot use default_autotest() here.
def timer_autotest(child, command, **kargs):
# NOTE(review): 'i' and 'prev_lcore_timer1' below are never used
i = 0
if wait_prompt(child) != 0:
return -1, "Failed: cannot find prompt"
child.sendline(command)
index = child.expect(["Start timer stress tests \(30 seconds\)",
"Test Failed",
pexpect.TIMEOUT], timeout = 10)
# not ok
if index != 0:
return -1, "Failed: timeout or error"
index = child.expect(["Start timer basic tests \(30 seconds\)",
"Test Failed",
pexpect.TIMEOUT], timeout = 40)
# not ok
if index != 0:
return -1, "Failed: timeout or error (2)"
# lcore_timN remembers which core timer N last fired on (-1 = not seen
# yet); the checks below enforce each timer's expected core affinity
prev_lcore_timer1 = -1
lcore_tim0 = -1
lcore_tim1 = -1
lcore_tim2 = -1
lcore_tim3 = -1
while True:
index = child.expect(["TESTTIMER: ([0-9]*): callback id=([0-9]*) count=([0-9]*) on core ([0-9]*)",
"Test OK",
"Test Failed",
pexpect.TIMEOUT], timeout = 10)
if index == 1:
break
if index != 0:
return -1, "Failed: timeout or error (3)"
try:
# parse "TESTTIMER: <time>: callback id=<id> count=<cnt> on core <lcore>"
t = int(child.match.groups()[0])
id = int(child.match.groups()[1])
cnt = int(child.match.groups()[2])
lcore = int(child.match.groups()[3])
except:
return -1, "Failed: cannot parse output"
# timer0 always expires on the same core when cnt < 20
if id == 0:
if lcore_tim0 == -1:
lcore_tim0 = lcore
elif lcore != lcore_tim0 and cnt < 20:
return -1, "Failed: lcore != lcore_tim0 (%d, %d)"%(lcore, lcore_tim0)
if cnt > 21:
return -1, "Failed: tim0 cnt > 21"
# timer1 each time expires on a different core
if id == 1:
if lcore == lcore_tim1:
return -1, "Failed: lcore == lcore_tim1 (%d, %d)"%(lcore, lcore_tim1)
lcore_tim1 = lcore
# NOTE(review): the limit checked is 10 but the message says 30
if cnt > 10:
return -1, "Failed: tim1 cnt > 30"
# timer2 always expires on the same core (original comment said
# "timer0" -- copy-paste)
if id == 2:
if lcore_tim2 == -1:
lcore_tim2 = lcore
elif lcore != lcore_tim2:
return -1, "Failed: lcore != lcore_tim2 (%d, %d)"%(lcore, lcore_tim2)
if cnt > 30:
return -1, "Failed: tim2 cnt > 30"
# timer3 always expires on the same core (original comment said
# "timer0" -- copy-paste)
if id == 3:
if lcore_tim3 == -1:
lcore_tim3 = lcore
elif lcore != lcore_tim3:
# NOTE(review): format arguments look reversed (prints new -> old)
return -1, "Failed: lcore_tim3 changed (%d -> %d)"%(lcore, lcore_tim3)
if cnt > 30:
return -1, "Failed: tim3 cnt > 30"
# must be 2 different cores
# NOTE(review): indentation was lost in this listing; if this check ran
# before both lcore_tim0 and lcore_tim3 were assigned (both -1) it would
# fail spuriously -- confirm placement against the original file.
if lcore_tim0 == lcore_tim3:
return -1, "Failed: lcore_tim0 (%d) == lcore_tim3 (%d)"%(lcore_tim0, lcore_tim3)
return 0, "Success"
# Ring autotest: run the standard test, then poke the watermark and
# bulk-quota knobs through the extra ring commands and verify that
# "dump_ring test" reflects them.
def ring_autotest(child, command, timeout=10):
    if wait_prompt(child) != 0:
        return -1, "Failed: cannot find prompt"
    child.sendline(command)
    if child.expect(["Test OK", "Test Failed",
                     pexpect.TIMEOUT], timeout = timeout) != 0:
        return -1, "Failed"
    for extra_cmd in ("set_watermark test 100",
                      "set_quota test 16",
                      "dump_ring test"):
        child.sendline(extra_cmd)
    if child.expect([" watermark=100",
                     pexpect.TIMEOUT], timeout = 1) != 0:
        return -1, "Failed: bad watermark"
    if child.expect([" bulk_default=16",
                     pexpect.TIMEOUT], timeout = 1) != 0:
        return -1, "Failed: bad quota"
    return 0, "Success"
# Build the restructured-text "performance curves" section of the ring
# report: regenerate the graphs via the SDK's graph_ring.py helper, then
# reference every relevant PNG in the images directory.
def ring_genreport():
    section = "Performance curves\n"
    section += "------------------\n\n"
    script = os.path.join(os.getenv("RTE_SDK"), "app/test/graph_ring.py")
    title ='"Autotest %s %s"'%(target, time.asctime())
    os.system("/usr/bin/python %s %s %s"%(script, target + ".txt", title))
    for name in os.listdir("."):
        if not (name.startswith("ring") and name.endswith(".svg")):
            continue
        # skip single producer/consumer graphs
        if "_sc" in name or "_sp" in name:
            continue
        png = name[:-4] + ".png"
        section += ".. figure:: ../../images/autotests/%s/%s\n"%(target, png)
        section += " :width: 50%\n\n"
        section += " %s\n\n"%(png)
    return section
# Build the restructured-text "performance curves" section of the
# mempool report: regenerate the graphs via graph_mempool.py, then
# reference every relevant PNG in the images directory.
def mempool_genreport():
    section = "Performance curves\n"
    section += "------------------\n\n"
    script = os.path.join(os.getenv("RTE_SDK"), "app/test/graph_mempool.py")
    title ='"Autotest %s %s"'%(target, time.asctime())
    os.system("/usr/bin/python %s %s %s"%(script, target + ".txt", title))
    for name in os.listdir("."):
        if not (name.startswith("mempool") and name.endswith(".svg")):
            continue
        # skip when n_keep = 128
        if "_128." in name:
            continue
        png = name[:-4] + ".png"
        section += ".. figure:: ../../images/autotests/%s/%s\n"%(target, png)
        section += " :width: 50%\n\n"
        section += " %s\n\n"%(png)
    return section
#
# main
#
# NOTE(review): everything below interleaves two revisions of the script --
# the old pexpect/AutoTest flow (register/start/gen_report) and the
# reworked AutotestRunner flow -- merged together by the diff extraction.
# Only one of the two flows should survive in a real file.
if len(sys.argv) > 4:
testlist=sys.argv[4].split(',')
if testlist[0].startswith('-'):
testlist[0]=testlist[0].lstrip('-')
test_blacklist=testlist
else:
test_whitelist=testlist
child = pexpect.spawn(cmdline)
autotest = AutoTest(child, log_file,'w')
# timeout for memcpy and hash test
# adjust test command line
if "baremetal" in target:
timeout = 60*180
cmdline = "qemu-system-x86_64 -cdrom %s.iso -boot d " % (sys.argv[1])
cmdline += "-m 2000 -smp 4 -nographic -net nic,model=e1000"
platform = "QEMU x86_64"
else:
timeout = 180
cmdline = "%s -c f -n 4"%(sys.argv[1])
# old flow: register every EAL subtest for the pexpect-driven run
autotest.register("eal_report.rst", "EAL-%s"%(target),
[ SubTest("Boot", boot_autotest, "boot_autotest"),
SubTest("EAL Flags", default_autotest, "eal_flags_autotest"),
SubTest("Version", default_autotest, "version_autotest"),
SubTest("PCI", default_autotest, "pci_autotest"),
SubTest("Memory", memory_autotest, "memory_autotest"),
SubTest("Lcore launch", default_autotest, "per_lcore_autotest"),
SubTest("Spinlock", spinlock_autotest, "spinlock_autotest"),
SubTest("Rwlock", rwlock_autotest, "rwlock_autotest"),
SubTest("Atomic", default_autotest, "atomic_autotest"),
SubTest("Byte order", default_autotest, "byteorder_autotest"),
SubTest("Prefetch", default_autotest, "prefetch_autotest"),
SubTest("Debug", default_autotest, "debug_autotest"),
SubTest("Cycles", default_autotest, "cycles_autotest"),
SubTest("Logs", logs_autotest, "logs_autotest"),
SubTest("Memzone", default_autotest, "memzone_autotest"),
SubTest("Cpu flags", default_autotest, "cpuflags_autotest"),
SubTest("Memcpy", default_autotest, "memcpy_autotest", timeout),
SubTest("String Functions", default_autotest, "string_autotest"),
SubTest("Alarm", default_autotest, "alarm_autotest", 30),
SubTest("Interrupt", default_autotest, "interrupt_autotest"),
])
print cmdline
autotest.register("ring_report.rst", "Ring-%s"%(target),
[ SubTest("Ring", ring_autotest, "ring_autotest", 30*60,
ring_genreport)
])
# new flow: hand the command line and the filter lists to the runner
runner = autotest_runner.AutotestRunner(cmdline, target, test_blacklist, test_whitelist)
if "baremetal" in target:
timeout = 60*60*3
else:
timeout = 60*30
for test_group in autotest_data.parallel_test_group_list:
runner.add_parallel_test_group(test_group)
autotest.register("mempool_report.rst", "Mempool-%s"%(target),
[ SubTest("Mempool", default_autotest, "mempool_autotest",
timeout, mempool_genreport)
])
autotest.register("mbuf_report.rst", "Mbuf-%s"%(target),
[ SubTest("Mbuf", default_autotest, "mbuf_autotest", timeout=120)
])
autotest.register("timer_report.rst", "Timer-%s"%(target),
[ SubTest("Timer", timer_autotest, "timer_autotest")
])
autotest.register("malloc_report.rst", "Malloc-%s"%(target),
[ SubTest("Malloc", default_autotest, "malloc_autotest")
])
for test_group in autotest_data.non_parallel_test_group_list:
runner.add_non_parallel_test_group(test_group)
# only do the hash autotest if supported by the platform
if not (platform.startswith("Intel(R) Core(TM)2 Quad CPU") or
platform.startswith("QEMU")):
autotest.register("hash_report.rst", "Hash-%s"%(target),
[ SubTest("Hash", default_autotest, "hash_autotest", timeout)
])
runner.run_all_tests()
autotest.register("lpm_report.rst", "LPM-%s"%(target),
[ SubTest("Lpm", default_autotest, "lpm_autotest", timeout)
])
autotest.register("eal2_report.rst", "EAL2-%s"%(target),
[ SubTest("TailQ", default_autotest, "tailq_autotest"),
SubTest("Errno", default_autotest, "errno_autotest"),
SubTest("Multiprocess", default_autotest, "multiprocess_autotest")
])
autotest.start()
autotest.gen_report()
quit(child)
child.terminate()
sys.exit(0)

358
app/test/autotest_data.py Normal file
View File

@ -0,0 +1,358 @@
#!/usr/bin/python
# BSD LICENSE
#
# Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Test data for autotests
from glob import glob
from autotest_test_funcs import *
# quick and dirty function to find out number of sockets
def num_sockets():
    """Return the number of NUMA nodes reported by sysfs (0 if none)."""
    return len(glob("/sys/devices/system/node/node*"))

# multiply given number for all sockets
# e.g. 32 becomes 32,32 or 32,32,32,32 etc.
def all_sockets(num):
    """Split *num* MB evenly across sockets as a --socket-mem string.

    e.g. 256 on a two-socket machine becomes "128,128".
    """
    # hoist the sysfs scan (it was done twice) and use explicit floor
    # division: same result as '/' on ints under Python 2, but keeps the
    # value integral if ever run under Python 3
    sockets = num_sockets()
    mem_per_socket = num // sockets
    return ",".join([str(mem_per_socket)] * sockets)
# groups of tests that can be run in parallel
# the grouping has been found largely empirically
# Each group dict holds: "Prefix" (hugepage --file-prefix so the groups do
# not clash), "Memory" (the --socket-mem / -m budget for the group) and
# "Tests", a list of dicts with the display "Name", the CLI "Command", the
# driver "Func" from autotest_test_funcs, and an optional "Report" callback.
parallel_test_group_list = [
{
"Prefix": "group_1",
"Memory" : "2",
"Tests" :
[
{
"Name" : "Timer autotest",
"Command" : "timer_autotest",
"Func" : timer_autotest,
"Report" : None,
},
{
"Name" : "Debug autotest",
"Command" : "debug_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "Errno autotest",
"Command" : "errno_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "Dump log history",
"Command" : "dump_log_history",
"Func" : dump_autotest,
"Report" : None,
},
{
"Name" : "Dump rings",
"Command" : "dump_ring",
"Func" : dump_autotest,
"Report" : None,
},
{
"Name" : "Dump mempools",
"Command" : "dump_mempool",
"Func" : dump_autotest,
"Report" : None,
},
]
},
{
"Prefix": "group_2",
"Memory" : "32",
"Tests" :
[
{
"Name" : "Memory autotest",
"Command" : "memory_autotest",
"Func" : memory_autotest,
"Report" : None,
},
{
"Name" : "Read/write lock autotest",
"Command" : "rwlock_autotest",
"Func" : rwlock_autotest,
"Report" : None,
},
{
"Name" : "Logs autotest",
"Command" : "logs_autotest",
"Func" : logs_autotest,
"Report" : None,
},
{
"Name" : "CPU flags autotest",
"Command" : "cpuflags_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "Version autotest",
"Command" : "version_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "EAL filesystem autotest",
"Command" : "eal_fs_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "EAL flags autotest",
"Command" : "eal_flags_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "Hash autotest",
"Command" : "hash_autotest",
"Func" : default_autotest,
"Report" : None,
},
],
},
{
"Prefix": "group_3",
"Memory" : all_sockets(256),
"Tests" :
[
{
"Name" : "LPM autotest",
"Command" : "lpm_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "Memcpy autotest",
"Command" : "memcpy_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "Memzone autotest",
"Command" : "memzone_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "String autotest",
"Command" : "string_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "Alarm autotest",
"Command" : "alarm_autotest",
"Func" : default_autotest,
"Report" : None,
},
]
},
{
"Prefix": "group_4",
"Memory" : all_sockets(128),
"Tests" :
[
{
"Name" : "PCI autotest",
"Command" : "pci_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "Malloc autotest",
"Command" : "malloc_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "Multi-process autotest",
"Command" : "multiprocess_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "Mbuf autotest",
"Command" : "mbuf_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "Per-lcore autotest",
"Command" : "per_lcore_autotest",
"Func" : default_autotest,
"Report" : None,
},
]
},
{
"Prefix": "group_5",
"Memory" : "16",
"Tests" :
[
{
"Name" : "Spinlock autotest",
"Command" : "spinlock_autotest",
"Func" : spinlock_autotest,
"Report" : None,
},
{
"Name" : "Byte order autotest",
"Command" : "byteorder_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "TAILQ autotest",
"Command" : "tailq_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "Command-line autotest",
"Command" : "cmdline_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "Interrupts autotest",
"Command" : "interrupt_autotest",
"Func" : default_autotest,
"Report" : None,
},
]
},
{
"Prefix": "group_6",
"Memory" : all_sockets(588),
"Tests" :
[
{
"Name" : "Function reentrancy autotest",
"Command" : "func_reentrancy_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "Mempool autotest",
"Command" : "mempool_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "Atomics autotest",
"Command" : "atomic_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "Prefetch autotest",
"Command" : "prefetch_autotest",
"Func" : default_autotest,
"Report" : None,
},
]
},
]
# tests that should not be run when any other tests are running
# (performance tests need the machine to themselves; same dict layout as
# parallel_test_group_list above)
non_parallel_test_group_list = [
{
"Prefix": "mempool_perf",
"Memory" : all_sockets(256),
"Tests" :
[
{
"Name" : "Cycles autotest",
"Command" : "cycles_autotest",
"Func" : default_autotest,
"Report" : None,
},
{
"Name" : "Mempool performance autotest",
"Command" : "mempool_perf_autotest",
"Func" : default_autotest,
"Report" : None,
},
]
},
{
"Prefix": "memcpy_perf",
"Memory" : all_sockets(512),
"Tests" :
[
{
"Name" : "Memcpy performance autotest",
"Command" : "memcpy_perf_autotest",
"Func" : default_autotest,
"Report" : None,
},
]
},
{
"Prefix": "hash_perf",
"Memory" : all_sockets(512),
"Tests" :
[
{
"Name" : "Hash performance autotest",
"Command" : "hash_perf_autotest",
"Func" : default_autotest,
"Report" : None,
},
]
},
{
"Prefix": "ring_perf",
"Memory" : all_sockets(512),
"Tests" :
[
{
"Name" : "Ring autotest",
"Command" : "ring_autotest",
"Func" : ring_autotest,
"Report" : None,
},
]
},
]

419
app/test/autotest_runner.py Normal file
View File

@ -0,0 +1,419 @@
#!/usr/bin/python
# BSD LICENSE
#
# Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The main logic behind running autotests in parallel
import multiprocessing, sys, pexpect, time, os, StringIO, csv
# Nudge the child with a newline and wait (up to two minutes) for the
# "RTE>>" prompt.  Any pexpect error counts as "no prompt".
def wait_prompt(child):
    try:
        child.sendline()
        outcome = child.expect(["RTE>>", pexpect.TIMEOUT, pexpect.EOF],
                               timeout = 120)
    except:
        return False
    return outcome == 0
# run a test group
# each result tuple in results list consists of:
# result value (0 or -1)
# result string
# test name
# total test run time (double)
# raw test log
# test report (if not available, should be None)
#
# this function needs to be outside AutotestRunner class
# because otherwise Pool won't work (or rather it will require
# quite a bit of effort to make it work).
def run_test_group(cmdline, test_group):
results = []
child = None
start_time = time.time()
startuplog = None
# run test app
try:
# prepare logging of init
startuplog = StringIO.StringIO()
print >>startuplog, "\n%s %s\n" % ("="*20, test_group["Prefix"])
child = pexpect.spawn(cmdline, logfile=startuplog)
# wait for target to boot
if not wait_prompt(child):
child.close()
results.append((-1, "Fail [No prompt]", "Start %s" % test_group["Prefix"],
time.time() - start_time, startuplog.getvalue(), None))
# mark all tests as failed
for test in test_group["Tests"]:
results.append((-1, "Fail [No prompt]", test["Name"],
time.time() - start_time, "", None))
# exit test
return results
except:
results.append((-1, "Fail [Can't run]", "Start %s" % test_group["Prefix"],
time.time() - start_time, startuplog.getvalue(), None))
# mark all tests as failed
for t in test_group["Tests"]:
results.append((-1, "Fail [Can't run]", t["Name"],
time.time() - start_time, "", None))
# exit test
return results
# startup was successful
results.append((0, "Success", "Start %s" % test_group["Prefix"],
time.time() - start_time, startuplog.getvalue(), None))
# run all tests in test group
for test in test_group["Tests"]:
# create log buffer for each test
# in multiprocessing environment, the logging would be
# interleaved and will create a mess, hence the buffering
logfile = StringIO.StringIO()
child.logfile = logfile
result = ()
# make a note when the test started
start_time = time.time()
try:
# print test name to log buffer
print >>logfile, "\n%s %s\n" % ("-"*20, test["Name"])
# run test function associated with the test
result = test["Func"](child, test["Command"])
# make a note when the test was finished
end_time = time.time()
# append test data to the result tuple
result += (test["Name"], end_time - start_time,
logfile.getvalue())
# call report function, if any defined, and supply it with
# target and complete log for test run
if test["Report"]:
# NOTE(review): neither 'self' nor 'log' exists in this
# module-level function -- this line raises NameError as soon
# as any test defines a "Report" callback.  It only works
# today because every "Report" in autotest_data.py is None
# (and the NameError would be swallowed by the broad except
# below, marking the test "Fail [Crash]").
report = test["Report"](self.target, log)
# append report to results tuple
result += (report,)
else:
# report is None
result += (None,)
except:
# make a note when the test crashed
end_time = time.time()
# mark test as failed
result = (-1, "Fail [Crash]", test["Name"],
end_time - start_time, logfile.getvalue(), None)
finally:
# append the results to the results list
results.append(result)
# regardless of whether test has crashed, try quitting it
try:
child.sendline("quit")
child.close()
# if the test crashed, just do nothing instead
except:
# nop
pass
# return test results
return results
# class representing an instance of autotests run
class AutotestRunner:
cmdline = ""
parallel_test_groups = []
non_parallel_test_groups = []
logfile = None
csvwriter = None
target = ""
start = None
n_tests = 0
fails = 0
log_buffers = []
blacklist = []
whitelist = []
def __init__(self, cmdline, target, blacklist, whitelist):
self.cmdline = cmdline
self.target = target
self.blacklist = blacklist
self.whitelist = whitelist
# log file filename
logfile = "%s.log" % target
csvfile = "%s.csv" % target
self.logfile = open(logfile, "w")
csvfile = open(csvfile, "w")
self.csvwriter = csv.writer(csvfile)
# prepare results table
self.csvwriter.writerow(["test_name","test_result","result_str"])
# set up cmdline string
def __get_cmdline(self, test):
cmdline = self.cmdline
# perform additional linuxapp adjustments
if not "baremetal" in self.target:
# append memory limitations for each test
# otherwise tests won't run in parallel
if not "i686" in self.target:
cmdline += " --socket-mem=%s"% test["Memory"]
else:
# affinitize startup so that tests don't fail on i686
cmdline = "taskset 1 " + cmdline
cmdline += " -m " + str(sum(map(int,test["Memory"].split(","))))
# set group prefix for autotest group
# otherwise they won't run in parallel
cmdline += " --file-prefix=%s"% test["Prefix"]
return cmdline
return cmdline
def add_parallel_test_group(self,test_group):
self.parallel_test_groups.append(test_group)
def add_non_parallel_test_group(self,test_group):
self.non_parallel_test_groups.append(test_group)
def __process_results(self, results):
# this iterates over individual test results
for i, result in enumerate(results):
# increase total number of tests that were run
# do not include "start" test
if i > 0:
self.n_tests += 1
# unpack result tuple
test_result, result_str, test_name, \
test_time, log, report = result
# get total run time
cur_time = time.time()
total_time = int(cur_time - self.start)
# print results, test run time and total time since start
print ("%s:" % test_name).ljust(30),
print result_str.ljust(29),
print "[%02dm %02ds]" % (test_time / 60, test_time % 60),
# don't print out total time every line, it's the same anyway
if i == len(results) - 1:
print "[%02dm %02ds]" % (total_time / 60, total_time % 60)
else:
print ""
# if test failed and it wasn't a "start" test
if test_result < 0 and not i == 0:
self.fails += 1
# collect logs
self.log_buffers.append(log)
# create report if it exists
if report:
try:
f = open("%s_%s_report.rst" % (self.target,test_name), "w")
except IOError:
print "Report for %s could not be created!" % test_name
else:
with f:
f.write(report)
# write test result to CSV file
if i != 0:
self.csvwriter.writerow([test_name, test_result, result_str])
# this function iterates over test groups and removes each
# test that is not in whitelist/blacklist
def __filter_groups(self, test_groups):
groups_to_remove = []
# filter out tests from parallel test groups
for i, test_group in enumerate(test_groups):
# iterate over a copy so that we could safely delete individual tests
for test in test_group["Tests"][:]:
test_id = test["Command"]
# dump tests are specified in full e.g. "Dump_mempool"
if "_autotest" in test_id:
test_id = test_id[:-len("_autotest")]
# filter out blacklisted/whitelisted tests
if self.blacklist and test_id in self.blacklist:
test_group["Tests"].remove(test)
continue
if self.whitelist and test_id not in self.whitelist:
test_group["Tests"].remove(test)
continue
# modify or remove original group
if len(test_group["Tests"]) > 0:
test_groups[i] = test_group
else:
# remember which groups should be deleted
# put the numbers backwards so that we start
# deleting from the end, not from the beginning
groups_to_remove.insert(0, i)
# remove test groups that need to be removed
for i in groups_to_remove:
del test_groups[i]
return test_groups
# iterate over test groups and run tests associated with them
def run_all_tests(self):
# filter groups
self.parallel_test_groups = \
self.__filter_groups(self.parallel_test_groups)
self.non_parallel_test_groups = \
self.__filter_groups(self.non_parallel_test_groups)
# create a pool of worker threads
if not "baremetal" in self.target:
pool = multiprocessing.Pool(processes=4)
else:
# we can't be sure running baremetal tests in parallel
# will work, so let's stay on the safe side
pool = multiprocessing.Pool(processes=1)
results = []
# whatever happens, try to save as much logs as possible
try:
# create table header
print ""
print "Test name".ljust(30),
print "Test result".ljust(29),
print "Test".center(9),
print "Total".center(9)
print "=" * 80
# make a note of tests start time
self.start = time.time()
# assign worker threads to run test groups
for test_group in self.parallel_test_groups:
result = pool.apply_async(run_test_group,
[self.__get_cmdline(test_group), test_group])
results.append(result)
# iterate while we have group execution results to get
while len(results) > 0:
# iterate over a copy to be able to safely delete results
# this iterates over a list of group results
for group_result in results[:]:
# if the thread hasn't finished yet, continue
if not group_result.ready():
continue
res = group_result.get()
self.__process_results(res)
# remove result from results list once we're done with it
results.remove(group_result)
# run non_parallel tests. they are run one by one, synchronously
for test_group in self.non_parallel_test_groups:
group_result = run_test_group(self.__get_cmdline(test_group), test_group)
self.__process_results(group_result)
# get total run time
cur_time = time.time()
total_time = int(cur_time - self.start)
# print out summary
print "=" * 80
print "Total run time: %02dm %02ds" % (total_time / 60, total_time % 60)
if self.fails != 0:
print "Number of failed tests: %s" % str(self.fails)
# write summary to logfile
self.logfile.write("Summary\n")
self.logfile.write("Target: ".ljust(15) + "%s\n" % self.target)
self.logfile.write("Tests: ".ljust(15) + "%i\n" % self.n_tests)
self.logfile.write("Failed tests: ".ljust(15) + "%i\n" % self.fails)
except:
print "Exception occured"
print sys.exc_info()
# drop logs from all executions to a logfile
for buf in self.log_buffers:
self.logfile.write(buf.replace("\r",""))
log_buffers = []

View File

@ -0,0 +1,290 @@
#!/usr/bin/python
# BSD LICENSE
#
# Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Test functions
import sys, pexpect, time, os, re
# default autotest, used to run most tests
# waits for "Test OK"
def default_autotest(child, test_name):
    """Fire *test_name* and map the app's verdict to a (code, message) pair."""
    child.sendline(test_name)
    # patterns in match-priority order: pass, fail, unknown command, timeout
    outcome = child.expect(["Test OK", "Test Failed",
        "Command not found", pexpect.TIMEOUT], timeout = 900)
    failure_map = {
        1: (-1, "Fail"),
        2: (-1, "Fail [Not found]"),
        3: (-1, "Fail [Timeout]"),
    }
    return failure_map.get(outcome, (0, "Success"))
# autotest used to run dump commands
# just fires the command
def dump_autotest(child, test_name):
    """Issue a dump command; its output is deliberately not checked."""
    status, message = 0, "Success"
    child.sendline(test_name)
    return status, message
# memory autotest
# reads output and waits for Test OK
def memory_autotest(child, test_name):
    """Check that a memory segment of non-zero size is reported, then Test OK."""
    child.sendline(test_name)
    # one memory-segment dump line; group 1 captures the hex length
    seg_regexp = ("phys:0x[0-9a-f]*, len:0x([0-9a-f]*), "
                  "virt:0x[0-9a-f]*, socket_id:[0-9]*")
    outcome = child.expect([seg_regexp, pexpect.TIMEOUT], timeout = 180)
    if outcome != 0:
        return -1, "Fail [Timeout]"
    seg_len = int(child.match.groups()[0], 16)
    if seg_len <= 0:
        return -1, "Fail [Bad size]"
    outcome = child.expect(["Test OK", "Test Failed",
        pexpect.TIMEOUT], timeout = 10)
    if outcome == 1:
        return -1, "Fail"
    if outcome == 2:
        return -1, "Fail [Timeout]"
    return 0, "Success"
def spinlock_autotest(child, test_name):
    """Check spinlock hello messages arrive in non-decreasing core order,
    separately for the plain and the recursive lock, until Test OK."""
    last_core = 0
    last_core_recursive = 0
    child.sendline(test_name)
    patterns = ["Test OK",
                "Test Failed",
                "Hello from core ([0-9]*) !",
                "Hello from within recursive locks from ([0-9]*) !",
                pexpect.TIMEOUT]
    while True:
        idx = child.expect(patterns, timeout = 20)
        if idx == 0:
            # all messages seen and the app reported success
            break
        if idx == 1:
            return -1, "Fail"
        if idx == 4:
            return -1, "Fail [Timeout]"
        core = int(child.match.groups()[0])
        if idx == 2:
            # plain lock messages: core ids must never go backwards
            if core < last_core:
                return -1, "Fail [Bad order]"
            last_core = core
        else:
            # recursive lock messages are ordered independently
            if core < last_core_recursive:
                return -1, "Fail [Bad order]"
            last_core_recursive = core
    return 0, "Success"
def rwlock_autotest(child, test_name):
    """Check rwlock hello ordering and that the global write-lock message
    was seen (it must be the last message before Test OK)."""
    highest = 0
    child.sendline(test_name)
    patterns = ["Test OK",
                "Test Failed",
                "Hello from core ([0-9]*) !",
                "Global write lock taken on master core ([0-9]*)",
                pexpect.TIMEOUT]
    while True:
        idx = child.expect(patterns, timeout = 10)
        if idx == 0:
            # success only counts if the write-lock message came through
            if highest != 0xffff:
                return -1, "Fail [Message is missing]"
            break
        elif idx == 2:
            # per-core messages: core ids must never go backwards
            core = int(child.match.groups()[0])
            if core < highest:
                return -1, "Fail [Bad order]"
            highest = core
        elif idx == 3:
            # sentinel marking that the final write-lock message was seen
            highest = 0xffff
        elif idx == 4:
            return -1, "Fail [Timeout]"
        else:
            return -1, "Fail"
    return 0, "Success"
def logs_autotest(child, test_name):
    """Check that the expected log messages appear, in order, then Test OK.

    Bug fix: the result of the final expect() used to be discarded, so
    the test reported Success even when the app printed "Test Failed"
    or timed out after the last log message.
    """
    child.sendline(test_name)

    # exact sequence of log lines the test app must emit, in order
    log_list = [
        "TESTAPP1: this is a debug level message",
        "TESTAPP1: this is a info level message",
        "TESTAPP1: this is a warning level message",
        "TESTAPP2: this is a info level message",
        "TESTAPP2: this is a warning level message",
        "TESTAPP1: this is a debug level message",
        "TESTAPP1: this is a debug level message",
        "TESTAPP1: this is a info level message",
        "TESTAPP1: this is a warning level message",
        "TESTAPP2: this is a info level message",
        "TESTAPP2: this is a warning level message",
        "TESTAPP1: this is a debug level message",
    ]

    for log_msg in log_list:
        index = child.expect([log_msg,
                              "Test OK",
                              "Test Failed",
                              pexpect.TIMEOUT], timeout = 10)
        if index == 3:
            return -1, "Fail [Timeout]"
        # anything other than the expected message (premature verdict
        # or wrong text) is a failure
        elif index != 0:
            return -1, "Fail"

    # wait for the final verdict and actually check it
    index = child.expect(["Test OK",
                          "Test Failed",
                          pexpect.TIMEOUT], timeout = 10)
    if index == 1:
        return -1, "Fail"
    elif index == 2:
        return -1, "Fail [Timeout]"
    return 0, "Success"
def timer_autotest(child, test_name):
    """Run the timer autotest and validate per-timer callback placement.

    Timers 0, 2 and 3 must always fire on one stable core (timer 0 only
    while its count is below 20); timer 1 must migrate to a different
    core on every expiry. Callback counts are bounded per timer, and
    timers 0 and 3 must end up on two different cores.

    Bug fixes: the tim1 count error message claimed "> 30" while the
    check is "> 10"; the lcore_tim3 "changed" message printed its
    (old -> new) arguments in the wrong order; the parse failure handler
    used a bare except.
    """
    child.sendline(test_name)

    index = child.expect(["Start timer stress tests \(30 seconds\)",
                          "Test Failed",
                          pexpect.TIMEOUT], timeout = 10)
    if index == 1:
        return -1, "Fail"
    elif index == 2:
        return -1, "Fail [Timeout]"

    index = child.expect(["Start timer basic tests \(30 seconds\)",
                          "Test Failed",
                          pexpect.TIMEOUT], timeout = 40)
    if index == 1:
        return -1, "Fail"
    elif index == 2:
        return -1, "Fail [Timeout]"

    # last core each timer fired on (-1 == not seen yet)
    lcore_tim0 = -1
    lcore_tim1 = -1
    lcore_tim2 = -1
    lcore_tim3 = -1

    while True:
        index = child.expect(["TESTTIMER: ([0-9]*): callback id=([0-9]*) count=([0-9]*) on core ([0-9]*)",
                              "Test OK",
                              "Test Failed",
                              pexpect.TIMEOUT], timeout = 10)
        if index == 1:
            break
        if index == 2:
            return -1, "Fail"
        elif index == 3:
            return -1, "Fail [Timeout]"

        try:
            # timestamp is parsed only to validate the line format
            t = int(child.match.groups()[0])
            timer_id = int(child.match.groups()[1])
            cnt = int(child.match.groups()[2])
            lcore = int(child.match.groups()[3])
        except (ValueError, IndexError):
            return -1, "Fail [Cannot parse]"

        # timer0 always expires on the same core when cnt < 20
        if timer_id == 0:
            if lcore_tim0 == -1:
                lcore_tim0 = lcore
            elif lcore != lcore_tim0 and cnt < 20:
                return -1, "Fail [lcore != lcore_tim0 (%d, %d)]" % (lcore, lcore_tim0)
            if cnt > 21:
                return -1, "Fail [tim0 cnt > 21]"

        # timer1 each time expires on a different core
        if timer_id == 1:
            if lcore == lcore_tim1:
                return -1, "Fail [lcore == lcore_tim1 (%d, %d)]" % (lcore, lcore_tim1)
            lcore_tim1 = lcore
            if cnt > 10:
                return -1, "Fail [tim1 cnt > 10]"

        # timer2 always expires on the same core
        if timer_id == 2:
            if lcore_tim2 == -1:
                lcore_tim2 = lcore
            elif lcore != lcore_tim2:
                return -1, "Fail [lcore != lcore_tim2 (%d, %d)]" % (lcore, lcore_tim2)
            if cnt > 30:
                return -1, "Fail [tim2 cnt > 30]"

        # timer3 always expires on the same core
        if timer_id == 3:
            if lcore_tim3 == -1:
                lcore_tim3 = lcore
            elif lcore != lcore_tim3:
                return -1, "Fail [lcore_tim3 changed (%d -> %d)]" % (lcore_tim3, lcore)
            if cnt > 30:
                return -1, "Fail [tim3 cnt > 30]"

    # timers 0 and 3 must have landed on two different cores
    if lcore_tim0 == lcore_tim3:
        return -1, "Fail [lcore_tim0 (%d) == lcore_tim3 (%d)]" % (lcore_tim0, lcore_tim3)
    return 0, "Success"
def ring_autotest(child, test_name):
    """Run the ring autotest, then verify set_watermark shows up in dump_ring."""
    child.sendline(test_name)
    outcome = child.expect(["Test OK", "Test Failed",
        pexpect.TIMEOUT], timeout = 1500)
    if outcome == 1:
        return -1, "Fail"
    if outcome == 2:
        return -1, "Fail [Timeout]"

    # set a watermark and check that the ring dump reflects it
    child.sendline("set_watermark test 100")
    child.sendline("dump_ring test")
    outcome = child.expect([" watermark=100",
        pexpect.TIMEOUT], timeout = 1)
    if outcome != 0:
        return -1, "Fail [Bad watermark]"
    return 0, "Success"

View File

@ -1,192 +0,0 @@
#!/usr/bin/env python
# BSD LICENSE
#
# Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys, re
import numpy as np
import matplotlib
matplotlib.use('Agg') # we don't want to use X11
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
# regexp fragment matching (and capturing) one optionally-signed decimal integer
INT = "([-+]?[0-9][0-9]*)"
class MempoolTest:
    """Collection of mempool_autotest measurements with query helpers.

    Each test case is a dict with keys: cache, cores, n_get_bulk,
    n_put_bulk, n_keep, rate.

    Bug fix: the case list used to be a mutable class attribute shared
    between all instances; it is now per-instance.
    """
    def __init__(self):
        # per-instance list of test-case dicts
        self.l = []

    # comparator ordering test cases by parameter tuple, then rate
    def sort(self, x, y):
        for t in [ "cache", "cores", "n_get_bulk", "n_put_bulk",
                   "n_keep", "rate" ]:
            if x[t] > y[t]:
                return 1
            if x[t] < y[t]:
                return -1
        return 0

    # add a test case
    def add(self, **args):
        self.l.append(args)

    # get an ordered list matching parameters
    # ex: r.get(enq_core=1, deq_core=1)
    def get(self, **args):
        from functools import cmp_to_key
        retlist = [t for t in self.l
                   if all(args[a] == t[a] for a in args)]
        # cmp_to_key keeps the legacy comparator working (py2.7 and py3)
        retlist.sort(key=cmp_to_key(self.sort))
        return retlist

    # return an ordered list of all values for this param or param list
    # ex: r.get_value_list("enq_core")
    def get_value_list(self, param):
        retlist = []
        if type(param) is not list:
            param = [param]
        for t in self.l:
            entry = tuple(t[p] for p in param)
            # single-param queries return scalars, not 1-tuples
            if len(entry) == 1:
                entry = entry[0]
            if entry not in retlist:
                retlist.append(entry)
        retlist.sort()
        return retlist
# read the file and return a MempoolTest object containing all data
def read_data_from_file(filename):
    """Parse mempool_autotest result lines from *filename* into a MempoolTest.

    Lines that do not match the expected format are silently skipped.
    Fixes vs. previous revision: the file handle was leaked if parsing
    raised, and the pattern was re-matched from the raw string per line.
    """
    mempool_test = MempoolTest()

    # one result line per test case
    regexp = "mempool_autotest "
    regexp += "cache=%s cores=%s "%(INT, INT)
    regexp += "n_get_bulk=%s n_put_bulk=%s "%(INT, INT)
    regexp += "n_keep=%s rate_persec=%s"%(INT, INT)
    pattern = re.compile(regexp)

    # "with" guarantees the file is closed even if parsing raises
    with open(filename) as f:
        for l in f:
            m = pattern.match(l)
            if m is None:
                continue
            mempool_test.add(cache = int(m.groups()[0]),
                             cores = int(m.groups()[1]),
                             n_get_bulk = int(m.groups()[2]),
                             n_put_bulk = int(m.groups()[3]),
                             n_keep = int(m.groups()[4]),
                             rate = int(m.groups()[5]))
    return mempool_test
def millions(x, pos):
    """Axis tick formatter: render a count in millions, e.g. 2500000 -> '2.5M'."""
    # pos is part of matplotlib's FuncFormatter protocol and is ignored
    in_millions = x * 1e-6
    return '%1.1fM' % in_millions
# graph one, with specific parameters -> generate a .svg file
def graph_one(str, mempool_test, cache, cores, n_keep):
    """Render one grouped bar chart of rates for a fixed (cache, cores,
    n_keep) combination and save it as mempool_<cache>_<cores>_<n_keep>.svg.

    Bars are grouped by n_get_bulk; within a group there is one bar per
    n_put_bulk (plus a transparent spacer bar), colored along a gradient.
    NOTE(review): the *str* parameter shadows the builtin and is only
    used in the chart title.
    """
    filename = "mempool_%d_%d_%d.svg"%(cache, cores, n_keep)

    # bar groups: one group per n_get_bulk value
    n_get_bulk_list = mempool_test.get_value_list("n_get_bulk")
    N_n_get_bulk = len(n_get_bulk_list)
    get_names = map(lambda x:"get=%d"%x, n_get_bulk_list)

    # bars within a group: one per n_put_bulk value
    n_put_bulk_list = mempool_test.get_value_list("n_put_bulk")
    N_n_put_bulk = len(n_put_bulk_list)
    put_names = map(lambda x:"put=%d"%x, n_put_bulk_list)

    # total bar slots: each group gets one extra (empty) spacer slot
    N = N_n_get_bulk * (N_n_put_bulk + 1)
    rates = []
    colors = []

    for n_get_bulk in mempool_test.get_value_list("n_get_bulk"):
        col = 0.
        for n_put_bulk in mempool_test.get_value_list("n_put_bulk"):
            # advance along the color gradient for each put size
            col += 0.9 / len(mempool_test.get_value_list("n_put_bulk"))
            r = mempool_test.get(cache=cache, cores=cores,
                                 n_get_bulk=n_get_bulk,
                                 n_put_bulk=n_put_bulk, n_keep=n_keep)
            # NOTE(review): when no measurement matches, the empty list r
            # itself is appended as a "rate" — presumably relies on
            # matplotlib treating it as zero height; verify
            if len(r) != 0:
                r = r[0]["rate"]
            rates.append(r)
            colors.append((1. - col, 0.2, col, 1.)) # rgba
        # spacer bar between groups (zero height, fully transparent)
        rates.append(0)
        colors.append((0.,0.,0.,0.))

    ind = np.arange(N) # the x locations for the groups
    width = 1 # the width of the bars: can also be len(x) sequence

    # show the y axis in millions of objects per second
    formatter = FuncFormatter(millions)
    fig = plt.figure()
    p = plt.bar(ind, tuple(rates), width, color=tuple(colors))
    fig.axes[0].yaxis.set_major_formatter(formatter)
    plt.ylabel('Obj/sec')
    #plt.ylim(0, 400000000.)
    title = "Mempool autotest \"%s\"\n"%(str)
    title += "cache=%d, core(s)=%d, n_keep=%d"%(cache, cores, n_keep)
    plt.title(title)
    # center the group labels under each group of bars
    ind_names = np.arange(N_n_get_bulk) * (N_n_put_bulk+1) + (N_n_put_bulk+1) / 2
    plt.xticks(ind_names, tuple(get_names))
    plt.legend(tuple([p[i] for i in range(N_n_put_bulk)]), tuple(put_names),
               loc="upper left")
    plt.savefig(filename)
# command line: the input data file and a title for the graphs
if len(sys.argv) != 3:
    print "usage: graph_mempool.py file title"
    sys.exit(1)

mempool_test = read_data_from_file(sys.argv[1])

# one .svg per (cache, cores, n_keep) combination present in the data
for cache, cores, n_keep in mempool_test.get_value_list(["cache", "cores",
                                                         "n_keep"]):
    graph_one(sys.argv[2], mempool_test, cache, cores, n_keep)

View File

@ -1,200 +0,0 @@
#!/usr/bin/env python
# BSD LICENSE
#
# Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys, re
import numpy as np
import matplotlib
matplotlib.use('Agg') # we don't want to use X11
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
# regexp fragment matching (and capturing) one optionally-signed decimal integer
INT = "([-+]?[0-9][0-9]*)"
class RingTest:
    """Collection of ring_autotest measurements with query helpers.

    Each test case is a dict with keys: enq_core, deq_core, enq_bulk,
    deq_bulk, sp, sc, rate.

    Bug fix: the case list used to be a mutable class attribute shared
    between all instances; it is now per-instance.
    """
    def __init__(self):
        # per-instance list of test-case dicts
        self.l = []

    # comparator ordering test cases by parameter tuple, then rate
    def sort(self, x, y):
        for t in [ "enq_core", "deq_core", "enq_bulk", "deq_bulk", "rate" ]:
            if x[t] > y[t]:
                return 1
            if x[t] < y[t]:
                return -1
        return 0

    # add a test case
    def add(self, **args):
        self.l.append(args)

    # get an ordered list matching parameters
    # ex: r.get(enq_core=1, deq_core=1)
    def get(self, **args):
        from functools import cmp_to_key
        retlist = [t for t in self.l
                   if all(args[a] == t[a] for a in args)]
        # cmp_to_key keeps the legacy comparator working (py2.7 and py3)
        retlist.sort(key=cmp_to_key(self.sort))
        return retlist

    # return an ordered list of all values for this param or param list
    # ex: r.get_value_list("enq_core")
    def get_value_list(self, param):
        retlist = []
        if type(param) is not list:
            param = [param]
        for t in self.l:
            entry = tuple(t[p] for p in param)
            # single-param queries return scalars, not 1-tuples
            if len(entry) == 1:
                entry = entry[0]
            if entry not in retlist:
                retlist.append(entry)
        retlist.sort()
        return retlist
# read the file and return a RingTest object containing all data
def read_data_from_file(filename):
    """Parse ring_autotest result lines from *filename* into a RingTest.

    Lines that do not match the expected format are silently skipped.
    Fixes vs. previous revision: the file handle was leaked if parsing
    raised, and the pattern was re-matched from the raw string per line.
    """
    ring_test = RingTest()

    # one result line per test case
    regexp = "ring_autotest "
    regexp += "e/d_core=%s,%s e/d_bulk=%s,%s "%(INT, INT, INT, INT)
    regexp += "sp=%s sc=%s "%(INT, INT)
    regexp += "rate_persec=%s"%(INT)
    pattern = re.compile(regexp)

    # "with" guarantees the file is closed even if parsing raises
    with open(filename) as f:
        for l in f:
            m = pattern.match(l)
            if m is None:
                continue
            ring_test.add(enq_core = int(m.groups()[0]),
                          deq_core = int(m.groups()[1]),
                          enq_bulk = int(m.groups()[2]),
                          deq_bulk = int(m.groups()[3]),
                          sp = int(m.groups()[4]),
                          sc = int(m.groups()[5]),
                          rate = int(m.groups()[6]))
    return ring_test
def millions(x, pos):
    """Axis tick formatter: render a count in millions, e.g. 2500000 -> '2.5M'."""
    # pos is part of matplotlib's FuncFormatter protocol and is ignored
    in_millions = x * 1e-6
    return '%1.1fM' % in_millions
# graph one, with specific parameters -> generate a .svg file
def graph_one(str, ring_test, enq_core, deq_core, sp, sc):
    """Render one grouped bar chart of rates for a fixed (enq_core,
    deq_core, sp, sc) combination and save it as
    ring_<enq>_<deq>_<sp|mp>_<sc|mc>.svg.

    Bars are grouped by enq_bulk; within a group there is one bar per
    deq_bulk (plus a transparent spacer bar), colored along a gradient.
    NOTE(review): the *str* parameter shadows the builtin and is only
    used in the chart title.
    """
    filename = "ring_%d_%d"%(enq_core, deq_core)
    # encode single/multi producer and consumer flags in the file name
    if sp:
        sp_str = "sp"
    else:
        sp_str = "mp"
    if sc:
        sc_str = "sc"
    else:
        sc_str = "mc"
    filename += "_%s_%s.svg"%(sp_str, sc_str)

    # bar groups: one group per enq_bulk value
    enq_bulk_list = ring_test.get_value_list("enq_bulk")
    N_enq_bulk = len(enq_bulk_list)
    enq_names = map(lambda x:"enq=%d"%x, enq_bulk_list)

    # bars within a group: one per deq_bulk value
    deq_bulk_list = ring_test.get_value_list("deq_bulk")
    N_deq_bulk = len(deq_bulk_list)
    deq_names = map(lambda x:"deq=%d"%x, deq_bulk_list)

    # total bar slots: each group gets one extra (empty) spacer slot
    N = N_enq_bulk * (N_deq_bulk + 1)
    rates = []
    colors = []

    for enq_bulk in ring_test.get_value_list("enq_bulk"):
        col = 0.
        for deq_bulk in ring_test.get_value_list("deq_bulk"):
            # advance along the color gradient for each dequeue size
            col += 0.9 / len(ring_test.get_value_list("deq_bulk"))
            r = ring_test.get(enq_core=enq_core, deq_core=deq_core,
                              enq_bulk=enq_bulk, deq_bulk=deq_bulk,
                              sp=sp, sc=sc)
            # NOTE(review): raises IndexError if no measurement matches —
            # presumably the input always contains every combination;
            # verify against the data file (the mempool variant guards this)
            r = r[0]["rate"]
            rates.append(r)
            colors.append((1. - col, 0.2, col, 1.)) # rgba
        # spacer bar between groups (zero height, fully transparent)
        rates.append(0)
        colors.append((0.,0.,0.,0.))

    ind = np.arange(N) # the x locations for the groups
    width = 1 # the width of the bars: can also be len(x) sequence

    # show the y axis in millions of objects per second
    formatter = FuncFormatter(millions)
    fig = plt.figure()
    p = plt.bar(ind, tuple(rates), width, color=tuple(colors))
    fig.axes[0].yaxis.set_major_formatter(formatter)
    plt.ylabel('Obj/sec')
    #plt.ylim(0, 400000000.)
    plt.title("Ring autotest \"%s\"\nenq core(s)=%d, deq core(s)=%d, %s, %s"\
        %(str, enq_core, deq_core, sp_str, sc_str))
    # center the group labels under each group of bars
    ind_names = np.arange(N_enq_bulk) * (N_deq_bulk+1) + (N_deq_bulk+1) / 2
    plt.xticks(ind_names, tuple(enq_names))
    plt.legend(tuple([p[i] for i in range(N_deq_bulk)]), tuple(deq_names),
               loc="upper left")
    plt.savefig(filename)
# command line: the input data file and a title for the graphs
if len(sys.argv) != 3:
    print "usage: graph_ring.py file title"
    sys.exit(1)

ring_test = read_data_from_file(sys.argv[1])

# one .svg per (enq_core, deq_core, sp, sc) combination present in the data
for enq_core, deq_core, sp, sc in \
        ring_test.get_value_list(["enq_core", "deq_core", "sp", "sc"]):
    graph_one(sys.argv[2], ring_test, enq_core, deq_core, sp, sc)