test: remove autotest python wrapper

This tool was previously used to run unit tests via make.
Meson has its own list of unit tests, and its own way of calling them.

Since the switch to meson-only builds, nothing in DPDK itself depends on
this script.
No CI seems to call it; time to put it to rest.

Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Aaron Conole <aconole@redhat.com>
David Marchand 2021-10-08 14:24:37 +02:00
parent 4a985f4e84
commit 8c745bb623
5 changed files with 0 additions and 1628 deletions

MAINTAINERS

@@ -1615,7 +1615,6 @@ Test Applications
-----------------
Unit tests framework
F: app/test/autotest*
F: app/test/commands.c
F: app/test/get-coremask.sh
F: app/test/has-hugepage.sh

app/test/autotest.py

@@ -1,50 +0,0 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2014 Intel Corporation

# Script that uses either test app or qemu controlled by python-pexpect
import autotest_data
import autotest_runner
import sys


def usage():
    print("Usage: autotest.py [test app|test iso image] ",
          "[target] [allow|-block]")


if len(sys.argv) < 3:
    usage()
    sys.exit(1)

target = sys.argv[2]

test_allowlist = None
test_blocklist = None

# get blocklist/allowlist
if len(sys.argv) > 3:
    testlist = sys.argv[3].split(',')
    testlist = [test.lower() for test in testlist]
    if testlist[0].startswith('-'):
        testlist[0] = testlist[0].lstrip('-')
        test_blocklist = testlist
    else:
        test_allowlist = testlist

cmdline = "%s -c f" % (sys.argv[1])

print(cmdline)

# how many workers to run tests with. FreeBSD doesn't support multiple primary
# processes, so make it 1, otherwise make it 4. ignored for non-parallel tests
n_processes = 1 if "bsd" in target else 4

runner = autotest_runner.AutotestRunner(cmdline, target, test_blocklist,
                                        test_allowlist, n_processes)

runner.parallel_tests = autotest_data.parallel_test_list[:]
runner.non_parallel_tests = autotest_data.non_parallel_test_list[:]

num_fails = runner.run_all_tests()

sys.exit(num_fails)
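# Example invocation (hypothetical binary path and target name):
#   ./autotest.py build/app/test/dpdk-test x86_64-native-linuxapp-gcc timer,mbuf
# runs only the "timer" and "mbuf" autotests; prefixing the list with '-'
# (e.g. "-timer,mbuf") turns it into a blocklist instead.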

app/test/autotest_data.py

@@ -1,797 +0,0 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2014 Intel Corporation
# Test data for autotests
from autotest_test_funcs import *
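# Each entry in the lists below follows the same schema:
#   "Name"    - human-readable name printed in the results table
#   "Command" - command typed at the test binary's interactive prompt
#   "Func"    - handler from autotest_test_funcs taking (child, command)
#               and returning a (status, message) pair
#   "Report"  - optional report generator; None everywhere in this file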
# groups of tests that can be run in parallel
# the grouping has been found largely empirically
parallel_test_list = [
    {
        "Name": "Timer autotest",
        "Command": "timer_autotest",
        "Func": timer_autotest,
        "Report": None,
    },
    {
        "Name": "Debug autotest",
        "Command": "debug_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Errno autotest",
        "Command": "errno_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Meter autotest",
        "Command": "meter_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Common autotest",
        "Command": "common_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Resource autotest",
        "Command": "resource_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Memory autotest",
        "Command": "memory_autotest",
        "Func": memory_autotest,
        "Report": None,
    },
    {
        "Name": "Read/write lock autotest",
        "Command": "rwlock_autotest",
        "Func": rwlock_autotest,
        "Report": None,
    },
    {
        "Name": "Lcores autotest",
        "Command": "lcores_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Logs autotest",
        "Command": "logs_autotest",
        "Func": logs_autotest,
        "Report": None,
    },
    {
        "Name": "CPU flags autotest",
        "Command": "cpuflags_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Version autotest",
        "Command": "version_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "EAL filesystem autotest",
        "Command": "eal_fs_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "EAL flags autotest",
        "Command": "eal_flags_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Hash autotest",
        "Command": "hash_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "LPM autotest",
        "Command": "lpm_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "LPM6 autotest",
        "Command": "lpm6_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "RIB autotest",
        "Command": "rib_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "RIB slow autotest",
        "Command": "rib_slow_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "RIB6 autotest",
        "Command": "rib6_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "RIB6 slow autotest",
        "Command": "rib6_slow_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "FIB autotest",
        "Command": "fib_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "FIB slow autotest",
        "Command": "fib_slow_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "FIB6 autotest",
        "Command": "fib6_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "FIB6 slow autotest",
        "Command": "fib6_slow_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Memcpy autotest",
        "Command": "memcpy_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Memzone autotest",
        "Command": "memzone_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "String autotest",
        "Command": "string_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Malloc autotest",
        "Command": "malloc_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Multi-process autotest",
        "Command": "multiprocess_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Mbuf autotest",
        "Command": "mbuf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Per-lcore autotest",
        "Command": "per_lcore_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Ring autotest",
        "Command": "ring_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Spinlock autotest",
        "Command": "spinlock_autotest",
        "Func": spinlock_autotest,
        "Report": None,
    },
    {
        "Name": "Ticketlock autotest",
        "Command": "ticketlock_autotest",
        "Func": ticketlock_autotest,
        "Report": None,
    },
    {
        "Name": "MCSlock autotest",
        "Command": "mcslock_autotest",
        "Func": mcslock_autotest,
        "Report": None,
    },
    {
        "Name": "Byte order autotest",
        "Command": "byteorder_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "TAILQ autotest",
        "Command": "tailq_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Command-line autotest",
        "Command": "cmdline_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Interrupts autotest",
        "Command": "interrupt_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Function reentrancy autotest",
        "Command": "func_reentrancy_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Mempool autotest",
        "Command": "mempool_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Atomics autotest",
        "Command": "atomic_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Prefetch autotest",
        "Command": "prefetch_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Red autotest",
        "Command": "red_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "PMD ring autotest",
        "Command": "ring_pmd_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Access list control autotest",
        "Command": "acl_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Sched autotest",
        "Command": "sched_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Eventdev selftest octeontx",
        "Command": "eventdev_selftest_octeontx",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Event ring autotest",
        "Command": "event_ring_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Table autotest",
        "Command": "table_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Flow classify autotest",
        "Command": "flow_classify_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Event eth rx adapter autotest",
        "Command": "event_eth_rx_adapter_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "User delay",
        "Command": "user_delay_us",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Rawdev autotest",
        "Command": "rawdev_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Kvargs autotest",
        "Command": "kvargs_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Link bonding autotest",
        "Command": "link_bonding_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Link bonding mode4 autotest",
        "Command": "link_bonding_mode4_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Link bonding rssconf autotest",
        "Command": "link_bonding_rssconf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Crc autotest",
        "Command": "crc_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Distributor autotest",
        "Command": "distributor_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Reorder autotest",
        "Command": "reorder_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Barrier autotest",
        "Command": "barrier_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Bitmap test",
        "Command": "bitmap_test",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Bitops test",
        "Command": "bitops_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Hash multiwriter autotest",
        "Command": "hash_multiwriter_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Service autotest",
        "Command": "service_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Timer racecond autotest",
        "Command": "timer_racecond_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Member autotest",
        "Command": "member_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Efd_autotest",
        "Command": "efd_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Thash autotest",
        "Command": "thash_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Hash function autotest",
        "Command": "hash_functions_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Cryptodev sw mvsam autotest",
        "Command": "cryptodev_sw_mvsam_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Cryptodev dpaa2 sec autotest",
        "Command": "cryptodev_dpaa2_sec_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Cryptodev dpaa sec autotest",
        "Command": "cryptodev_dpaa_sec_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Cryptodev qat autotest",
        "Command": "cryptodev_qat_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Cryptodev aesni mb autotest",
        "Command": "cryptodev_aesni_mb_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Cryptodev openssl autotest",
        "Command": "cryptodev_openssl_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Cryptodev scheduler autotest",
        "Command": "cryptodev_scheduler_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Cryptodev aesni gcm autotest",
        "Command": "cryptodev_aesni_gcm_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Cryptodev null autotest",
        "Command": "cryptodev_null_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Cryptodev sw snow3g autotest",
        "Command": "cryptodev_sw_snow3g_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Cryptodev sw kasumi autotest",
        "Command": "cryptodev_sw_kasumi_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Cryptodev_sw_zuc_autotest",
        "Command": "cryptodev_sw_zuc_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Reciprocal division",
        "Command": "reciprocal_division",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Red all",
        "Command": "red_all",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Fbarray autotest",
        "Command": "fbarray_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "External memory autotest",
        "Command": "external_mem_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Metrics autotest",
        "Command": "metrics_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Bitratestats autotest",
        "Command": "bitratestats_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Latencystats autotest",
        "Command": "latencystats_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Pdump autotest",
        "Command": "pdump_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "IPsec_SAD",
        "Command": "ipsec_sad_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Checksum autotest",
        "Command": "cksum_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    #
    # Please always keep all dump tests at the end and together!
    #
    {
        "Name": "Dump physmem",
        "Command": "dump_physmem",
        "Func": dump_autotest,
        "Report": None,
    },
    {
        "Name": "Dump memzone",
        "Command": "dump_memzone",
        "Func": dump_autotest,
        "Report": None,
    },
    {
        "Name": "Dump struct sizes",
        "Command": "dump_struct_sizes",
        "Func": dump_autotest,
        "Report": None,
    },
    {
        "Name": "Dump mempool",
        "Command": "dump_mempool",
        "Func": dump_autotest,
        "Report": None,
    },
    {
        "Name": "Dump malloc stats",
        "Command": "dump_malloc_stats",
        "Func": dump_autotest,
        "Report": None,
    },
    {
        "Name": "Dump devargs",
        "Command": "dump_devargs",
        "Func": dump_autotest,
        "Report": None,
    },
    {
        "Name": "Dump log types",
        "Command": "dump_log_types",
        "Func": dump_autotest,
        "Report": None,
    },
    {
        "Name": "Dump_ring",
        "Command": "dump_ring",
        "Func": dump_autotest,
        "Report": None,
    },
]
# tests that should not be run when any other tests are running
non_parallel_test_list = [
    {
        "Name": "Eventdev common autotest",
        "Command": "eventdev_common_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Eventdev selftest sw",
        "Command": "eventdev_selftest_sw",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "KNI autotest",
        "Command": "kni_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Mempool performance autotest",
        "Command": "mempool_perf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Memcpy performance autotest",
        "Command": "memcpy_perf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Hash performance autotest",
        "Command": "hash_perf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Hash read-write concurrency functional autotest",
        "Command": "hash_readwrite_func_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Hash read-write concurrency perf autotest",
        "Command": "hash_readwrite_perf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Hash read-write lock-free concurrency perf autotest",
        "Command": "hash_readwrite_lf_perf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Power autotest",
        "Command": "power_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Power cpufreq autotest",
        "Command": "power_cpufreq_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Power KVM VM autotest",
        "Command": "power_kvm_vm_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Timer performance autotest",
        "Command": "timer_perf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Pmd perf autotest",
        "Command": "pmd_perf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Ring pmd perf autotest",
        "Command": "ring_pmd_perf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Distributor perf autotest",
        "Command": "distributor_perf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Red_perf",
        "Command": "red_perf",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Lpm6 perf autotest",
        "Command": "lpm6_perf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Lpm perf autotest",
        "Command": "lpm_perf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "FIB perf autotest",
        "Command": "fib_perf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "FIB6 perf autotest",
        "Command": "fib6_perf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Efd perf autotest",
        "Command": "efd_perf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Member perf autotest",
        "Command": "member_perf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "Reciprocal division perf",
        "Command": "reciprocal_division_perf",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "RCU QSBR autotest",
        "Command": "rcu_qsbr_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    {
        "Name": "RCU QSBR performance autotest",
        "Command": "rcu_qsbr_perf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
    #
    # Please always make sure that ring_perf is the last test!
    #
    {
        "Name": "Ring performance autotest",
        "Command": "ring_perf_autotest",
        "Func": default_autotest,
        "Report": None,
    },
]

app/test/autotest_runner.py

@@ -1,434 +0,0 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2014 Intel Corporation
# The main logic behind running autotests in parallel
import io
import csv
from multiprocessing import Pool, Queue
import pexpect
import re
import subprocess
import sys
import time
import glob
import os
# wait for prompt
def wait_prompt(child):
    try:
        child.sendline()
        result = child.expect(["RTE>>", pexpect.TIMEOUT, pexpect.EOF],
                              timeout=120)
    except:
        return False
    if result == 0:
        return True
    else:
        return False
# get all valid NUMA nodes
def get_numa_nodes():
    return [
        int(
            re.match(r"node(\d+)", os.path.basename(node))
            .group(1)
        )
        for node in glob.glob("/sys/devices/system/node/node*")
    ]
# find first (or any, really) CPU on a particular node, will be used to spread
# processes around NUMA nodes to avoid exhausting memory on particular node
def first_cpu_on_node(node_nr):
    cpu_path = glob.glob("/sys/devices/system/node/node%d/cpu*" % node_nr)
    r = re.compile(r"cpu(\d+)")
    cpu_name = filter(None,
                      map(r.match,
                          map(os.path.basename, cpu_path)
                          )
                      )
    return int(next(cpu_name).group(1))
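# Example (hypothetical sysfs layout): with /sys/devices/system/node/node0 and
# node1 present, get_numa_nodes() returns [0, 1]; if node1's first listed CPU
# directory is cpu16, first_cpu_on_node(1) returns 16.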
pool_child = None  # per-process child

# we initialize each worker with a queue because we need per-pool unique
# command-line arguments, but we cannot do different arguments in an initializer
# because the API doesn't allow per-worker initializer arguments. so, instead,
# we will initialize with a shared queue, and dequeue command-line arguments
# from this queue
def pool_init(queue, result_queue):
    global pool_child

    cmdline, prefix = queue.get()
    start_time = time.time()
    name = ("Start %s" % prefix) if prefix != "" else "Start"

    # use default prefix if no prefix was specified
    prefix_cmdline = "--file-prefix=%s" % prefix if prefix != "" else ""

    # append prefix to cmdline
    cmdline = "%s %s" % (cmdline, prefix_cmdline)

    # prepare logging of init
    startuplog = io.StringIO()

    # run test app
    try:
        print("\n%s %s\n" % ("=" * 20, prefix), file=startuplog)
        print("\ncmdline=%s" % cmdline, file=startuplog)

        pool_child = pexpect.spawn(cmdline, logfile=startuplog, encoding='utf-8')

        # wait for target to boot
        if not wait_prompt(pool_child):
            pool_child.close()

            result = tuple((-1,
                            "Fail [No prompt]",
                            name,
                            time.time() - start_time,
                            startuplog.getvalue(),
                            None))
            pool_child = None
        else:
            result = tuple((0,
                            "Success",
                            name,
                            time.time() - start_time,
                            startuplog.getvalue(),
                            None))
    except:
        result = tuple((-1,
                        "Fail [Can't run]",
                        name,
                        time.time() - start_time,
                        startuplog.getvalue(),
                        None))
        pool_child = None

    result_queue.put(result)
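# After pool_init() runs, every worker process owns one long-lived test binary
# in its pool_child global; run_test() below reuses that same child process for
# every test dispatched to the worker.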
# run a test
# each result tuple in results list consists of:
#   result value (0 or -1)
#   result string
#   test name
#   total test run time (double)
#   raw test log
#   test report (if not available, should be None)
#
# this function needs to be outside AutotestRunner class because otherwise Pool
# won't work (or rather it will require quite a bit of effort to make it work).
def run_test(target, test):
    global pool_child

    if pool_child is None:
        return -1, "Fail [No test process]", test["Name"], 0, "", None

    # create log buffer for each test
    # in multiprocessing environment, the logging would be
    # interleaved and will create a mess, hence the buffering
    logfile = io.StringIO()
    pool_child.logfile = logfile

    # make a note when the test started
    start_time = time.time()

    try:
        # print test name to log buffer
        print("\n%s %s\n" % ("-" * 20, test["Name"]), file=logfile)

        # run test function associated with the test
        result = test["Func"](pool_child, test["Command"])

        # make a note when the test was finished
        end_time = time.time()

        log = logfile.getvalue()

        # append test data to the result tuple
        result += (test["Name"], end_time - start_time, log)

        # call report function, if any defined, and supply it with
        # target and complete log for test run
        if test["Report"]:
            report = test["Report"](target, log)
            # append report to results tuple
            result += (report,)
        else:
            # report is None
            result += (None,)
    except:
        # make a note when the test crashed
        end_time = time.time()

        # mark test as failed
        result = (-1, "Fail [Crash]", test["Name"],
                  end_time - start_time, logfile.getvalue(), None)

    # return test results
    return result
# class representing an instance of autotests run
class AutotestRunner:
    cmdline = ""
    parallel_test_groups = []
    non_parallel_test_groups = []
    logfile = None
    csvwriter = None
    target = ""
    start = None
    n_tests = 0
    fails = 0
    log_buffers = []
    blocklist = []
    allowlist = []

    def __init__(self, cmdline, target, blocklist, allowlist, n_processes):
        self.cmdline = cmdline
        self.target = target
        self.blocklist = blocklist
        self.allowlist = allowlist
        self.skipped = []
        self.parallel_tests = []
        self.non_parallel_tests = []
        self.n_processes = n_processes
        self.active_processes = 0

        # parse the binary for available test commands
        binary = cmdline.split()[0]
        stripped = 'not stripped' not in \
                   subprocess.check_output(['file', binary]).decode()
        if not stripped:
            symbols = subprocess.check_output(['nm', binary]).decode()
            self.avail_cmds = re.findall(r'test_register_(\w+)', symbols)
        else:
            self.avail_cmds = None

        # log file filename
        logfile = "%s.log" % target
        csvfile = "%s.csv" % target

        self.logfile = open(logfile, "w")
        csvfile = open(csvfile, "w")
        self.csvwriter = csv.writer(csvfile)

        # prepare results table
        self.csvwriter.writerow(["test_name", "test_result", "result_str"])
    # set up cmdline string
    def __get_cmdline(self, cpu_nr):
        cmdline = ("taskset -c %i " % cpu_nr) + self.cmdline

        return cmdline
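    # __get_cmdline example (hypothetical values): with self.cmdline set to
    # "./dpdk-test -c f" and cpu_nr == 4, it yields
    # "taskset -c 4 ./dpdk-test -c f", pinning that worker's binary to CPU 4.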
    def __process_result(self, result):

        # unpack result tuple
        test_result, result_str, test_name, \
            test_time, log, report = result

        # get total run time
        cur_time = time.time()
        total_time = int(cur_time - self.start)

        # print results, test run time and total time since start
        result = ("%s:" % test_name).ljust(30)
        result += result_str.ljust(29)
        result += "[%02dm %02ds]" % (test_time / 60, test_time % 60)

        # don't print out total time every line, it's the same anyway
        print(result + "[%02dm %02ds]" % (total_time / 60, total_time % 60))

        # if test failed and it wasn't a "start" test
        if test_result < 0:
            self.fails += 1

        # collect logs
        self.log_buffers.append(log)

        # create report if it exists
        if report:
            try:
                f = open("%s_%s_report.rst" %
                         (self.target, test_name), "w")
            except IOError:
                print("Report for %s could not be created!" % test_name)
            else:
                with f:
                    f.write(report)

        # write test result to CSV file
        self.csvwriter.writerow([test_name, test_result, result_str])
    # this function checks individual test and decides if this test should be in
    # the group by comparing it against allowlist/blocklist. it also checks if
    # the test is compiled into the binary, and marks it as skipped if necessary
    def __filter_test(self, test):
        test_cmd = test["Command"]
        test_id = test_cmd

        # dump tests are specified in full e.g. "Dump_mempool"
        if "_autotest" in test_id:
            test_id = test_id[:-len("_autotest")]

        # filter out blocked/allowed tests
        if self.blocklist and test_id in self.blocklist:
            return False
        if self.allowlist and test_id not in self.allowlist:
            return False

        # if test wasn't compiled in, remove it as well
        if self.avail_cmds and test_cmd not in self.avail_cmds:
            result = 0, "Skipped [Not compiled]", test_id, 0, "", None
            self.skipped.append(tuple(result))
            return False

        return True
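    # Example: an allowlist of ["timer", "mbuf"] matches the "timer_autotest"
    # and "mbuf_autotest" commands, because the "_autotest" suffix is stripped
    # before the comparison; dump tests such as "dump_ring" carry no suffix
    # and must therefore be listed in full.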
    def __run_test_group(self, test_group, worker_cmdlines):
        group_queue = Queue()
        init_result_queue = Queue()

        for proc, cmdline in enumerate(worker_cmdlines):
            prefix = "test%i" % proc if len(worker_cmdlines) > 1 else ""
            group_queue.put(tuple((cmdline, prefix)))

        # create a pool of worker threads
        # we will initialize child in the initializer, and we don't need to
        # close the child because when the pool worker gets destroyed, child
        # closes the process
        pool = Pool(processes=len(worker_cmdlines),
                    initializer=pool_init,
                    initargs=(group_queue, init_result_queue))

        results = []

        # process all initialization results
        for _ in range(len(worker_cmdlines)):
            self.__process_result(init_result_queue.get())

        # run all tests asynchronously
        for test in test_group:
            result = pool.apply_async(run_test, (self.target, test))
            results.append(result)

        # tell the pool to stop all processes once done
        pool.close()

        # iterate while we have group execution results to get
        while len(results) > 0:

            # iterate over a copy to be able to safely delete results
            # this iterates over a list of group results
            for async_result in results[:]:
                # if the thread hasn't finished yet, continue
                if not async_result.ready():
                    continue

                res = async_result.get()

                self.__process_result(res)

                # remove result from results list once we're done with it
                results.remove(async_result)
    # iterate over test groups and run tests associated with them
    def run_all_tests(self):
        # filter groups
        self.parallel_tests = list(
            filter(self.__filter_test,
                   self.parallel_tests)
        )
        self.non_parallel_tests = list(
            filter(self.__filter_test,
                   self.non_parallel_tests)
        )

        parallel_cmdlines = []
        # FreeBSD doesn't have NUMA support
        numa_nodes = get_numa_nodes()
        if len(numa_nodes) > 0:
            for proc in range(self.n_processes):
                # spread cpu affinity between NUMA nodes to have less chance of
                # running out of memory while running multiple test apps in
                # parallel. to do that, alternate between NUMA nodes in a round
                # robin fashion, and pick an arbitrary CPU from that node to
                # taskset our execution to
                numa_node = numa_nodes[self.active_processes % len(numa_nodes)]
                cpu_nr = first_cpu_on_node(numa_node)
                parallel_cmdlines += [self.__get_cmdline(cpu_nr)]
                # increase number of active processes so that the next cmdline
                # gets a different NUMA node
                self.active_processes += 1
        else:
            parallel_cmdlines = [self.cmdline] * self.n_processes

        print("Running tests with %d workers" % self.n_processes)

        # create table header
        print("")
        print("Test name".ljust(30) + "Test result".ljust(29) +
              "Test".center(9) + "Total".center(9))
        print("=" * 80)

        if len(self.skipped):
            print("Skipped autotests:")

            # print out any skipped tests
            for result in self.skipped:
                # unpack result tuple
                test_result, result_str, test_name, _, _, _ = result
                self.csvwriter.writerow([test_name, test_result, result_str])

                t = ("%s:" % test_name).ljust(30)
                t += result_str.ljust(29)
                t += "[00m 00s]"
                print(t)

        # make a note of tests start time
        self.start = time.time()

        # whatever happens, try to save as much logs as possible
        try:
            if len(self.parallel_tests) > 0:
                print("Parallel autotests:")
                self.__run_test_group(self.parallel_tests, parallel_cmdlines)

            if len(self.non_parallel_tests) > 0:
                print("Non-parallel autotests:")
                self.__run_test_group(self.non_parallel_tests, [self.cmdline])

            # get total run time
            cur_time = time.time()
            total_time = int(cur_time - self.start)

            # print out summary
            print("=" * 80)
            print("Total run time: %02dm %02ds" % (total_time / 60,
                                                   total_time % 60))
            if self.fails != 0:
                print("Number of failed tests: %s" % str(self.fails))

            # write summary to logfile
            self.logfile.write("Summary\n")
            self.logfile.write("Target: ".ljust(15) + "%s\n" % self.target)
            self.logfile.write("Tests: ".ljust(15) + "%i\n" % self.n_tests)
            self.logfile.write("Failed tests: ".ljust(
                15) + "%i\n" % self.fails)
        except:
            print("Exception occurred")
            print(sys.exc_info())
            self.fails = 1

        # drop logs from all executions to a logfile
        for buf in self.log_buffers:
            self.logfile.write(buf.replace("\r", ""))

        return self.fails

app/test/autotest_test_funcs.py

@@ -1,346 +0,0 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2014 Intel Corporation
# Test functions
import pexpect
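# All test functions below share one contract: they receive the pexpect child
# running the test binary plus the command to type at its "RTE>>" prompt, and
# return a (status, message) pair -- 0 for success, -1 for failure.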
# default autotest, used to run most tests
# waits for "Test OK"
def default_autotest(child, test_name):
    child.sendline(test_name)
    result = child.expect(["Test OK", "Test Failed",
                           "Command not found", pexpect.TIMEOUT,
                           "Test Skipped"], timeout=900)
    if result == 1:
        return -1, "Fail"
    elif result == 2:
        return -1, "Fail [Not found]"
    elif result == 3:
        return -1, "Fail [Timeout]"
    elif result == 4:
        return 0, "Skipped [Not Run]"
    return 0, "Success"
# autotest used to run dump commands
# just fires the command
def dump_autotest(child, test_name):
    child.sendline(test_name)
    return 0, "Success"
# memory autotest
# reads output and waits for Test OK
def memory_autotest(child, test_name):
    lines = 0
    error = ''
    child.sendline(test_name)
    while True:
        regexp = "IOVA:0x[0-9a-f]*, len:([0-9]*), virt:0x[0-9a-f]*, " \
                 "socket_id:[0-9]*"
        index = child.expect([regexp, "Test OK", "Test Failed",
                              pexpect.TIMEOUT], timeout=10)
        if index == 3:
            return -1, "Fail [Timeout]"
        elif index == 1:
            break
        elif index == 2:
            return -1, "Fail"
        else:
            lines = lines + 1
            size = int(child.match.groups()[0], 10)
            if size <= 0:
                error = 'Bad size'

    if lines <= 0:
        return -1, "Fail [No entries]"
    if error != '':
        return -1, "Fail [{}]".format(error)
    return 0, "Success"
def spinlock_autotest(child, test_name):
    i = 0
    ir = 0
    child.sendline(test_name)
    while True:
        index = child.expect(["Test OK",
                              "Test Failed",
                              "Hello from core ([0-9]*) !",
                              "Hello from within recursive locks "
                              "from ([0-9]*) !",
                              pexpect.TIMEOUT], timeout=5)
        # ok
        if index == 0:
            break

        # message, check ordering
        elif index == 2:
            if int(child.match.groups()[0]) < i:
                return -1, "Fail [Bad order]"
            i = int(child.match.groups()[0])

        elif index == 3:
            if int(child.match.groups()[0]) < ir:
                return -1, "Fail [Bad order]"
            ir = int(child.match.groups()[0])

        # fail
        elif index == 4:
            return -1, "Fail [Timeout]"
        elif index == 1:
            return -1, "Fail"

    return 0, "Success"
def rwlock_autotest(child, test_name):
    i = 0
    child.sendline(test_name)
    while True:
        index = child.expect(["Test OK",
                              "Test Failed",
                              "Hello from core ([0-9]*) !",
                              "Global write lock taken on main "
                              "core ([0-9]*)",
                              pexpect.TIMEOUT], timeout=10)
        # ok
        if index == 0:
            if i != 0xffff:
                return -1, "Fail [Message is missing]"
            break

        # message, check ordering
        elif index == 2:
            if int(child.match.groups()[0]) < i:
                return -1, "Fail [Bad order]"
            i = int(child.match.groups()[0])

        # must be the last message, check ordering
        elif index == 3:
            i = 0xffff

        elif index == 4:
            return -1, "Fail [Timeout]"

        # fail
        else:
            return -1, "Fail"

    return 0, "Success"
def ticketlock_autotest(child, test_name):
    i = 0
    ir = 0
    child.sendline(test_name)
    while True:
        index = child.expect(["Test OK",
                              "Test Failed",
                              "Hello from core ([0-9]*) !",
                              "Hello from within recursive locks "
                              "from ([0-9]*) !",
                              pexpect.TIMEOUT], timeout=5)
        # ok
        if index == 0:
            break

        # message, check ordering
        elif index == 2:
            if int(child.match.groups()[0]) < i:
                return -1, "Fail [Bad order]"
            i = int(child.match.groups()[0])

        elif index == 3:
            if int(child.match.groups()[0]) < ir:
                return -1, "Fail [Bad order]"
            ir = int(child.match.groups()[0])

        # fail
        elif index == 4:
            return -1, "Fail [Timeout]"
        elif index == 1:
            return -1, "Fail"

    return 0, "Success"
def mcslock_autotest(child, test_name):
    i = 0
    ir = 0
    child.sendline(test_name)
    while True:
        index = child.expect(["Test OK",
                              "Test Failed",
                              "lcore ([0-9]*) state: ([0-1])"
                              "MCS lock taken on core ([0-9]*)",
                              "MCS lock released on core ([0-9]*)",
                              pexpect.TIMEOUT], timeout=5)
        # ok
        if index == 0:
            break

        # message, check ordering
        elif index == 2:
            if int(child.match.groups()[0]) < i:
                return -1, "Fail [Bad order]"
            i = int(child.match.groups()[0])

        elif index == 3:
            if int(child.match.groups()[0]) < ir:
                return -1, "Fail [Bad order]"
            ir = int(child.match.groups()[0])

        # fail
        elif index == 4:
            return -1, "Fail [Timeout]"
        elif index == 1:
            return -1, "Fail"

    return 0, "Success"
def logs_autotest(child, test_name):
    child.sendline(test_name)

    log_list = [
        "TESTAPP1: error message",
        "TESTAPP1: critical message",
        "TESTAPP2: critical message",
        "TESTAPP1: error message",
    ]

    for log_msg in log_list:
        index = child.expect([log_msg,
                              "Test OK",
                              "Test Failed",
                              pexpect.TIMEOUT], timeout=10)
        if index == 3:
            return -1, "Fail [Timeout]"
        # not ok
        elif index != 0:
            return -1, "Fail"

    index = child.expect(["Test OK",
                          "Test Failed",
                          pexpect.TIMEOUT], timeout=10)
    if index == 1:
        return -1, "Fail"
    elif index == 2:
        return -1, "Fail [Timeout]"
    return 0, "Success"
def timer_autotest(child, test_name):
    child.sendline(test_name)

    index = child.expect(["Start timer stress tests",
                          "Test Failed",
                          pexpect.TIMEOUT], timeout=5)
    if index == 1:
        return -1, "Fail"
    elif index == 2:
        return -1, "Fail [Timeout]"

    index = child.expect(["Start timer stress tests 2",
                          "Test Failed",
                          pexpect.TIMEOUT], timeout=5)
    if index == 1:
        return -1, "Fail"
    elif index == 2:
        return -1, "Fail [Timeout]"

    index = child.expect(["Start timer basic tests",
                          "Test Failed",
                          pexpect.TIMEOUT], timeout=5)
    if index == 1:
        return -1, "Fail"
    elif index == 2:
        return -1, "Fail [Timeout]"

    lcore_tim0 = -1
    lcore_tim1 = -1
    lcore_tim2 = -1
    lcore_tim3 = -1

    while True:
        index = child.expect(["TESTTIMER: ([0-9]*): callback id=([0-9]*) "
                              "count=([0-9]*) on core ([0-9]*)",
                              "Test OK",
                              "Test Failed",
                              pexpect.TIMEOUT], timeout=10)
        if index == 1:
            break

        if index == 2:
            return -1, "Fail"
        elif index == 3:
            return -1, "Fail [Timeout]"

        try:
            id = int(child.match.groups()[1])
            cnt = int(child.match.groups()[2])
            lcore = int(child.match.groups()[3])
        except:
            return -1, "Fail [Cannot parse]"

        # timer0 always expires on the same core when cnt < 20
        if id == 0:
            if lcore_tim0 == -1:
                lcore_tim0 = lcore
            elif lcore != lcore_tim0 and cnt < 20:
                return -1, "Fail [lcore != lcore_tim0 (%d, %d)]" \
                    % (lcore, lcore_tim0)
            if cnt > 21:
                return -1, "Fail [tim0 cnt > 21]"

        # timer1 each time expires on a different core
        if id == 1:
            if lcore == lcore_tim1:
                return -1, "Fail [lcore == lcore_tim1 (%d, %d)]" \
                    % (lcore, lcore_tim1)
            lcore_tim1 = lcore
            if cnt > 10:
                return -1, "Fail [tim1 cnt > 10]"

        # timer2 always expires on the same core
        if id == 2:
            if lcore_tim2 == -1:
                lcore_tim2 = lcore
            elif lcore != lcore_tim2:
                return -1, "Fail [lcore != lcore_tim2 (%d, %d)]" \
                    % (lcore, lcore_tim2)
            if cnt > 30:
                return -1, "Fail [tim2 cnt > 30]"

        # timer3 always expires on the same core
        if id == 3:
            if lcore_tim3 == -1:
                lcore_tim3 = lcore
            elif lcore != lcore_tim3:
                return -1, "Fail [lcore_tim3 changed (%d -> %d)]" \
                    % (lcore_tim3, lcore)
            if cnt > 30:
                return -1, "Fail [tim3 cnt > 30]"

    # must be 2 different cores
    if lcore_tim0 == lcore_tim3:
        return -1, "Fail [lcore_tim0 (%d) == lcore_tim3 (%d)]" \
            % (lcore_tim0, lcore_tim3)

    return 0, "Success"
def ring_autotest(child, test_name):
    child.sendline(test_name)
    index = child.expect(["Test OK", "Test Failed",
                          pexpect.TIMEOUT], timeout=2)
    if index == 1:
        return -1, "Fail"
    elif index == 2:
        return -1, "Fail [Timeout]"

    return 0, "Success"