numam-spdk/test/unit/unittest.sh

#!/usr/bin/env bash
#
# Environment variables:
#  $valgrind      Specify the valgrind command line. If not set, a
#                 default command line is used.
#  $UT_COVERAGE   Output directory for the coverage report; defaults
#                 to "ut_coverage".
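#
# Example (hypothetical invocation overriding both defaults):
#   valgrind='valgrind --leak-check=full --error-exitcode=2' \
#   UT_COVERAGE=/tmp/ut_cov ./test/unit/unittest.sh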
set -xe
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $(dirname $0)/../..)
cd "$rootdir"
# If ASAN is enabled, use it. Otherwise fall back to valgrind if it is
# installed, but allow the env variable to override the default shown below.
if [ -z ${valgrind+x} ]; then
	if grep -q '#undef SPDK_CONFIG_ASAN' $rootdir/include/spdk/config.h && hash valgrind; then
		valgrind='valgrind --leak-check=full --error-exitcode=2'
	else
		valgrind=''
	fi
fi
# set up local unit test coverage if lcov is available
if hash lcov && grep -q '#define SPDK_CONFIG_COVERAGE 1' $rootdir/include/spdk/config.h; then
	cov_avail="yes"
else
	cov_avail="no"
fi
if [ "$cov_avail" = "yes" ]; then
# set unit test output dir if not specified in env var
if [ -z ${UT_COVERAGE+x} ]; then
UT_COVERAGE="ut_coverage"
fi
mkdir -p $UT_COVERAGE
export LCOV_OPTS="
--rc lcov_branch_coverage=1
--rc lcov_function_coverage=1
--rc genhtml_branch_coverage=1
--rc genhtml_function_coverage=1
--rc genhtml_legend=1
--rc geninfo_all_blocks=1
"
export LCOV="lcov $LCOV_OPTS --no-external"
# zero out coverage data
$LCOV -q -c -i -d . -t "Baseline" -o $UT_COVERAGE/ut_cov_base.info
fi
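# (the lcov capture above, and the test-run capture at the end of this
# script, work from the .gcno/.gcda files that the coverage-instrumented
# build writes next to each object file)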
# workaround for valgrind v3.13 on arm64
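# (assumption: masking the hardware capabilities advertised to the dynamic
# linker keeps glibc from selecting optimized routines whose instructions
# valgrind v3.13 cannot decode on arm64)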
if [ $(uname -m) = "aarch64" ]; then
	export LD_HWCAP_MASK=1
fi
$valgrind $testdir/include/spdk/histogram_data.h/histogram_ut
$valgrind $testdir/lib/bdev/bdev.c/bdev_ut
$valgrind $testdir/lib/bdev/bdev_raid.c/bdev_raid_ut
$valgrind $testdir/lib/bdev/bdev_zone.c/bdev_zone_ut
$valgrind $testdir/lib/bdev/part.c/part_ut
$valgrind $testdir/lib/bdev/scsi_nvme.c/scsi_nvme_ut
$valgrind $testdir/lib/bdev/gpt/gpt.c/gpt_ut
$valgrind $testdir/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut
$valgrind $testdir/lib/bdev/vbdev_zone_block.c/vbdev_zone_block_ut
if grep -q '#define SPDK_CONFIG_CRYPTO 1' $rootdir/include/spdk/config.h; then
	$valgrind $testdir/lib/bdev/crypto.c/crypto_ut
fi
if grep -q '#define SPDK_CONFIG_REDUCE 1' $rootdir/include/spdk/config.h; then
	$valgrind $testdir/lib/bdev/compress.c/compress_ut
fi
if grep -q '#define SPDK_CONFIG_PMDK 1' $rootdir/include/spdk/config.h; then
	$valgrind $testdir/lib/bdev/pmem/bdev_pmem_ut
fi
$valgrind $testdir/lib/bdev/mt/bdev.c/bdev_ut
$valgrind $testdir/lib/blob/blob.c/blob_ut
$valgrind $testdir/lib/blobfs/tree.c/tree_ut
$valgrind $testdir/lib/blobfs/blobfs_async_ut/blobfs_async_ut
# blobfs_sync_ut hangs when run under valgrind, so don't use $valgrind
$testdir/lib/blobfs/blobfs_sync_ut/blobfs_sync_ut
$valgrind $testdir/lib/blobfs/blobfs_bdev.c/blobfs_bdev_ut
$valgrind $testdir/lib/event/subsystem.c/subsystem_ut
$valgrind $testdir/lib/event/app.c/app_ut
$valgrind $testdir/lib/sock/sock.c/sock_ut
$valgrind $testdir/lib/nvme/nvme.c/nvme_ut
$valgrind $testdir/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut
$valgrind $testdir/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut
$valgrind $testdir/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut
$valgrind $testdir/lib/nvme/nvme_ns.c/nvme_ns_ut
$valgrind $testdir/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut
$valgrind $testdir/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut
$valgrind $testdir/lib/nvme/nvme_qpair.c/nvme_qpair_ut
$valgrind $testdir/lib/nvme/nvme_pcie.c/nvme_pcie_ut
$valgrind $testdir/lib/nvme/nvme_quirks.c/nvme_quirks_ut
$valgrind $testdir/lib/nvme/nvme_tcp.c/nvme_tcp_ut
if grep -q '#define SPDK_CONFIG_RDMA 1' $rootdir/include/spdk/config.h; then
	$valgrind $testdir/lib/nvme/nvme_rdma.c/nvme_rdma_ut
fi
$valgrind $testdir/lib/ioat/ioat.c/ioat_ut
$valgrind $testdir/lib/json/json_parse.c/json_parse_ut
$valgrind $testdir/lib/json/json_util.c/json_util_ut
$valgrind $testdir/lib/json/json_write.c/json_write_ut
$valgrind $testdir/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut
$valgrind $testdir/lib/log/log.c/log_ut
$valgrind $testdir/lib/nvmf/ctrlr.c/ctrlr_ut
$valgrind $testdir/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut
$valgrind $testdir/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut
if grep -q '#define SPDK_CONFIG_RDMA 1' $rootdir/include/spdk/config.h; then
	$valgrind $testdir/lib/nvmf/rdma.c/rdma_ut
fi
$valgrind $testdir/lib/nvmf/subsystem.c/subsystem_ut
$valgrind $testdir/lib/nvmf/tcp.c/tcp_ut
$valgrind $testdir/lib/scsi/dev.c/dev_ut
$valgrind $testdir/lib/scsi/lun.c/lun_ut
$valgrind $testdir/lib/scsi/scsi.c/scsi_ut
$valgrind $testdir/lib/scsi/scsi_bdev.c/scsi_bdev_ut
$valgrind $testdir/lib/scsi/scsi_pr.c/scsi_pr_ut
$valgrind $testdir/lib/lvol/lvol.c/lvol_ut
$valgrind $testdir/lib/notify/notify.c/notify_ut
$valgrind $testdir/lib/iscsi/conn.c/conn_ut
$valgrind $testdir/lib/iscsi/param.c/param_ut
$valgrind $testdir/lib/iscsi/tgt_node.c/tgt_node_ut $testdir/lib/iscsi/tgt_node.c/tgt_node.conf
$valgrind $testdir/lib/iscsi/iscsi.c/iscsi_ut
$valgrind $testdir/lib/iscsi/init_grp.c/init_grp_ut $testdir/lib/iscsi/init_grp.c/init_grp.conf
$valgrind $testdir/lib/iscsi/portal_grp.c/portal_grp_ut $testdir/lib/iscsi/portal_grp.c/portal_grp.conf
if grep -q '#define SPDK_CONFIG_REDUCE 1' $rootdir/include/spdk/config.h; then
	$valgrind $testdir/lib/reduce/reduce.c/reduce_ut
fi
$valgrind $testdir/lib/thread/thread.c/thread_ut
$valgrind $testdir/lib/util/base64.c/base64_ut
$valgrind $testdir/lib/util/bit_array.c/bit_array_ut
$valgrind $testdir/lib/util/cpuset.c/cpuset_ut
$valgrind $testdir/lib/util/crc16.c/crc16_ut
$valgrind $testdir/lib/util/crc32_ieee.c/crc32_ieee_ut
$valgrind $testdir/lib/util/crc32c.c/crc32c_ut
$valgrind $testdir/lib/util/string.c/string_ut
$valgrind $testdir/lib/util/dif.c/dif_ut
if [ $(uname -s) = Linux ]; then
	$valgrind $testdir/lib/vhost/vhost.c/vhost_ut
	$valgrind $testdir/lib/ftl/ftl_rwb.c/ftl_rwb_ut
	$valgrind $testdir/lib/ftl/ftl_ppa/ftl_ppa_ut
	$valgrind $testdir/lib/ftl/ftl_band.c/ftl_band_ut
	$valgrind $testdir/lib/ftl/ftl_reloc.c/ftl_reloc_ut
	$valgrind $testdir/lib/ftl/ftl_wptr/ftl_wptr_ut
	$valgrind $testdir/lib/ftl/ftl_md/ftl_md_ut
	$valgrind $testdir/lib/ftl/ftl_io.c/ftl_io_ut
fi
if [ -e $testdir/lib/nvmf/fc.c/fc_ut ]; then
	$valgrind $testdir/lib/nvmf/fc.c/fc_ut
fi
if [ -e $testdir/lib/nvmf/fc_ls.c/fc_ls_ut ]; then
	$valgrind $testdir/lib/nvmf/fc_ls.c/fc_ls_ut
fi
# local unit test coverage
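# (the "lcov -a" calls below merge the zero-count baseline with the
# post-run data; the "-r" calls then strip non-SPDK sources such as apps,
# DPDK, examples, public headers, and test code from the final report)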
if [ "$cov_avail" = "yes" ]; then
$LCOV -q -d . -c -t "$(hostname)" -o $UT_COVERAGE/ut_cov_test.info
$LCOV -q -a $UT_COVERAGE/ut_cov_base.info -a $UT_COVERAGE/ut_cov_test.info -o $UT_COVERAGE/ut_cov_total.info
$LCOV -q -a $UT_COVERAGE/ut_cov_total.info -o $UT_COVERAGE/ut_cov_unit.info
$LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/app/*" -o $UT_COVERAGE/ut_cov_unit.info
$LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/dpdk/*" -o $UT_COVERAGE/ut_cov_unit.info
$LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/examples/*" -o $UT_COVERAGE/ut_cov_unit.info
$LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/include/*" -o $UT_COVERAGE/ut_cov_unit.info
$LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/lib/vhost/rte_vhost/*" -o $UT_COVERAGE/ut_cov_unit.info
$LCOV -q -r $UT_COVERAGE/ut_cov_unit.info "$rootdir/test/*" -o $UT_COVERAGE/ut_cov_unit.info
rm -f $UT_COVERAGE/ut_cov_base.info $UT_COVERAGE/ut_cov_test.info
genhtml $UT_COVERAGE/ut_cov_unit.info --output-directory $UT_COVERAGE
# git -C option not used for compatibility reasons
cd $rootdir
git clean -f "*.gcda"
cd -
fi
set +x
echo
echo
echo "====================="
echo "All unit tests passed"
echo "====================="
if [ "$cov_avail" = "yes" ]; then
echo "Note: coverage report is here: $rootdir/$UT_COVERAGE"
else
echo "WARN: lcov not installed or SPDK built without coverage!"
fi
if grep -q '#undef SPDK_CONFIG_ASAN' $rootdir/include/spdk/config.h && [ "$valgrind" = "" ]; then
echo "WARN: neither valgrind nor ASAN is enabled!"
fi
echo
echo