check_format: Reformat the Bash code in compliance with shfmt

Change-Id: I93e7b9d355870b0528a0ac3382fba1a10a558d45
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1718
Community-CI: Mellanox Build Bot
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Author: Michal Berger
Date: 2020-05-07 13:27:06 +02:00 (committed by Jim Harris)
Parent: 0c1d022b57
Commit: 844c8ec383
135 changed files with 3368 additions and 3397 deletions
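The reformatting shown in the diff below is mechanical output of shfmt (mvdan/sh). As a rough sketch of how such a pass can be reproduced locally (the exact flag set wired into SPDK's check_format.sh is an assumption here, not taken from this page):

    # Rewrite all tracked Bash scripts in place: -i 0 keeps tab indentation,
    # -bn lets a continuation line start with && or |, -ci indents case branches,
    # -sr puts a space after redirect operators such as 2> /dev/null.
    git ls-files '*.sh' | xargs shfmt -w -i 0 -bn -ci -sr

Running the same command with -d instead of -w only prints the would-be changes, which is how a CI check can enforce the style without modifying files.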


@@ -35,7 +35,7 @@ $MAKE cc_version
 $MAKE cxx_version
 echo "** END ** Info for Hostname: $HOSTNAME"
-function ocf_precompile {
+function ocf_precompile() {
 # We compile OCF sources ourselves
 # They don't need to be checked with scanbuild and code coverage is not applicable
 # So we precompile OCF now for further use as standalone static library
@@ -48,7 +48,7 @@ function ocf_precompile {
 ./configure $config_params
 }
-function make_fail_cleanup {
+function make_fail_cleanup() {
 if [ -d $out/scan-build-tmp ]; then
 scanoutput=$(ls -1 $out/scan-build-tmp/)
 mv $out/scan-build-tmp/$scanoutput $out/scan-build
@@ -58,7 +58,7 @@ function make_fail_cleanup {
 false
 }
-function scanbuild_make {
+function scanbuild_make() {
 pass=true
 $scanbuild $MAKE $MAKEFLAGS > $out/build_output.txt && rm -rf $out/scan-build-tmp || make_fail_cleanup
 xtrace_disable
@@ -92,7 +92,7 @@ function scanbuild_make {
 $pass
 }
-function porcelain_check {
+function porcelain_check() {
 if [ $(git status --porcelain --ignore-submodules | wc -l) -ne 0 ]; then
 echo "Generated files missing from .gitignore:"
 git status --porcelain --ignore-submodules
@@ -103,7 +103,7 @@ function porcelain_check {
 # Check that header file dependencies are working correctly by
 # capturing a binary's stat data before and after touching a
 # header file and re-making.
-function header_dependency_check {
+function header_dependency_check() {
 STAT1=$(stat app/spdk_tgt/spdk_tgt)
 sleep 1
 touch lib/nvme/nvme_internal.h
@@ -116,7 +116,7 @@ function header_dependency_check {
 fi
 }
-function test_make_uninstall {
+function test_make_uninstall() {
 # Create empty file to check if it is not deleted by target uninstall
 touch "$SPDK_WORKSPACE/usr/lib/sample_xyz.a"
 $MAKE $MAKEFLAGS uninstall DESTDIR="$SPDK_WORKSPACE" prefix=/usr
@@ -127,14 +127,14 @@ function test_make_uninstall {
 fi
 }
-function build_doc {
+function build_doc() {
 $MAKE -C "$rootdir"/doc --no-print-directory $MAKEFLAGS &> "$out"/doxygen.log
 if [ -s "$out"/doxygen.log ]; then
 cat "$out"/doxygen.log
 echo "Doxygen errors found!"
 exit 1
 fi
-if hash pdflatex 2>/dev/null; then
+if hash pdflatex 2> /dev/null; then
 $MAKE -C "$rootdir"/doc/output/latex --no-print-directory $MAKEFLAGS &>> "$out"/doxygen.log
 fi
 mkdir -p "$out"/doc
@@ -149,7 +149,7 @@ function build_doc {
 rm -rf "$rootdir"/doc/output
 }
-function autobuild_test_suite {
+function autobuild_test_suite() {
 run_test "autobuild_check_format" ./scripts/check_format.sh
 run_test "autobuild_external_code" sudo -E $rootdir/test/external_code/test_make.sh $rootdir
 if [ "$SPDK_TEST_OCF" -eq 1 ]; then


@@ -82,17 +82,15 @@ if [ $(uname -s) = Linux ]; then
 # If some OCSSD device is bound to other driver than nvme we won't be able to
 # discover if it is OCSSD or not so load the kernel driver first.
-while IFS= read -r -d '' dev
-do
+while IFS= read -r -d '' dev; do
 # Send Open Channel 2.0 Geometry opcode "0xe2" - not supported by NVMe device.
-if nvme admin-passthru $dev --namespace-id=1 --data-len=4096 --opcode=0xe2 --read >/dev/null; then
+if nvme admin-passthru $dev --namespace-id=1 --data-len=4096 --opcode=0xe2 --read > /dev/null; then
 bdf="$(basename $(readlink -e /sys/class/nvme/${dev#/dev/}/device))"
 echo "INFO: blacklisting OCSSD device: $dev ($bdf)"
 PCI_BLACKLIST+=" $bdf"
 OCSSD_PCI_DEVICES+=" $bdf"
 fi
 done < <(find /dev -maxdepth 1 -regex '/dev/nvme[0-9]+' -print0)
 export OCSSD_PCI_DEVICES
@@ -102,8 +100,8 @@ if [ $(uname -s) = Linux ]; then
 if [[ -n "$PCI_BLACKLIST" ]]; then
 # shellcheck disable=SC2097,SC2098
 PCI_WHITELIST="$PCI_BLACKLIST" \
 PCI_BLACKLIST="" \
 DRIVER_OVERRIDE="pci-stub" \
 ./scripts/setup.sh
 # Export our blacklist so it will take effect during next setup.sh
@@ -161,7 +159,7 @@ if [ $SPDK_RUN_FUNCTIONAL_TEST -eq 1 ]; then
 run_test "json_config" ./test/json_config/json_config.sh
 run_test "alias_rpc" test/json_config/alias_rpc/alias_rpc.sh
 run_test "spdkcli_tcp" test/spdkcli/tcp.sh
 run_test "dpdk_mem_utility" test/dpdk_memory_utility/test_dpdk_mem_info.sh
 run_test "event" test/event/event.sh
 if [ $SPDK_TEST_BLOCKDEV -eq 1 ]; then
@@ -231,8 +229,8 @@ if [ $SPDK_RUN_FUNCTIONAL_TEST -eq 1 ]; then
 run_test "spdkcli_nvmf_tcp" ./test/spdkcli/nvmf.sh
 run_test "nvmf_identify_passthru" test/nvmf/target/identify_passthru.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
 elif [ "$SPDK_TEST_NVMF_TRANSPORT" = "fc" ]; then
 run_test "nvmf_fc" ./test/nvmf/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
 run_test "spdkcli_nvmf_fc" ./test/spdkcli/nvmf.sh
 else
 echo "unknown NVMe transport, please specify rdma, tcp, or fc."
 exit 1
@@ -280,10 +278,10 @@ if [ $SPDK_RUN_FUNCTIONAL_TEST -eq 1 ]; then
 run_test "vmd" ./test/vmd/vmd.sh
 fi
 if [ $SPDK_TEST_REDUCE -eq 1 ]; then
 run_test "compress_qat" ./test/compress/compress.sh "qat"
 run_test "compress_isal" ./test/compress/compress.sh "isal"
 fi
 if [ $SPDK_TEST_OPAL -eq 1 ]; then
 run_test "nvme_opal" ./test/nvme/nvme_opal.sh


@@ -26,7 +26,7 @@ mkdir ${base_dir}
 cp ${script_dir}/ceph.conf $ceph_conf
 if [ ! -e $image ]; then
 fallocate -l 4G $image
 fi
 mknod ${dev} b 7 200 || true
@@ -39,8 +39,8 @@ echo "Partitioning ${dev}"
 ${PARTED} ${dev} mktable gpt
 sleep 2
 ${PARTED} ${dev} mkpart primary 0% 2GiB
 ${PARTED} ${dev} mkpart primary 2GiB 100%
 partno=0
 echo "Setting name on ${dev}"
@@ -91,7 +91,7 @@ monmaptool --create --clobber --add a ${mon_ip}:12046 --print ${base_dir}/monmap
 sh -c "ulimit -c unlimited && exec ceph-mon --mkfs -c ${ceph_conf} -i a --monmap=${base_dir}/monmap --keyring=${base_dir}/keyring --mon-data=${mon_dir}"
-if [ $update_config = true ] ;then
+if [ $update_config = true ]; then
 sed -i 's/mon addr = /mon addr = v2:/g' $ceph_conf
 fi
@@ -106,13 +106,13 @@ chmod a+r /etc/ceph/ceph.client.admin.keyring
 ceph-run sh -c "ulimit -n 16384 && ulimit -c unlimited && exec ceph-mon -c ${ceph_conf} -i a --keyring=${base_dir}/keyring --pid-file=${base_dir}/pid/root@$(hostname).pid --mon-data=${mon_dir}" || true
 # after ceph-mon creation, ceph -s should work.
-if [ $update_config = true ] ;then
+if [ $update_config = true ]; then
 # start to get whole log.
 ceph-conf --name mon.a --show-config-value log_file
 # add fsid to ceph config file.
-fsid=$(ceph -s | grep id |awk '{print $2}')
+fsid=$(ceph -s | grep id | awk '{print $2}')
 sed -i 's/perf = true/perf = true\n\tfsid = '$fsid' \n/g' $ceph_conf
 # unify the filesystem with the old versions.
 sed -i 's/perf = true/perf = true\n\tosd objectstore = filestore\n/g' $ceph_conf


@@ -6,16 +6,16 @@ cd $BASEDIR
 # exit on errors
 set -e
-if ! hash nproc 2>/dev/null; then
+if ! hash nproc 2> /dev/null; then
 function nproc() {
 echo 8
 }
 fi
 function version_lt() {
-[ $( echo -e "$1\n$2" | sort -V | head -1 ) != "$1" ]
+[ $(echo -e "$1\n$2" | sort -V | head -1) != "$1" ]
 }
 rc=0
@@ -30,13 +30,13 @@ while read -r perm _res0 _res1 path; do
 fname=$(basename -- "$path")
 case ${fname##*.} in
-c|h|cpp|cc|cxx|hh|hpp|md|html|js|json|svg|Doxyfile|yml|LICENSE|README|conf|in|Makefile|mk|gitignore|go|txt)
+c | h | cpp | cc | cxx | hh | hpp | md | html | js | json | svg | Doxyfile | yml | LICENSE | README | conf | in | Makefile | mk | gitignore | go | txt)
 # These file types should never be executable
 if [ "$perm" -eq 100755 ]; then
 echo "ERROR: $path is marked executable but is a code file."
 rc=1
 fi
 ;;
 *)
 shebang=$(head -n 1 $path | cut -c1-3)
@@ -54,7 +54,7 @@ while read -r perm _res0 _res1 path; do
 rc=1
 fi
 fi
 ;;
 esac
 done <<< "$(git grep -I --name-only --untracked -e . | git ls-files -s)"
@@ -65,8 +65,7 @@ fi
 if hash astyle; then
 echo -n "Checking coding style..."
-if [ "$(astyle -V)" \< "Artistic Style Version 3" ]
-then
+if [ "$(astyle -V)" \< "Artistic Style Version 3" ]; then
 echo -n " Your astyle version is too old so skipping coding style checks. Please update astyle to at least 3.0.1 version..."
 else
 rm -f astyle.log
@@ -75,9 +74,9 @@ if hash astyle; then
 # as-is to enable ongoing work to synch with a generic upstream DPDK vhost library,
 # rather than making diffs more complicated by a lot of changes to follow SPDK
 # coding standards.
-git ls-files '*.[ch]' '*.cpp' '*.cc' '*.cxx' '*.hh' '*.hpp' | \
-grep -v rte_vhost | grep -v cpp_headers | \
-xargs -P$(nproc) -n10 astyle --options=.astylerc >> astyle.log
+git ls-files '*.[ch]' '*.cpp' '*.cc' '*.cxx' '*.hh' '*.hpp' \
+| grep -v rte_vhost | grep -v cpp_headers \
+| xargs -P$(nproc) -n10 astyle --options=.astylerc >> astyle.log
 if grep -q "^Formatted" astyle.log; then
 echo " errors detected"
 git diff
@@ -96,7 +95,7 @@ else
 echo "You do not have astyle installed so your code style is not being checked!"
 fi
-GIT_VERSION=$( git --version | cut -d' ' -f3 )
+GIT_VERSION=$(git --version | cut -d' ' -f3)
 if version_lt "1.9.5" "${GIT_VERSION}"; then
 # git <1.9.5 doesn't support pathspec magic exclude
@@ -171,8 +170,8 @@ rm -f badcunit.log
 echo -n "Checking blank lines at end of file..."
-if ! git grep -I -l -e . -z './*' ':!*.patch' | \
-xargs -0 -P$(nproc) -n1 scripts/eofnl > eofnl.log; then
+if ! git grep -I -l -e . -z './*' ':!*.patch' \
+| xargs -0 -P$(nproc) -n1 scripts/eofnl > eofnl.log; then
 echo " Incorrect end-of-file formatting detected"
 cat eofnl.log
 rc=1
@@ -203,9 +202,9 @@ else
 fi
 rm -f scripts/includes.log
-if hash pycodestyle 2>/dev/null; then
+if hash pycodestyle 2> /dev/null; then
 PEP8=pycodestyle
-elif hash pep8 2>/dev/null; then
+elif hash pep8 2> /dev/null; then
 PEP8=pep8
 fi
@@ -228,7 +227,7 @@ else
 echo "You do not have pycodestyle or pep8 installed so your Python style is not being checked!"
 fi
-if hash shellcheck 2>/dev/null; then
+if hash shellcheck 2> /dev/null; then
 echo -n "Checking Bash style..."
 shellcheck_v=$(shellcheck --version | grep -P "version: [0-9\.]+" | cut -d " " -f2)
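The pipeline edits above are the most visible convention in the new formatting: a multi-line pipeline is broken before the | operator, so each continuation line starts with the pipe rather than ending with one. A minimal illustration of the same pattern (the commands are an example, not a line taken from check_format.sh):

    git ls-files '*.sh' \
        | grep -v rte_vhost \
        | xargs -P$(nproc) -n10 shfmt -d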


@@ -10,7 +10,7 @@ function pci_can_use() {
 local i
 # The '\ ' part is important
-if [[ " $PCI_BLACKLIST " =~ \ $1\ ]] ; then
+if [[ " $PCI_BLACKLIST " =~ \ $1\ ]]; then
 return 1
 fi
@@ -20,7 +20,7 @@ function pci_can_use() {
 fi
 for i in $PCI_WHITELIST; do
-if [ "$i" == "$1" ] ; then
+if [ "$i" == "$1" ]; then
 return 0
 fi
 done
@@ -28,7 +28,7 @@ function pci_can_use() {
 return 1
 }
-cache_pci_init () {
+cache_pci_init() {
 local -gA pci_bus_cache
 [[ -z ${pci_bus_cache[*]} || $CMD == reset ]] || return 1
@@ -36,22 +36,22 @@ cache_pci_init () {
 pci_bus_cache=()
 }
-cache_pci () {
+cache_pci() {
 local pci=$1 class=$2 vendor=$3 device=$4
 if [[ -n $class ]]; then
-class=0x${class/0x}
+class=0x${class/0x/}
 pci_bus_cache["$class"]="${pci_bus_cache["$class"]:+${pci_bus_cache["$class"]} }$pci"
 fi
 if [[ -n $vendor && -n $device ]]; then
-vendor=0x${vendor/0x} device=0x${device/0x}
+vendor=0x${vendor/0x/} device=0x${device/0x/}
 pci_bus_cache["$vendor"]="${pci_bus_cache["$vendor"]:+${pci_bus_cache["$vendor"]} }$pci"
 pci_bus_cache["$device"]="${pci_bus_cache["$device"]:+${pci_bus_cache["$device"]} }$pci"
 pci_bus_cache["$vendor:$device"]="${pci_bus_cache["$vendor:$device"]:+${pci_bus_cache["$vendor:$device"]} }$pci"
 fi
 }
-cache_pci_bus_sysfs () {
+cache_pci_bus_sysfs() {
 [[ -e /sys/bus/pci/devices ]] || return 1
 cache_pci_init || return 0
@@ -60,13 +60,13 @@ cache_pci_bus_sysfs () {
 local class vendor device
 for pci in /sys/bus/pci/devices/*; do
-class=$(<"$pci/class") vendor=$(<"$pci/vendor") device=$(<"$pci/device")
+class=$(< "$pci/class") vendor=$(< "$pci/vendor") device=$(< "$pci/device")
 cache_pci "${pci##*/}" "$class" "$vendor" "$device"
 done
 }
-cache_pci_bus_lspci () {
-hash lspci 2>/dev/null || return 1
+cache_pci_bus_lspci() {
+hash lspci 2> /dev/null || return 1
 cache_pci_init || return 0
@@ -86,8 +86,8 @@ cache_pci_bus_lspci () {
 done < <(lspci -Dnmm)
 }
-cache_pci_bus_pciconf () {
-hash pciconf 2>/dev/null || return 1
+cache_pci_bus_pciconf() {
+hash pciconf 2> /dev/null || return 1
 cache_pci_init || return 0
@@ -95,25 +95,25 @@ cache_pci_bus_pciconf () {
 local pci domain bus device function
 while read -r pci class _ vd _; do
-IFS=":" read -r domain bus device function _ <<<"${pci##*pci}"
+IFS=":" read -r domain bus device function _ <<< "${pci##*pci}"
 pci=$(printf '%04x:%02x:%02x:%x' \
 "$domain" "$bus" "$device" "$function")
-class=$(printf '0x%06x' $(( class )))
-vendor=$(printf '0x%04x' $(( vd & 0xffff )))
-device=$(printf '0x%04x' $(( (vd >> 16) & 0xffff )))
+class=$(printf '0x%06x' $((class)))
+vendor=$(printf '0x%04x' $((vd & 0xffff)))
+device=$(printf '0x%04x' $(((vd >> 16) & 0xffff)))
 cache_pci "$pci" "$class" "$vendor" "$device"
 done < <(pciconf -l)
 }
-cache_pci_bus () {
+cache_pci_bus() {
 case "$(uname -s)" in
 Linux) cache_pci_bus_lspci || cache_pci_bus_sysfs ;;
 FreeBSD) cache_pci_bus_pciconf ;;
 esac
 }
-iter_all_pci_sysfs () {
+iter_all_pci_sysfs() {
 cache_pci_bus_sysfs || return 1
 # default to class of the nvme devices
@@ -121,9 +121,9 @@ iter_all_pci_sysfs () {
 local pci pcis
 [[ -n ${pci_bus_cache["$find"]} ]] || return 0
-read -ra pcis <<<"${pci_bus_cache["$find"]}"
-if (( findx )); then
+read -ra pcis <<< "${pci_bus_cache["$find"]}"
+if ((findx)); then
 printf '%s\n' "${pcis[@]::findx}"
 else
 printf '%s\n' "${pcis[@]}"
@@ -139,22 +139,22 @@ function iter_all_pci_class_code() {
 subclass="$(printf %02x $((0x$2)))"
 progif="$(printf %02x $((0x$3)))"
-if hash lspci &>/dev/null; then
+if hash lspci &> /dev/null; then
 if [ "$progif" != "00" ]; then
-lspci -mm -n -D | \
-grep -i -- "-p${progif}" | \
-awk -v cc="\"${class}${subclass}\"" -F " " \
+lspci -mm -n -D \
+| grep -i -- "-p${progif}" \
+| awk -v cc="\"${class}${subclass}\"" -F " " \
 '{if (cc ~ $2) print $1}' | tr -d '"'
 else
-lspci -mm -n -D | \
-awk -v cc="\"${class}${subclass}\"" -F " " \
+lspci -mm -n -D \
+| awk -v cc="\"${class}${subclass}\"" -F " " \
 '{if (cc ~ $2) print $1}' | tr -d '"'
 fi
-elif hash pciconf &>/dev/null; then
-local addr=($(pciconf -l | grep -i "class=0x${class}${subclass}${progif}" | \
-cut -d$'\t' -f1 | sed -e 's/^[a-zA-Z0-9_]*@pci//g' | tr ':' ' '))
+elif hash pciconf &> /dev/null; then
+local addr=($(pciconf -l | grep -i "class=0x${class}${subclass}${progif}" \
+| cut -d$'\t' -f1 | sed -e 's/^[a-zA-Z0-9_]*@pci//g' | tr ':' ' '))
 printf "%04x:%02x:%02x:%x\n" ${addr[0]} ${addr[1]} ${addr[2]} ${addr[3]}
-elif iter_all_pci_sysfs "$(printf '0x%06x' $(( 0x$progif | 0x$subclass << 8 | 0x$class << 16 )))"; then
+elif iter_all_pci_sysfs "$(printf '0x%06x' $((0x$progif | 0x$subclass << 8 | 0x$class << 16)))"; then
 :
 else
 echo "Missing PCI enumeration utility" >&2
@@ -169,12 +169,12 @@ function iter_all_pci_dev_id() {
 ven_id="$(printf %04x $((0x$1)))"
 dev_id="$(printf %04x $((0x$2)))"
-if hash lspci &>/dev/null; then
+if hash lspci &> /dev/null; then
 lspci -mm -n -D | awk -v ven="\"$ven_id\"" -v dev="\"${dev_id}\"" -F " " \
 '{if (ven ~ $3 && dev ~ $4) print $1}' | tr -d '"'
-elif hash pciconf &>/dev/null; then
-local addr=($(pciconf -l | grep -i "chip=0x${dev_id}${ven_id}" | \
-cut -d$'\t' -f1 | sed -e 's/^[a-zA-Z0-9_]*@pci//g' | tr ':' ' '))
+elif hash pciconf &> /dev/null; then
+local addr=($(pciconf -l | grep -i "chip=0x${dev_id}${ven_id}" \
+| cut -d$'\t' -f1 | sed -e 's/^[a-zA-Z0-9_]*@pci//g' | tr ':' ' '))
 printf "%04x:%02x:%02x:%x\n" ${addr[0]} ${addr[1]} ${addr[2]} ${addr[3]}
 elif iter_all_pci_sysfs "0x$ven_id:0x$dev_id"; then
 :


@@ -2,13 +2,11 @@
 set -e
-function err()
-{
+function err() {
 echo "$@" >&2
 }
-function usage()
-{
+function usage() {
 err "Detect compiler and linker versions, generate mk/cc.mk"
 err ""
 err "Usage: ./detect_cc.sh [OPTION]..."
@@ -24,11 +22,9 @@ function usage()
 err " --cross-prefix=prefix Use the given prefix for the cross compiler toolchain"
 }
 for i in "$@"; do
 case "$i" in
--h|--help)
+-h | --help)
 usage
 exit 0
 ;;
@@ -64,6 +60,7 @@ for i in "$@"; do
 err "Unrecognized option $i"
 usage
 exit 1
+;;
 esac
 done
@@ -105,6 +102,7 @@ case "$LD_TYPE" in
 *)
 err "Unsupported linker: $LD"
 exit 1
+;;
 esac
 CCAR="ar"
@@ -128,7 +126,7 @@ if [ -n "$CROSS_PREFIX" ]; then
 # Try to fix this automatically. Maybe the user set CROSS_PREFIX but not CC.
 CC=$CROSS_PREFIX-$CC
-if hash $CC 2>/dev/null; then
+if hash $CC 2> /dev/null; then
 expected_prefix=$($CC -dumpmachine)
 if [ "$expected_prefix" = "$CROSS_PREFIX" ]; then
@@ -151,7 +149,7 @@ if [ -n "$CROSS_PREFIX" ]; then
 # Try to fix this automatically. Maybe the user set CROSS_PREFIX but not CXX.
 CXX=$CROSS_PREFIX-$CXX
-if hash $CXX 2>/dev/null; then
+if hash $CXX 2> /dev/null; then
 expected_prefix=$($CXX -dumpmachine)
 if [ "$expected_prefix" = "$CROSS_PREFIX" ]; then


@@ -4,7 +4,7 @@ set -e
 rootdir=$(readlink -f $(dirname $0))/..
-function usage {
+function usage() {
 echo "Usage: [-j] $0 -n BDEV_NAME -d BASE_BDEV [-u UUID] [-c CACHE]"
 echo "UUID is required when restoring device state"
 echo
@@ -14,8 +14,7 @@ function usage {
 echo "CACHE - name of the bdev to be used as write buffer cache"
 }
-function create_json_config()
-{
+function create_json_config() {
 echo "{"
 echo '"subsystem": "bdev",'
 echo '"config": ['
@@ -40,14 +39,18 @@ uuid=00000000-0000-0000-0000-000000000000
 while getopts ":c:d:hn:u:" arg; do
 case "$arg" in
 n) name=$OPTARG ;;
 d) base_bdev=$OPTARG ;;
 u) uuid=$OPTARG ;;
 c) cache=$OPTARG ;;
-h) usage
-exit 0 ;;
-*) usage
-exit 1 ;;
+h)
+usage
+exit 0
+;;
+*)
+usage
+exit 1
+;;
 esac
 done


@@ -5,22 +5,18 @@ set -e
 rootdir=$(readlink -f $(dirname $0))/..
 source "$rootdir/scripts/common.sh"
-function create_classic_config()
-{
+function create_classic_config() {
 echo "[Nvme]"
-for (( i=0; i < ${#bdfs[@]}; i++))
-do
+for ((i = 0; i < ${#bdfs[@]}; i++)); do
 echo " TransportID \"trtype:PCIe traddr:${bdfs[i]}\" Nvme$i"
 done
 }
-function create_json_config()
-{
+function create_json_config() {
 echo "{"
 echo '"subsystem": "bdev",'
 echo '"config": ['
-for (( i=0; i < ${#bdfs[@]}; i++))
-do
+for ((i = 0; i < ${#bdfs[@]}; i++)); do
 echo '{'
 echo '"params": {'
 echo '"trtype": "PCIe",'
@@ -28,7 +24,7 @@ function create_json_config()
 echo "\"traddr\": \"${bdfs[i]}\""
 echo '},'
 echo '"method": "bdev_nvme_attach_controller"'
-if [ -z ${bdfs[i+1]} ]; then
+if [ -z ${bdfs[i + 1]} ]; then
 echo '}'
 else
 echo '},'


@@ -3,8 +3,7 @@
 set -e
-function usage()
-{
+function usage() {
 echo ""
 echo "This script is intended to automate the installation of package dependencies to build SPDK."
 echo "Please run this script as root user or with sudo -E."
@@ -21,8 +20,7 @@ function usage()
 exit 0
 }
-function install_all_dependencies ()
-{
+function install_all_dependencies() {
 INSTALL_DEV_TOOLS=true
 INSTALL_PMEM=true
 INSTALL_FUSE=true
@@ -40,27 +38,31 @@ INSTALL_DOCS=false
 while getopts 'abdfhipr-:' optchar; do
 case "$optchar" in
 -)
 case "$OPTARG" in
-help) usage;;
-all) install_all_dependencies;;
-developer-tools) INSTALL_DEV_TOOLS=true;;
-pmem) INSTALL_PMEM=true;;
-fuse) INSTALL_FUSE=true;;
-rdma) INSTALL_RDMA=true;;
-docs) INSTALL_DOCS=true;;
-*) echo "Invalid argument '$OPTARG'"
-usage;;
-esac
-;;
-h) usage;;
-a) install_all_dependencies;;
-d) INSTALL_DEV_TOOLS=true;;
-p) INSTALL_PMEM=true;;
-f) INSTALL_FUSE=true;;
-r) INSTALL_RDMA=true;;
-b) INSTALL_DOCS=true;;
-*) echo "Invalid argument '$OPTARG'"
-usage;;
+help) usage ;;
+all) install_all_dependencies ;;
+developer-tools) INSTALL_DEV_TOOLS=true ;;
+pmem) INSTALL_PMEM=true ;;
+fuse) INSTALL_FUSE=true ;;
+rdma) INSTALL_RDMA=true ;;
+docs) INSTALL_DOCS=true ;;
+*)
+echo "Invalid argument '$OPTARG'"
+usage
+;;
+esac
+;;
+h) usage ;;
+a) install_all_dependencies ;;
+d) INSTALL_DEV_TOOLS=true ;;
+p) INSTALL_PMEM=true ;;
+f) INSTALL_FUSE=true ;;
+r) INSTALL_RDMA=true ;;
+b) INSTALL_DOCS=true ;;
+*)
+echo "Invalid argument '$OPTARG'"
+usage
+;;
 esac
 done
@@ -174,7 +176,7 @@ elif [ -f /etc/debian_version ]; then
 fi
 if [[ $INSTALL_FUSE == "true" ]]; then
 # Additional dependencies for FUSE and NVMe-CUSE
-if [[ $NAME == "Ubuntu" ]] && (( VERSION_ID_NUM > 1400 && VERSION_ID_NUM < 1900 )); then
+if [[ $NAME == "Ubuntu" ]] && ((VERSION_ID_NUM > 1400 && VERSION_ID_NUM < 1900)); then
 echo "Ubuntu $VERSION_ID does not have libfuse3-dev in mainline repository."
 echo "You can install it manually"
 else
@@ -220,7 +222,7 @@ elif [ -f /etc/SuSE-release ] || [ -f /etc/SUSE-brand ]; then
 # Additional dependencies for building docs
 zypper install -y doxygen mscgen graphviz
 fi
-elif [ $(uname -s) = "FreeBSD" ] ; then
+elif [ $(uname -s) = "FreeBSD" ]; then
 # Minimal install
 pkg install -y gmake cunit openssl git bash misc/e2fsprogs-libuuid python \
 ncurses


@@ -20,7 +20,7 @@ function configure_performance() {
 echo -n "Moving all interrupts off of core 0..."
 count=$(($(nproc) / 4))
 cpumask="e"
-for ((i=1; i<count; i++)); do
+for ((i = 1; i < count; i++)); do
 if [ $((i % 8)) -eq 0 ]; then
 cpumask=",$cpumask"
 fi


@@ -12,7 +12,7 @@ bad_driver=true
 driver_to_bind=uio_pci_generic
 num_vfs=16
-qat_pci_bdfs=( $(lspci -Dd:37c8 | awk '{print $1}') )
+qat_pci_bdfs=($(lspci -Dd:37c8 | awk '{print $1}'))
 if [ ${#qat_pci_bdfs[@]} -eq 0 ]; then
 echo "No QAT devices found. Exiting"
 exit 0
@@ -48,8 +48,8 @@ done
 # Confirm we have all of the virtual functions we asked for.
-qat_vf_bdfs=( $(lspci -Dd:37c9 | awk '{print $1}') )
-if (( ${#qat_vf_bdfs[@]} != ${#qat_pci_bdfs[@]}*num_vfs )); then
+qat_vf_bdfs=($(lspci -Dd:37c9 | awk '{print $1}'))
+if ((${#qat_vf_bdfs[@]} != ${#qat_pci_bdfs[@]} * num_vfs)); then
 echo "Failed to prepare the VFs. Aborting"
 exit 1
 fi


@@ -5,15 +5,17 @@ set -e
 rootdir=$(readlink -f $(dirname $0))/..
 source "$rootdir/scripts/common.sh"
-function usage()
-{
+function usage() {
 if [ $(uname) = Linux ]; then
 options="[config|reset|status|cleanup|help]"
 else
 options="[config|reset|help]"
 fi
-[[ -n $2 ]] && ( echo "$2"; echo ""; )
+[[ -n $2 ]] && (
+echo "$2"
+echo ""
+)
 echo "Helper script for allocating hugepages and binding NVMe, I/OAT, VMD and Virtio devices"
 echo "to a generic VFIO kernel driver. If VFIO is not available on the system, this script"
 echo "will fall back to UIO. NVMe and Virtio devices with active mountpoints will be ignored."
@@ -63,15 +65,15 @@ function usage()
 # back that with a /sys/modules. We also check
 # /sys/bus/pci/drivers/ as neither lsmod nor /sys/modules might
 # contain needed info (like in Fedora-like OS).
-function check_for_driver {
+function check_for_driver() {
 if lsmod | grep -q ${1//-/_}; then
 return 1
 fi
-if [[ -d /sys/module/${1} || \
--d /sys/module/${1//-/_} || \
--d /sys/bus/pci/drivers/${1} || \
--d /sys/bus/pci/drivers/${1//-/_} ]]; then
+if [[ -d /sys/module/${1} || -d \
+/sys/module/${1//-/_} || -d \
+/sys/bus/pci/drivers/${1} || -d \
+/sys/bus/pci/drivers/${1//-/_} ]]; then
 return 2
 fi
 return 0
@@ -137,7 +139,7 @@ function linux_hugetlbfs_mounts() {
 mount | grep ' type hugetlbfs ' | awk '{ print $3 }'
 }
-function get_nvme_name_from_bdf {
+function get_nvme_name_from_bdf() {
 local blknames=()
 set +e
@@ -157,7 +159,7 @@ function get_nvme_name_from_bdf {
 printf '%s\n' "${blknames[@]}"
 }
-function get_virtio_names_from_bdf {
+function get_virtio_names_from_bdf() {
 blk_devs=$(lsblk --nodeps --output NAME)
 virtio_names=()
@@ -170,7 +172,7 @@ function get_virtio_names_from_bdf {
 eval "$2=( " "${virtio_names[@]}" " )"
 }
-function configure_linux_pci {
+function configure_linux_pci() {
 local driver_path=""
 driver_name=""
 if [[ -n "${DRIVER_OVERRIDE}" ]]; then
@@ -186,11 +188,11 @@ function configure_linux_pci {
 if [[ "$driver_name" = "igb_uio" ]]; then
 modprobe uio
 fi
-elif [[ -n "$(ls /sys/kernel/iommu_groups)" || \
-(-e /sys/module/vfio/parameters/enable_unsafe_noiommu_mode && \
+elif [[ -n "$(ls /sys/kernel/iommu_groups)" || (-e \
+/sys/module/vfio/parameters/enable_unsafe_noiommu_mode && \
 "$(cat /sys/module/vfio/parameters/enable_unsafe_noiommu_mode)" == "Y") ]]; then
 driver_name=vfio-pci
-elif modinfo uio_pci_generic >/dev/null 2>&1; then
+elif modinfo uio_pci_generic > /dev/null 2>&1; then
 driver_name=uio_pci_generic
 elif [[ -r "$rootdir/dpdk/build/kmod/igb_uio.ko" ]]; then
 driver_path="$rootdir/dpdk/build/kmod/igb_uio.ko"
@@ -240,10 +242,9 @@ function configure_linux_pci {
 TMP=$(mktemp)
 #collect all the device_id info of ioat devices.
 grep "PCI_DEVICE_ID_INTEL_IOAT" $rootdir/include/spdk/pci_ids.h \
 | awk -F"x" '{print $2}' > $TMP
-while IFS= read -r dev_id
-do
+while IFS= read -r dev_id; do
 for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
 if ! pci_can_use $bdf; then
 pci_dev_echo "$bdf" "Skipping un-whitelisted I/OAT device"
@@ -255,33 +256,31 @@ function configure_linux_pci {
 done < $TMP
 rm $TMP
 # IDXD
 TMP=$(mktemp)
 #collect all the device_id info of idxd devices.
 grep "PCI_DEVICE_ID_INTEL_IDXD" $rootdir/include/spdk/pci_ids.h \
 | awk -F"x" '{print $2}' > $TMP
-while IFS= read -r dev_id
-do
+while IFS= read -r dev_id; do
 for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
 if ! pci_can_use $bdf; then
 pci_dev_echo "$bdf" "Skipping un-whitelisted IDXD device"
 continue
 fi
 linux_bind_driver "$bdf" "$driver_name"
 done
 done < $TMP
 rm $TMP
 # virtio
 TMP=$(mktemp)
 #collect all the device_id info of virtio devices.
 grep "PCI_DEVICE_ID_VIRTIO" $rootdir/include/spdk/pci_ids.h \
 | awk -F"x" '{print $2}' > $TMP
-while IFS= read -r dev_id
-do
+while IFS= read -r dev_id; do
 for bdf in ${pci_bus_cache["0x1af4:0x$dev_id"]}; do
 if ! pci_can_use $bdf; then
 pci_dev_echo "$bdf" "Skipping un-whitelisted Virtio device at $bdf"
@@ -305,10 +304,9 @@ function configure_linux_pci {
 TMP=$(mktemp)
 #collect all the device_id info of vmd devices.
 grep "PCI_DEVICE_ID_INTEL_VMD" $rootdir/include/spdk/pci_ids.h \
 | awk -F"x" '{print $2}' > $TMP
-while IFS= read -r dev_id
-do
+while IFS= read -r dev_id; do
 for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
 if [[ -z "$PCI_WHITELIST" ]] || ! pci_can_use $bdf; then
 echo "Skipping un-whitelisted VMD device at $bdf"
@@ -316,7 +314,7 @@ function configure_linux_pci {
 fi
 linux_bind_driver "$bdf" "$driver_name"
 echo " VMD generic kdrv: " "$bdf" "$driver_name"
 done
 done < $TMP
 rm $TMP
@@ -324,7 +322,7 @@ function configure_linux_pci {
 echo "1" > "/sys/bus/pci/rescan"
 }
-function cleanup_linux {
+function cleanup_linux() {
 shopt -s extglob nullglob
 dirs_to_clean=""
 dirs_to_clean="$(echo {/var/run,/tmp}/dpdk/spdk{,_pid}+([0-9])) "
@@ -338,12 +336,12 @@ function cleanup_linux {
 done
 shopt -u extglob nullglob
-files_to_clean+="$(ls -1 /dev/shm/* | \
-grep -E '(spdk_tgt|iscsi|vhost|nvmf|rocksdb|bdevio|bdevperf|vhost_fuzz|nvme_fuzz)_trace|spdk_iscsi_conns' || true) "
+files_to_clean+="$(ls -1 /dev/shm/* \
+| grep -E '(spdk_tgt|iscsi|vhost|nvmf|rocksdb|bdevio|bdevperf|vhost_fuzz|nvme_fuzz)_trace|spdk_iscsi_conns' || true) "
 files_to_clean="$(readlink -e assert_not_empty $files_to_clean || true)"
 if [[ -z "$files_to_clean" ]]; then
 echo "Clean"
-return 0;
+return 0
 fi
 shopt -s extglob
@@ -368,19 +366,19 @@ function cleanup_linux {
 done
 for dir in $dirs_to_clean; do
 if ! echo "$opened_files" | grep -E -q "^$dir\$"; then
 echo "Removing: $dir"
 rmdir $dir
 else
 echo "Still open: $dir"
 fi
 done
 echo "Clean"
 unset dirs_to_clean files_to_clean opened_files
 }
-function configure_linux {
+function configure_linux() {
 configure_linux_pci
 hugetlbfs_mounts=$(linux_hugetlbfs_mounts)
@@ -415,8 +413,8 @@ function configure_linux {
 fi
 MEMLOCK_AMNT=$(ulimit -l)
-if [ "$MEMLOCK_AMNT" != "unlimited" ] ; then
-MEMLOCK_MB=$(( MEMLOCK_AMNT / 1024 ))
+if [ "$MEMLOCK_AMNT" != "unlimited" ]; then
+MEMLOCK_MB=$((MEMLOCK_AMNT / 1024))
 echo ""
 echo "Current user memlock limit: ${MEMLOCK_MB} MB"
 echo ""
@@ -425,7 +423,7 @@ function configure_linux {
 echo -n "To change this, please adjust limits.conf memlock "
 echo "limit for current user."
-if [ $MEMLOCK_AMNT -lt 65536 ] ; then
+if [ $MEMLOCK_AMNT -lt 65536 ]; then
 echo ""
 echo "## WARNING: memlock limit is less than 64MB"
 echo -n "## DPDK with VFIO may not be able to initialize "
@@ -442,7 +440,7 @@ function configure_linux {
 fi
 }
-function reset_linux_pci {
+function reset_linux_pci() {
 # NVMe
 set +e
 check_for_driver nvme
@@ -464,14 +462,13 @@ function reset_linux_pci {
 TMP=$(mktemp)
 #collect all the device_id info of ioat devices.
 grep "PCI_DEVICE_ID_INTEL_IOAT" $rootdir/include/spdk/pci_ids.h \
 | awk -F"x" '{print $2}' > $TMP
 set +e
 check_for_driver ioatdma
 driver_loaded=$?
 set -e
-while IFS= read -r dev_id
-do
+while IFS= read -r dev_id; do
 for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
 if ! pci_can_use $bdf; then
 pci_dev_echo "$bdf" "Skipping un-whitelisted I/OAT device"
@@ -486,44 +483,42 @@ function reset_linux_pci {
 done < $TMP
 rm $TMP
 # IDXD
 TMP=$(mktemp)
 #collect all the device_id info of idxd devices.
 grep "PCI_DEVICE_ID_INTEL_IDXD" $rootdir/include/spdk/pci_ids.h \
 | awk -F"x" '{print $2}' > $TMP
 set +e
 check_for_driver idxd
 driver_loaded=$?
 set -e
-while IFS= read -r dev_id
-do
+while IFS= read -r dev_id; do
 for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
 if ! pci_can_use $bdf; then
 pci_dev_echo "$bdf" "Skipping un-whitelisted IDXD device"
 continue
 fi
 if [ $driver_loaded -ne 0 ]; then
 linux_bind_driver "$bdf" idxd
 else
 linux_unbind_driver "$bdf"
 fi
 done
 done < $TMP
 rm $TMP
 # virtio
 TMP=$(mktemp)
 #collect all the device_id info of virtio devices.
 grep "PCI_DEVICE_ID_VIRTIO" $rootdir/include/spdk/pci_ids.h \
 | awk -F"x" '{print $2}' > $TMP
 # TODO: check if virtio-pci is loaded first and just unbind if it is not loaded
 # Requires some more investigation - for example, some kernels do not seem to have
 # virtio-pci but just virtio_scsi instead. Also need to make sure we get the
 # underscore vs. dash right in the virtio_scsi name.
 modprobe virtio-pci || true
-while IFS= read -r dev_id
-do
+while IFS= read -r dev_id; do
 for bdf in ${pci_bus_cache["0x1af4:0x$dev_id"]}; do
 if ! pci_can_use $bdf; then
 pci_dev_echo "$bdf" "Skipping un-whitelisted Virtio device at"
@@ -538,14 +533,13 @@ function reset_linux_pci {
 TMP=$(mktemp)
 #collect all the device_id info of vmd devices.
 grep "PCI_DEVICE_ID_INTEL_VMD" $rootdir/include/spdk/pci_ids.h \
 | awk -F"x" '{print $2}' > $TMP
 set +e
 check_for_driver vmd
 driver_loaded=$?
 set -e
-while IFS= read -r dev_id
-do
+while IFS= read -r dev_id; do
 for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
 if ! pci_can_use $bdf; then
 echo "Skipping un-whitelisted VMD device at $bdf"
@@ -563,7 +557,7 @@ function reset_linux_pci {
 echo "1" > "/sys/bus/pci/rescan"
 }
-function reset_linux {
+function reset_linux() {
 reset_linux_pci
 for mount in $(linux_hugetlbfs_mounts); do
 rm -f "$mount"/spdk*map_*
@@ -571,9 +565,9 @@ function reset_linux {
 rm -f /run/.spdk*
 }
-function status_linux {
+function status_linux() {
 echo "Hugepages"
 printf "%-6s %10s %8s / %6s\n" "node" "hugesize" "free" "total"
 numa_nodes=0
 shopt -s nullglob
@@ -606,7 +600,7 @@ function status_linux {
 echo -e "BDF\t\tVendor\tDevice\tNUMA\tDriver\t\tDevice name"
 for bdf in ${pci_bus_cache["0x010802"]}; do
-driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent |awk -F"=" '{print $2}')
+driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
 if [ "$numa_nodes" = "0" ]; then
 node="-"
 else
@@ -615,11 +609,11 @@ function status_linux {
 device=$(cat /sys/bus/pci/devices/$bdf/device)
 vendor=$(cat /sys/bus/pci/devices/$bdf/vendor)
 if [ "$driver" = "nvme" ] && [ -d /sys/bus/pci/devices/$bdf/nvme ]; then
-name="\t"$(ls /sys/bus/pci/devices/$bdf/nvme);
+name="\t"$(ls /sys/bus/pci/devices/$bdf/nvme)
 else
-name="-";
+name="-"
 fi
-echo -e "$bdf\t${vendor#0x}\t${device#0x}\t$node\t${driver:--}\t\t$name";
+echo -e "$bdf\t${vendor#0x}\t${device#0x}\t$node\t${driver:--}\t\t$name"
 done
 echo ""
@@ -627,11 +621,11 @@ function status_linux {
 #collect all the device_id info of ioat devices.
 TMP=$(grep "PCI_DEVICE_ID_INTEL_IOAT" $rootdir/include/spdk/pci_ids.h \
 | awk -F"x" '{print $2}')
 echo -e "BDF\t\tVendor\tDevice\tNUMA\tDriver"
 for dev_id in $TMP; do
 for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
-driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent |awk -F"=" '{print $2}')
+driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
 if [ "$numa_nodes" = "0" ]; then
 node="-"
 else
@@ -643,37 +637,37 @@ function status_linux {
 done
 done
 echo ""
 echo "IDXD DMA"
 #collect all the device_id info of idxd devices.
 TMP=$(grep "PCI_DEVICE_ID_INTEL_IDXD" $rootdir/include/spdk/pci_ids.h \
 | awk -F"x" '{print $2}')
 echo -e "BDF\t\tVendor\tDevice\tNUMA\tDriver"
 for dev_id in $TMP; do
 for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
-driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent |awk -F"=" '{print $2}')
+driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
 if [ "$numa_nodes" = "0" ]; then
 node="-"
 else
 node=$(cat /sys/bus/pci/devices/$bdf/numa_node)
 fi
 device=$(cat /sys/bus/pci/devices/$bdf/device)
 vendor=$(cat /sys/bus/pci/devices/$bdf/vendor)
 echo -e "$bdf\t${vendor#0x}\t${device#0x}\t$node\t${driver:--}"
 done
 done
 echo ""
 echo "virtio"
 #collect all the device_id info of virtio devices.
 TMP=$(grep "PCI_DEVICE_ID_VIRTIO" $rootdir/include/spdk/pci_ids.h \
 | awk -F"x" '{print $2}')
 echo -e "BDF\t\tVendor\tDevice\tNUMA\tDriver\t\tDevice name"
 for dev_id in $TMP; do
 for bdf in ${pci_bus_cache["0x1af4:0x$dev_id"]}; do
-driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent |awk -F"=" '{print $2}')
+driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
 if [ "$numa_nodes" = "0" ]; then
 node="-"
 else
@@ -691,18 +685,18 @@ function status_linux {
 #collect all the device_id info of vmd devices.
 TMP=$(grep "PCI_DEVICE_ID_INTEL_VMD" $rootdir/include/spdk/pci_ids.h \
 | awk -F"x" '{print $2}')
 echo -e "BDF\t\tNuma Node\tDriver Name"
 for dev_id in $TMP; do
 for bdf in ${pci_bus_cache["0x8086:0x$dev_id"]}; do
-driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent |awk -F"=" '{print $2}')
-node=$(cat /sys/bus/pci/devices/$bdf/numa_node);
+driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
+node=$(cat /sys/bus/pci/devices/$bdf/numa_node)
 echo -e "$bdf\t$node\t\t$driver"
 done
 done
 }
-function configure_freebsd_pci {
+function configure_freebsd_pci() {
 local devs ids id
 local BDFS
@@ -730,7 +724,7 @@ function configure_freebsd_pci {
 kldload nic_uio.ko
 }
-function configure_freebsd {
+function configure_freebsd() {
 configure_freebsd_pci
 # If contigmem is already loaded but the HUGEMEM specified doesn't match the
 # previous value, unload contigmem so that we can reload with the new value.
@@ -746,7 +740,7 @@ function configure_freebsd {
 fi
 }
-function reset_freebsd {
+function reset_freebsd() {
 kldunload contigmem.ko || true
 kldunload nic_uio.ko || true
 }
@@ -774,14 +768,14 @@ fi
 if [ -z "$TARGET_USER" ]; then
 TARGET_USER="$SUDO_USER"
 if [ -z "$TARGET_USER" ]; then
-TARGET_USER=$(logname 2>/dev/null) || true
+TARGET_USER=$(logname 2> /dev/null) || true
 fi
 fi
 if [ $(uname) = Linux ]; then
-HUGEPGSZ=$(( $(grep Hugepagesize /proc/meminfo | cut -d : -f 2 | tr -dc '0-9') ))
-HUGEPGSZ_MB=$(( HUGEPGSZ / 1024 ))
-: ${NRHUGE=$(( (HUGEMEM + HUGEPGSZ_MB - 1) / HUGEPGSZ_MB ))}
+HUGEPGSZ=$(($(grep Hugepagesize /proc/meminfo | cut -d : -f 2 | tr -dc '0-9')))
+HUGEPGSZ_MB=$((HUGEPGSZ / 1024))
+: ${NRHUGE=$(((HUGEMEM + HUGEPGSZ_MB - 1) / HUGEPGSZ_MB))}
 if [ "$mode" == "config" ]; then
 configure_linux


@@ -19,25 +19,25 @@ while getopts "s:n:t:h-:" opt; do
 echo " Invalid argument: $OPTARG"
 usage
 exit 1
 ;;
 s)
 size=$OPTARG
 ;;
 n)
 nvme_disk=$OPTARG
 ;;
 t)
 type=$OPTARG
 ;;
 h)
 usage
 exit 0
 ;;
 *)
 echo " Invalid argument: $OPTARG"
 usage
 exit 1
 ;;
 esac
 done
@@ -46,18 +46,18 @@ if [ ! "${SYSTEM}" = "FreeBSD" ]; then
 case $type in
 "nvme")
 qemu-img create -f raw $nvme_disk $size
 ;;
 "ocssd")
 if [ $size == "1024M" ]; then
 size="9G"
 fi
 fallocate -l $size $nvme_disk
 touch ${nvme_disk}_ocssd_md
 ;;
 *)
 echo "We support only nvme and ocssd disks types"
 exit 1
 ;;
 esac
 #Change SE Policy on Fedora
 if [ $WHICH_OS == "Fedora" ]; then


@ -11,8 +11,8 @@ set -e
VAGRANT_TARGET="$PWD" VAGRANT_TARGET="$PWD"
DIR="$( cd "$( dirname $0 )" && pwd )" DIR="$(cd "$(dirname $0)" && pwd)"
SPDK_DIR="$( cd "${DIR}/../../" && pwd )" SPDK_DIR="$(cd "${DIR}/../../" && pwd)"
# The command line help # The command line help
display_help() { display_help() {
@ -85,103 +85,103 @@ VAGRANT_HUGE_MEM=0
while getopts ":b:n:s:x:p:u:vcraldHh-:" opt; do while getopts ":b:n:s:x:p:u:vcraldHh-:" opt; do
case "${opt}" in case "${opt}" in
-) -)
case "${OPTARG}" in case "${OPTARG}" in
package-box) VAGRANT_PACKAGE_BOX=1 ;; package-box) VAGRANT_PACKAGE_BOX=1 ;;
qemu-emulator=*) SPDK_QEMU_EMULATOR="${OPTARG#*=}" ;; qemu-emulator=*) SPDK_QEMU_EMULATOR="${OPTARG#*=}" ;;
vagrantfiles-dir=*) VAGRANTFILE_DIR="${OPTARG#*=}" ;; vagrantfiles-dir=*) VAGRANTFILE_DIR="${OPTARG#*=}" ;;
*) echo "Invalid argument '$OPTARG'" ;; *) echo "Invalid argument '$OPTARG'" ;;
esac esac
;; ;;
x) x)
http_proxy=$OPTARG http_proxy=$OPTARG
https_proxy=$http_proxy https_proxy=$http_proxy
SPDK_VAGRANT_HTTP_PROXY="${http_proxy}" SPDK_VAGRANT_HTTP_PROXY="${http_proxy}"
;; ;;
n) n)
SPDK_VAGRANT_VMCPU=$OPTARG SPDK_VAGRANT_VMCPU=$OPTARG
;; ;;
s) s)
SPDK_VAGRANT_VMRAM=$OPTARG SPDK_VAGRANT_VMRAM=$OPTARG
;; ;;
p) p)
SPDK_VAGRANT_PROVIDER=$OPTARG SPDK_VAGRANT_PROVIDER=$OPTARG
;; ;;
v) v)
VERBOSE=1 VERBOSE=1
;; ;;
c) c)
NVME_AUTO_CREATE=1 NVME_AUTO_CREATE=1
;; ;;
r) r)
DRY_RUN=1 DRY_RUN=1
;; ;;
h) h)
display_help >&2 display_help >&2
exit 0 exit 0
;; ;;
a) a)
COPY_SPDK_ARTIFACTS=1 COPY_SPDK_ARTIFACTS=1
;; ;;
l) l)
COPY_SPDK_DIR=0 COPY_SPDK_DIR=0
;; ;;
d) d)
DEPLOY_TEST_VM=1 DEPLOY_TEST_VM=1
;; ;;
b) b)
NVME_FILE+="${OPTARG#*=} " NVME_FILE+="${OPTARG#*=} "
;; ;;
u) u)
VAGRANT_PASSWORD_AUTH=1 VAGRANT_PASSWORD_AUTH=1
;; ;;
H) H)
VAGRANT_HUGE_MEM=1 VAGRANT_HUGE_MEM=1
;; ;;
*) *)
echo " Invalid argument: -$OPTARG" >&2 echo " Invalid argument: -$OPTARG" >&2
echo " Try: \"$0 -h\"" >&2 echo " Try: \"$0 -h\"" >&2
exit 1 exit 1
;; ;;
esac esac
done done
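The option loop above leans on the getopts "-:" trick so that --long-option=value arguments can be handled inside a plain getopts loop. A small self-contained sketch of that pattern; the option names below are invented for illustration:

#!/usr/bin/env bash
# Sketch of the getopts "-:" long-option pattern (option names are invented).
DRY_RUN=0 BOX_NAME="" VM_CPUS=2
while getopts "n:h-:" opt; do
    case "$opt" in
        -) # OPTARG holds everything after "--"
            case "$OPTARG" in
                dry-run) DRY_RUN=1 ;;
                box-name=*) BOX_NAME="${OPTARG#*=}" ;;
                *) echo "Invalid argument '--$OPTARG'" >&2 ;;
            esac
            ;;
        n) VM_CPUS=$OPTARG ;;
        h)
            echo "usage: $0 [-n cpus] [--dry-run] [--box-name=NAME]"
            exit 0
            ;;
        *)
            echo " Invalid argument: -$OPTARG" >&2
            exit 1
            ;;
    esac
done
shift "$((OPTIND - 1))" # discard the parsed options
echo "cpus=$VM_CPUS dry_run=$DRY_RUN box=${BOX_NAME:-<none>} rest=$*"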
shift "$((OPTIND-1))" # Discard the options and sentinel -- shift "$((OPTIND - 1))" # Discard the options and sentinel --
SPDK_VAGRANT_DISTRO="$*" SPDK_VAGRANT_DISTRO="$*"
case "${SPDK_VAGRANT_DISTRO}" in case "${SPDK_VAGRANT_DISTRO}" in
centos7) centos7)
export SPDK_VAGRANT_DISTRO export SPDK_VAGRANT_DISTRO
;; ;;
centos8) centos8)
export SPDK_VAGRANT_DISTRO export SPDK_VAGRANT_DISTRO
;; ;;
ubuntu1604) ubuntu1604)
export SPDK_VAGRANT_DISTRO export SPDK_VAGRANT_DISTRO
;; ;;
ubuntu1804) ubuntu1804)
export SPDK_VAGRANT_DISTRO export SPDK_VAGRANT_DISTRO
;; ;;
fedora30) fedora30)
export SPDK_VAGRANT_DISTRO export SPDK_VAGRANT_DISTRO
;; ;;
fedora31) fedora31)
export SPDK_VAGRANT_DISTRO export SPDK_VAGRANT_DISTRO
;; ;;
freebsd11) freebsd11)
export SPDK_VAGRANT_DISTRO export SPDK_VAGRANT_DISTRO
;; ;;
freebsd12) freebsd12)
export SPDK_VAGRANT_DISTRO export SPDK_VAGRANT_DISTRO
;; ;;
arch) arch)
export SPDK_VAGRANT_DISTRO export SPDK_VAGRANT_DISTRO
;; ;;
*) *)
echo " Invalid argument \"${SPDK_VAGRANT_DISTRO}\"" echo " Invalid argument \"${SPDK_VAGRANT_DISTRO}\""
echo " Try: \"$0 -h\"" >&2 echo " Try: \"$0 -h\"" >&2
exit 1 exit 1
;; ;;
esac esac
if ! echo "$SPDK_VAGRANT_DISTRO" | grep -q fedora && [ $DEPLOY_TEST_VM -eq 1 ]; then if ! echo "$SPDK_VAGRANT_DISTRO" | grep -q fedora && [ $DEPLOY_TEST_VM -eq 1 ]; then
@ -195,15 +195,15 @@ else
TMP="" TMP=""
for args in $NVME_FILE; do for args in $NVME_FILE; do
while IFS=, read -r path type namespace; do while IFS=, read -r path type namespace; do
TMP+="$path,"; TMP+="$path,"
if [ -z "$type" ]; then if [ -z "$type" ]; then
type="nvme" type="nvme"
fi fi
NVME_DISKS_TYPE+="$type,"; NVME_DISKS_TYPE+="$type,"
if [ -z "$namespace" ] && [ -n "$SPDK_QEMU_EMULATOR" ]; then if [ -z "$namespace" ] && [ -n "$SPDK_QEMU_EMULATOR" ]; then
namespace="1" namespace="1"
fi fi
NVME_DISKS_NAMESPACES+="$namespace,"; NVME_DISKS_NAMESPACES+="$namespace,"
if [ ${NVME_AUTO_CREATE} = 1 ]; then if [ ${NVME_AUTO_CREATE} = 1 ]; then
$SPDK_DIR/scripts/vagrant/create_nvme_img.sh -t $type -n $path $SPDK_DIR/scripts/vagrant/create_nvme_img.sh -t $type -n $path
fi fi
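The fragment above splits each NVME_FILE entry of the form path,type,namespace and fills in defaults for the missing fields. A compact sketch of that parsing; the sample entries and the here-string used to feed read are assumptions made for illustration:

# Sketch: parse "path[,type[,namespace]]" entries the way the loop above does (sample data is made up).
NVME_FILE="/var/lib/img/nvme1.img,nvme, /var/lib/img/nvme2.img,ocssd,1 "
for args in $NVME_FILE; do
    while IFS=, read -r path type namespace; do
        : "${type:=nvme}" # default the disk type when it is omitted
        echo "path=$path type=$type namespace=${namespace:-<auto>}"
    done <<< "$args"
done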
@ -247,15 +247,15 @@ export VAGRANT_PASSWORD_AUTH
export VAGRANT_HUGE_MEM export VAGRANT_HUGE_MEM
if [ -n "$SPDK_VAGRANT_PROVIDER" ]; then if [ -n "$SPDK_VAGRANT_PROVIDER" ]; then
provider="--provider=${SPDK_VAGRANT_PROVIDER}" provider="--provider=${SPDK_VAGRANT_PROVIDER}"
fi fi
if [ -n "$SPDK_VAGRANT_PROVIDER" ]; then if [ -n "$SPDK_VAGRANT_PROVIDER" ]; then
export SPDK_VAGRANT_PROVIDER export SPDK_VAGRANT_PROVIDER
fi fi
if [ -n "$SPDK_QEMU_EMULATOR" ] && [ "$SPDK_VAGRANT_PROVIDER" == "libvirt" ]; then if [ -n "$SPDK_QEMU_EMULATOR" ] && [ "$SPDK_VAGRANT_PROVIDER" == "libvirt" ]; then
export SPDK_QEMU_EMULATOR export SPDK_QEMU_EMULATOR
fi fi
if [ ${DRY_RUN} = 1 ]; then if [ ${DRY_RUN} = 1 ]; then
@ -296,7 +296,7 @@ if [ ${DRY_RUN} != 1 ]; then
vagrant plugin install vagrant-proxyconf vagrant plugin install vagrant-proxyconf
fi fi
if echo "$SPDK_VAGRANT_DISTRO" | grep -q freebsd; then if echo "$SPDK_VAGRANT_DISTRO" | grep -q freebsd; then
cat >~/vagrant_pkg.conf <<EOF cat > ~/vagrant_pkg.conf << EOF
pkg_env: { pkg_env: {
http_proxy: ${http_proxy} http_proxy: ${http_proxy}
} }
@ -309,8 +309,8 @@ EOF
vagrant ssh -c 'sudo spdk_repo/spdk/scripts/vagrant/update.sh' vagrant ssh -c 'sudo spdk_repo/spdk/scripts/vagrant/update.sh'
vagrant halt vagrant halt
vagrant package --output spdk_${SPDK_VAGRANT_DISTRO}.box vagrant package --output spdk_${SPDK_VAGRANT_DISTRO}.box
vagrant box add spdk/${SPDK_VAGRANT_DISTRO} spdk_${SPDK_VAGRANT_DISTRO}.box && vagrant box add spdk/${SPDK_VAGRANT_DISTRO} spdk_${SPDK_VAGRANT_DISTRO}.box \
rm spdk_${SPDK_VAGRANT_DISTRO}.box && rm spdk_${SPDK_VAGRANT_DISTRO}.box
vagrant destroy vagrant destroy
fi fi
echo "" echo ""


@ -8,8 +8,8 @@ set -e
VAGRANT_TARGET="$PWD" VAGRANT_TARGET="$PWD"
DIR="$( cd "$( dirname $0 )" && pwd )" DIR="$(cd "$(dirname $0)" && pwd)"
SPDK_DIR="$( cd "${DIR}/../../" && pwd )" SPDK_DIR="$(cd "${DIR}/../../" && pwd)"
USE_SSH_DIR="" USE_SSH_DIR=""
MOVE_TO_DEFAULT_DIR=false MOVE_TO_DEFAULT_DIR=false
INSTALL_DEPS=false INSTALL_DEPS=false
@ -37,54 +37,53 @@ display_help() {
while getopts ":h-:" opt; do while getopts ":h-:" opt; do
case "${opt}" in case "${opt}" in
-) -)
case "${OPTARG}" in case "${OPTARG}" in
use-ssh-dir=*) USE_SSH_DIR="${OPTARG#*=}" ;; use-ssh-dir=*) USE_SSH_DIR="${OPTARG#*=}" ;;
move-to-default-dir) MOVE_TO_DEFAULT_DIR=true ;; move-to-default-dir) MOVE_TO_DEFAULT_DIR=true ;;
install-deps) INSTALL_DEPS=true ;; install-deps) INSTALL_DEPS=true ;;
http-proxy=*) http-proxy=*)
http_proxy=$OPTARG http_proxy=$OPTARG
https_proxy=$http_proxy https_proxy=$http_proxy
SPDK_VAGRANT_HTTP_PROXY="${http_proxy}" SPDK_VAGRANT_HTTP_PROXY="${http_proxy}"
;;
*)
echo " Invalid argument -$OPTARG" >&2
echo " Try \"$0 -h\"" >&2
exit 1
;;
esac
;; ;;
*)
echo " Invalid argument -$OPTARG" >&2
echo " Try \"$0 -h\"" >&2
exit 1
;;
esac
;;
h) h)
display_help >&2 display_help >&2
exit 0 exit 0
;; ;;
*) *)
echo " Invalid argument: -$OPTARG" >&2 echo " Invalid argument: -$OPTARG" >&2
echo " Try: \"$0 -h\"" >&2 echo " Try: \"$0 -h\"" >&2
exit 1 exit 1
;; ;;
esac esac
done done
export SPDK_DIR export SPDK_DIR
export SPDK_VAGRANT_HTTP_PROXY export SPDK_VAGRANT_HTTP_PROXY
export INSTALL_DEPS export INSTALL_DEPS
shift "$((OPTIND - 1))" # Discard the options and sentinel --
shift "$((OPTIND-1))" # Discard the options and sentinel --
SPDK_VAGRANT_DISTRO="$*" SPDK_VAGRANT_DISTRO="$*"
case "${SPDK_VAGRANT_DISTRO}" in case "${SPDK_VAGRANT_DISTRO}" in
ubuntu16) ubuntu16)
export SPDK_VAGRANT_DISTRO export SPDK_VAGRANT_DISTRO
;; ;;
ubuntu18) ubuntu18)
export SPDK_VAGRANT_DISTRO export SPDK_VAGRANT_DISTRO
;; ;;
*) *)
echo " Invalid argument \"${SPDK_VAGRANT_DISTRO}\"" echo " Invalid argument \"${SPDK_VAGRANT_DISTRO}\""
echo " Try: \"$0 -h\"" >&2 echo " Try: \"$0 -h\"" >&2
exit 1 exit 1
;; ;;
esac esac
mkdir -vp "${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}" mkdir -vp "${VAGRANT_TARGET}/${SPDK_VAGRANT_DISTRO}"


@ -70,25 +70,30 @@ set -e
NOOP=0 NOOP=0
METHOD=0 METHOD=0
V=1 V=1
OPTIND=1 # Reset in case getopts has been used previously in the shell. OPTIND=1 # Reset in case getopts has been used previously in the shell.
while getopts "d:qhn" opt; do while getopts "d:qhn" opt; do
case "$opt" in case "$opt" in
d) SPDK_SOURCE_PATH=$($READLINK -f $OPTARG) d)
SPDK_SOURCE_PATH=$($READLINK -f $OPTARG)
echo Using SPDK source at ${SPDK_SOURCE_PATH} echo Using SPDK source at ${SPDK_SOURCE_PATH}
METHOD=1 METHOD=1
;; ;;
q) V=0 q)
;; V=0
n) NOOP=1 ;;
;; n)
h) display_help >&2 NOOP=1
;;
h)
display_help >&2
exit 0 exit 0
;; ;;
*) echo "Invalid option" *)
echo "Invalid option"
echo "" echo ""
display_help >&2 display_help >&2
exit 1 exit 1
;; ;;
esac esac
done done
@ -141,12 +146,12 @@ case "$METHOD" in
fi fi
GIT_REPO_PATH="${SPDK_AUTOTEST_LOCAL_PATH}/${GIT_BRANCH}/${BUILD_NAME}" GIT_REPO_PATH="${SPDK_AUTOTEST_LOCAL_PATH}/${GIT_BRANCH}/${BUILD_NAME}"
;; ;;
*) *)
echo "Internal Error: Must specify a source path or branch name" echo "Internal Error: Must specify a source path or branch name"
display_help display_help
exit 1 exit 1
;; ;;
esac esac
AUTOTEST_RESULTS="${SPDK_AUTOTEST_LOCAL_PATH}/${GIT_BRANCH}/${BUILD_NAME}" AUTOTEST_RESULTS="${SPDK_AUTOTEST_LOCAL_PATH}/${GIT_BRANCH}/${BUILD_NAME}"
@ -203,12 +208,12 @@ if [[ ${NOOP} -eq 0 ]]; then
sudo "${MAKE}" clean -j $(nproc) sudo "${MAKE}" clean -j $(nproc)
sudo "${GIT}" clean -d -f sudo "${GIT}" clean -d -f
popd popd
;; ;;
*) *)
echo "Internal Error: Must specify a source path or branch name" echo "Internal Error: Must specify a source path or branch name"
display_help display_help
exit 1 exit 1
;; ;;
esac esac
trap "echo ERROR; exit" INT TERM EXIT trap "echo ERROR; exit" INT TERM EXIT


@ -9,8 +9,8 @@ fi
set -e set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SPDK_DIR="$( cd "${DIR}/../../" && pwd )" SPDK_DIR="$(cd "${DIR}/../../" && pwd)"
echo "SPDK_DIR = $SPDK_DIR" echo "SPDK_DIR = $SPDK_DIR"
# Bug fix for vagrant rsync problem # Bug fix for vagrant rsync problem
@ -50,7 +50,7 @@ else
# get the requested number of hugepages without rebooting. # get the requested number of hugepages without rebooting.
# So do it here just in case # So do it here just in case
sysctl -w vm.nr_hugepages=1024 sysctl -w vm.nr_hugepages=1024
HUGEPAGES=$(sysctl -n vm.nr_hugepages) HUGEPAGES=$(sysctl -n vm.nr_hugepages)
if [ $HUGEPAGES != 1024 ]; then if [ $HUGEPAGES != 1024 ]; then
echo "Warning: Unable to get 1024 hugepages, only got $HUGEPAGES" echo "Warning: Unable to get 1024 hugepages, only got $HUGEPAGES"
echo "Warning: Adjusting HUGEMEM in /home/vagrant/autorun-spdk.conf" echo "Warning: Adjusting HUGEMEM in /home/vagrant/autorun-spdk.conf"
@ -59,9 +59,9 @@ else
fi fi
# Figure out what system we are running on # Figure out what system we are running on
if [ -f /etc/lsb-release ];then if [ -f /etc/lsb-release ]; then
. /etc/lsb-release . /etc/lsb-release
elif [ -f /etc/redhat-release ];then elif [ -f /etc/redhat-release ]; then
yum update -y yum update -y
yum install -y redhat-lsb yum install -y redhat-lsb
DISTRIB_ID=$(lsb_release -si) DISTRIB_ID=$(lsb_release -si)


@ -14,10 +14,10 @@ function raid_unmap_data_verify() {
local nbd=$1 local nbd=$1
local rpc_server=$2 local rpc_server=$2
local blksize local blksize
blksize=$(lsblk -o LOG-SEC $nbd | grep -v LOG-SEC | cut -d ' ' -f 5) blksize=$(lsblk -o LOG-SEC $nbd | grep -v LOG-SEC | cut -d ' ' -f 5)
local rw_blk_num=4096 local rw_blk_num=4096
local rw_len=$((blksize * rw_blk_num)) local rw_len=$((blksize * rw_blk_num))
local unmap_blk_offs=(0 1028 321) local unmap_blk_offs=(0 1028 321)
local unmap_blk_nums=(128 2035 456) local unmap_blk_nums=(128 2035 456)
local unmap_off local unmap_off
local unmap_len local unmap_len
@ -30,7 +30,7 @@ function raid_unmap_data_verify() {
# confirm random data is written correctly in raid0 device # confirm random data is written correctly in raid0 device
cmp -b -n $rw_len $tmp_file $nbd cmp -b -n $rw_len $tmp_file $nbd
for (( i=0; i<${#unmap_blk_offs[@]}; i++ )); do for ((i = 0; i < ${#unmap_blk_offs[@]}; i++)); do
unmap_off=$((blksize * ${unmap_blk_offs[$i]})) unmap_off=$((blksize * ${unmap_blk_offs[$i]}))
unmap_len=$((blksize * ${unmap_blk_nums[$i]})) unmap_len=$((blksize * ${unmap_blk_nums[$i]}))
@ -63,9 +63,9 @@ function configure_raid_bdev() {
rm -rf $testdir/rpcs.txt rm -rf $testdir/rpcs.txt
cat <<- EOL >> $testdir/rpcs.txt cat <<- EOL >> $testdir/rpcs.txt
bdev_malloc_create 32 512 -b Base_1 bdev_malloc_create 32 512 -b Base_1
bdev_malloc_create 32 512 -b Base_2 bdev_malloc_create 32 512 -b Base_2
bdev_raid_create -z 64 -r 0 -b "Base_1 Base_2" -n raid0 bdev_raid_create -z 64 -r 0 -b "Base_1 Base_2" -n raid0
EOL EOL
$rpc_py < $testdir/rpcs.txt $rpc_py < $testdir/rpcs.txt
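configure_raid_bdev above batches several RPC calls into one file via a "<<-" here-document (tab-indented, so the indentation is stripped) and replays them through rpc.py in a single invocation. A sketch of the same batching; it uses a plain "<<" here-document so it does not depend on tab characters, the rpc.py path is an assumed location, and the bdev parameters are copied from the hunk above:

# Sketch: build an RPC batch file and replay it with one rpc.py invocation.
rpc_py=./scripts/rpc.py # assumed location
cat << EOL > /tmp/rpcs.txt
bdev_malloc_create 32 512 -b Base_1
bdev_malloc_create 32 512 -b Base_2
bdev_raid_create -z 64 -r 0 -b "Base_1 Base_2" -n raid0
EOL
$rpc_py < /tmp/rpcs.txt # replay the whole batch over one connection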


@ -8,7 +8,7 @@ source $testdir/nbd_common.sh
rpc_py="$rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
conf_file="$testdir/bdev.json" conf_file="$testdir/bdev.json"
# Make sure the configuration is clean # Make sure the configuration is clean
:>"$conf_file" : > "$conf_file"
function cleanup() { function cleanup() {
rm -f "/tmp/aiofile" rm -f "/tmp/aiofile"
@ -28,7 +28,7 @@ function start_spdk_tgt() {
} }
function setup_bdev_conf() { function setup_bdev_conf() {
"$rpc_py" <<-RPC "$rpc_py" <<- RPC
bdev_split_create Malloc1 2 bdev_split_create Malloc1 2
bdev_split_create -s 4 Malloc2 8 bdev_split_create -s 4 Malloc2 8
bdev_malloc_create -b Malloc0 32 512 bdev_malloc_create -b Malloc0 32 512
@ -69,7 +69,8 @@ function setup_gpt_conf() {
dev=/dev/${nvme_dev##*/} dev=/dev/${nvme_dev##*/}
if ! pt=$(parted "$dev" -ms print 2>&1); then if ! pt=$(parted "$dev" -ms print 2>&1); then
[[ $pt == *"$dev: unrecognised disk label"* ]] || continue [[ $pt == *"$dev: unrecognised disk label"* ]] || continue
gpt_nvme=$dev; break gpt_nvme=$dev
break
fi fi
done done
if [[ -n $gpt_nvme ]]; then if [[ -n $gpt_nvme ]]; then
@ -78,7 +79,7 @@ function setup_gpt_conf() {
# change the GUID to SPDK GUID value # change the GUID to SPDK GUID value
# FIXME: Hardcode this in some common place, this value should not be changed much # FIXME: Hardcode this in some common place, this value should not be changed much
IFS="()" read -r _ SPDK_GPT_GUID _ < <(grep SPDK_GPT_PART_TYPE_GUID module/bdev/gpt/gpt.h) IFS="()" read -r _ SPDK_GPT_GUID _ < <(grep SPDK_GPT_PART_TYPE_GUID module/bdev/gpt/gpt.h)
SPDK_GPT_GUID=${SPDK_GPT_GUID//, /-} SPDK_GPT_GUID=${SPDK_GPT_GUID//0x} SPDK_GPT_GUID=${SPDK_GPT_GUID//, /-} SPDK_GPT_GUID=${SPDK_GPT_GUID//0x/}
sgdisk -t "1:$SPDK_GPT_GUID" "$gpt_nvme" sgdisk -t "1:$SPDK_GPT_GUID" "$gpt_nvme"
sgdisk -t "2:$SPDK_GPT_GUID" "$gpt_nvme" sgdisk -t "2:$SPDK_GPT_GUID" "$gpt_nvme"
"$rootdir/scripts/setup.sh" "$rootdir/scripts/setup.sh"
@ -89,7 +90,7 @@ function setup_gpt_conf() {
"$rootdir/scripts/setup.sh" "$rootdir/scripts/setup.sh"
return 1 return 1
fi fi
else else
# Not supported platform or missing tooling, nothing to be done, simply exit the test # Not supported platform or missing tooling, nothing to be done, simply exit the test
# in a graceful manner. # in a graceful manner.
trap - SIGINT SIGTERM EXIT trap - SIGINT SIGTERM EXIT
@ -101,7 +102,7 @@ function setup_gpt_conf() {
function setup_crypto_aesni_conf() { function setup_crypto_aesni_conf() {
# Malloc0 and Malloc1 use AESNI # Malloc0 and Malloc1 use AESNI
"$rpc_py" <<-RPC "$rpc_py" <<- RPC
bdev_malloc_create -b Malloc0 16 512 bdev_malloc_create -b Malloc0 16 512
bdev_malloc_create -b Malloc1 16 512 bdev_malloc_create -b Malloc1 16 512
bdev_crypto_create Malloc0 crypto_ram crypto_aesni_mb 0123456789123456 bdev_crypto_create Malloc0 crypto_ram crypto_aesni_mb 0123456789123456
@ -112,7 +113,7 @@ function setup_crypto_aesni_conf() {
function setup_crypto_qat_conf() { function setup_crypto_qat_conf() {
# Malloc0 will use QAT AES_CBC # Malloc0 will use QAT AES_CBC
# Malloc1 will use QAT AES_XTS # Malloc1 will use QAT AES_XTS
"$rpc_py" <<-RPC "$rpc_py" <<- RPC
bdev_malloc_create -b Malloc0 16 512 bdev_malloc_create -b Malloc0 16 512
bdev_malloc_create -b Malloc1 16 512 bdev_malloc_create -b Malloc1 16 512
bdev_crypto_create Malloc0 crypto_ram crypto_qat 0123456789123456 bdev_crypto_create Malloc0 crypto_ram crypto_qat 0123456789123456
@ -195,7 +196,7 @@ function fio_test_suite() {
local fio_params="--ioengine=spdk_bdev --iodepth=8 --bs=4k --runtime=10 $testdir/bdev.fio --spdk_json_conf=$conf_file" local fio_params="--ioengine=spdk_bdev --iodepth=8 --bs=4k --runtime=10 $testdir/bdev.fio --spdk_json_conf=$conf_file"
run_test "bdev_fio_rw_verify" fio_bdev $fio_params --spdk_mem=$PRE_RESERVED_MEM \ run_test "bdev_fio_rw_verify" fio_bdev $fio_params --spdk_mem=$PRE_RESERVED_MEM \
--output=$output_dir/blockdev_fio_verify.txt --output=$output_dir/blockdev_fio_verify.txt
rm -f ./*.state rm -f ./*.state
rm -f $testdir/bdev.fio rm -f $testdir/bdev.fio
@ -227,20 +228,19 @@ function get_io_result() {
iostat_result=$(awk '{print $6}' <<< $iostat_result) iostat_result=$(awk '{print $6}' <<< $iostat_result)
fi fi
echo ${iostat_result/.*} echo ${iostat_result/.*/}
} }
function run_qos_test() { function run_qos_test() {
local qos_limit=$1 local qos_limit=$1
local qos_result=0 local qos_result=0
qos_result=$(get_io_result $2 $3) qos_result=$(get_io_result $2 $3)
if [ $2 = BANDWIDTH ]; then if [ $2 = BANDWIDTH ]; then
qos_limit=$((qos_limit*1024)) qos_limit=$((qos_limit * 1024))
fi fi
lower_limit=$((qos_limit*9/10)) lower_limit=$((qos_limit * 9 / 10))
upper_limit=$((qos_limit*11/10)) upper_limit=$((qos_limit * 11 / 10))
# QoS realization is related with bytes transfered. It currently has some variation. # QoS realization is related with bytes transfered. It currently has some variation.
if [ $qos_result -lt $lower_limit ] || [ $qos_result -gt $upper_limit ]; then if [ $qos_result -lt $lower_limit ] || [ $qos_result -gt $upper_limit ]; then
@ -261,7 +261,7 @@ function qos_function_test() {
io_result=$(get_io_result IOPS $QOS_DEV_1) io_result=$(get_io_result IOPS $QOS_DEV_1)
# Set the IOPS limit as one quarter of the measured performance without QoS # Set the IOPS limit as one quarter of the measured performance without QoS
iops_limit=$(((io_result/4)/qos_lower_iops_limit*qos_lower_iops_limit)) iops_limit=$(((io_result / 4) / qos_lower_iops_limit * qos_lower_iops_limit))
if [ $iops_limit -gt $qos_lower_iops_limit ]; then if [ $iops_limit -gt $qos_lower_iops_limit ]; then
# Run bdevperf with IOPS rate limit on bdev 1 # Run bdevperf with IOPS rate limit on bdev 1
@ -271,7 +271,7 @@ function qos_function_test() {
# Run bdevperf with bandwidth rate limit on bdev 2 # Run bdevperf with bandwidth rate limit on bdev 2
# Set the bandwidth limit as 1/10 of the measure performance without QoS # Set the bandwidth limit as 1/10 of the measure performance without QoS
bw_limit=$(get_io_result BANDWIDTH $QOS_DEV_2) bw_limit=$(get_io_result BANDWIDTH $QOS_DEV_2)
bw_limit=$((bw_limit/1024/10)) bw_limit=$((bw_limit / 1024 / 10))
if [ $bw_limit -lt $qos_lower_bw_limit ]; then if [ $bw_limit -lt $qos_lower_bw_limit ]; then
bw_limit=$qos_lower_bw_limit bw_limit=$qos_lower_bw_limit
fi fi
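The two QoS hunks above derive a limit from a measured baseline and then accept results that fall inside a plus/minus 10% band. A tiny numeric sketch of that acceptance check; all the numbers are invented:

# Sketch of the +/-10% QoS acceptance band used above (all numbers are invented).
qos_limit=20480  # configured limit, same unit as the measurement
qos_result=21000 # value reported by iostat during the run
lower_limit=$((qos_limit * 9 / 10))  # 18432
upper_limit=$((qos_limit * 11 / 10)) # 22528
if ((qos_result < lower_limit || qos_result > upper_limit)); then
    echo "FAIL: $qos_result outside [$lower_limit, $upper_limit]"
else
    echo "PASS: $qos_result within the tolerance band"
fi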
@ -325,34 +325,40 @@ fi
test_type=${1:-bdev} test_type=${1:-bdev}
start_spdk_tgt start_spdk_tgt
case "$test_type" in case "$test_type" in
bdev ) bdev)
setup_bdev_conf;; setup_bdev_conf
nvme ) ;;
setup_nvme_conf;; nvme)
gpt ) setup_nvme_conf
setup_gpt_conf;; ;;
crypto_aesni ) gpt)
setup_crypto_aesni_conf;; setup_gpt_conf
crypto_qat ) ;;
setup_crypto_qat_conf;; crypto_aesni)
pmem ) setup_crypto_aesni_conf
setup_pmem_conf;; ;;
rbd ) crypto_qat)
setup_rbd_conf;; setup_crypto_qat_conf
* ) ;;
pmem)
setup_pmem_conf
;;
rbd)
setup_rbd_conf
;;
*)
echo "invalid test name" echo "invalid test name"
exit 1 exit 1
;; ;;
esac esac
# Generate json config and use it throughout all the tests # Generate json config and use it throughout all the tests
cat <<-CONF >"$conf_file" cat <<- CONF > "$conf_file"
{"subsystems":[ {"subsystems":[
$("$rpc_py" save_subsystem_config -n bdev) $("$rpc_py" save_subsystem_config -n bdev)
]} ]}
CONF CONF
bdevs=$("$rpc_py" bdev_get_bdevs | jq -r '.[] | select(.claimed == false)') bdevs=$("$rpc_py" bdev_get_bdevs | jq -r '.[] | select(.claimed == false)')
bdevs_name=$(echo $bdevs | jq -r '.name') bdevs_name=$(echo $bdevs | jq -r '.name')
bdev_list=($bdevs_name) bdev_list=($bdevs_name)
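The three lines above pull the names of unclaimed bdevs out of bdev_get_bdevs with jq and word-split them into an array. The same query against a canned JSON document, shown as a sketch; the JSON sample is made up:

# Sketch: select unclaimed bdev names with jq (the JSON below is a made-up sample).
bdevs_json='[{"name":"Malloc0","claimed":false},{"name":"Malloc1p0","claimed":false},{"name":"Malloc1","claimed":true}]'
bdevs_name=$(jq -r '.[] | select(.claimed == false) | .name' <<< "$bdevs_json")
bdev_list=($bdevs_name) # -> Malloc0 Malloc1p0
printf 'unclaimed: %s\n' "${bdev_list[@]}"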
@ -366,12 +372,12 @@ run_test "bdev_hello_world" $rootdir/examples/bdev/hello_world/hello_bdev --json
run_test "bdev_bounds" bdev_bounds run_test "bdev_bounds" bdev_bounds
run_test "bdev_nbd" nbd_function_test $conf_file "$bdevs_name" run_test "bdev_nbd" nbd_function_test $conf_file "$bdevs_name"
if [[ $CONFIG_FIO_PLUGIN == y ]]; then if [[ $CONFIG_FIO_PLUGIN == y ]]; then
if [ "$test_type" = "nvme" ] || [ "$test_type" = "gpt" ]; then if [ "$test_type" = "nvme" ] || [ "$test_type" = "gpt" ]; then
# TODO: once we get real multi-ns drives, re-enable this test for NVMe. # TODO: once we get real multi-ns drives, re-enable this test for NVMe.
echo "skipping fio tests on NVMe due to multi-ns failures." echo "skipping fio tests on NVMe due to multi-ns failures."
else else
run_test "bdev_fio" fio_test_suite run_test "bdev_fio" fio_test_suite
fi fi
else else
echo "FIO not available" echo "FIO not available"
exit 1 exit 1
@ -386,7 +392,7 @@ fi
# Temporarily disabled - infinite loop # Temporarily disabled - infinite loop
# if [ $RUN_NIGHTLY -eq 1 ]; then # if [ $RUN_NIGHTLY -eq 1 ]; then
# run_test "bdev_reset" $testdir/bdevperf/bdevperf --json "$conf_file" -q 16 -w reset -o 4096 -t 60 # run_test "bdev_reset" $testdir/bdevperf/bdevperf --json "$conf_file" -q 16 -w reset -o 4096 -t 60
# fi # fi
# Bdev and configuration cleanup below this line # Bdev and configuration cleanup below this line


@ -6,7 +6,7 @@ function nbd_start_disks() {
local nbd_list=($3) local nbd_list=($3)
local i local i
for (( i=0; i<${#nbd_list[@]}; i++ )); do for ((i = 0; i < ${#nbd_list[@]}; i++)); do
$rootdir/scripts/rpc.py -s $rpc_server nbd_start_disk ${bdev_list[$i]} ${nbd_list[$i]} $rootdir/scripts/rpc.py -s $rpc_server nbd_start_disk ${bdev_list[$i]} ${nbd_list[$i]}
# Wait for nbd device ready # Wait for nbd device ready
waitfornbd $(basename ${nbd_list[$i]}) waitfornbd $(basename ${nbd_list[$i]})
@ -19,7 +19,7 @@ function nbd_start_disks_without_nbd_idx() {
local i local i
local nbd_device local nbd_device
for (( i=0; i<${#bdev_list[@]}; i++ )); do for ((i = 0; i < ${#bdev_list[@]}; i++)); do
nbd_device=$($rootdir/scripts/rpc.py -s $rpc_server nbd_start_disk ${bdev_list[$i]}) nbd_device=$($rootdir/scripts/rpc.py -s $rpc_server nbd_start_disk ${bdev_list[$i]})
# Wait for nbd device ready # Wait for nbd device ready
waitfornbd $(basename ${nbd_device}) waitfornbd $(basename ${nbd_device})
@ -29,7 +29,7 @@ function nbd_start_disks_without_nbd_idx() {
function waitfornbd_exit() { function waitfornbd_exit() {
local nbd_name=$1 local nbd_name=$1
for ((i=1; i<=20; i++)); do for ((i = 1; i <= 20; i++)); do
if grep -q -w $nbd_name /proc/partitions; then if grep -q -w $nbd_name /proc/partitions; then
sleep 0.1 sleep 0.1
else else


@ -1,9 +1,9 @@
#!/usr/bin/env bash #!/usr/bin/env bash
SYSTEM=$(uname -s) SYSTEM=$(uname -s)
if [ $SYSTEM = "FreeBSD" ] ; then if [ $SYSTEM = "FreeBSD" ]; then
echo "blobfs.sh cannot run on FreeBSD currently." echo "blobfs.sh cannot run on FreeBSD currently."
exit 0 exit 0
fi fi
testdir=$(readlink -f $(dirname $0)) testdir=$(readlink -f $(dirname $0))
@ -28,7 +28,7 @@ function cleanup() {
rm -f $conf_file rm -f $conf_file
} }
function blobfs_start_app { function blobfs_start_app() {
$rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -c ${conf_file} & $rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -c ${conf_file} &
blobfs_pid=$! blobfs_pid=$!
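The change to the blobfs_start_app definition above (adding "()" after the function name) is one of the purely mechanical rewrites shfmt produces. A hedged sketch of checking and reformatting scripts with shfmt locally; the flag set below is illustrative and not necessarily the exact invocation used by SPDK's check_format.sh:

# Illustrative shfmt usage (flags are assumptions, not the project's exact invocation).
shfmt -d $(git ls-files '*.sh') # print a diff of every formatting deviation
shfmt -w -i 0 -ci my_script.sh  # rewrite in place: tab indentation, indented case branches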


@ -10,7 +10,7 @@ dump_db_bench_on_err() {
# Dump entire *.txt to stderr to clearly see what might have failed # Dump entire *.txt to stderr to clearly see what might have failed
xtrace_disable xtrace_disable
mapfile -t step_map <"$db_bench" mapfile -t step_map < "$db_bench"
printf '%s\n' "${step_map[@]/#/* $step (FAILED)}" >&2 printf '%s\n' "${step_map[@]/#/* $step (FAILED)}" >&2
xtrace_restore xtrace_restore
} }
@ -22,9 +22,9 @@ run_step() {
fi fi
cat <<- EOL >> "$1"_flags.txt cat <<- EOL >> "$1"_flags.txt
--spdk=$ROCKSDB_CONF --spdk=$ROCKSDB_CONF
--spdk_bdev=Nvme0n1 --spdk_bdev=Nvme0n1
--spdk_cache_size=$CACHE_SIZE --spdk_cache_size=$CACHE_SIZE
EOL EOL
db_bench=$1_db_bench.txt db_bench=$1_db_bench.txt
@ -60,8 +60,8 @@ fi
EXTRA_CXXFLAGS="" EXTRA_CXXFLAGS=""
GCC_VERSION=$(cc -dumpversion | cut -d. -f1) GCC_VERSION=$(cc -dumpversion | cut -d. -f1)
if (( GCC_VERSION >= 9 )); then if ((GCC_VERSION >= 9)); then
EXTRA_CXXFLAGS+="-Wno-deprecated-copy -Wno-pessimizing-move -Wno-error=stringop-truncation" EXTRA_CXXFLAGS+="-Wno-deprecated-copy -Wno-pessimizing-move -Wno-error=stringop-truncation"
fi fi
$MAKE db_bench $MAKEFLAGS $MAKECONFIG DEBUG_LEVEL=0 SPDK_DIR=$rootdir EXTRA_CXXFLAGS="$EXTRA_CXXFLAGS" $MAKE db_bench $MAKEFLAGS $MAKECONFIG DEBUG_LEVEL=0 SPDK_DIR=$rootdir EXTRA_CXXFLAGS="$EXTRA_CXXFLAGS"


@ -1,9 +1,9 @@
#!/usr/bin/env bash #!/usr/bin/env bash
SYSTEM=$(uname -s) SYSTEM=$(uname -s)
if [ $SYSTEM = "FreeBSD" ] ; then if [ $SYSTEM = "FreeBSD" ]; then
echo "blob_io_wait.sh cannot run on FreeBSD currently." echo "blob_io_wait.sh cannot run on FreeBSD currently."
exit 0 exit 0
fi fi
testdir=$(readlink -f $(dirname $0)) testdir=$(readlink -f $(dirname $0))


@ -1,9 +1,9 @@
#!/usr/bin/env bash #!/usr/bin/env bash
SYSTEM=$(uname -s) SYSTEM=$(uname -s)
if [ $SYSTEM = "FreeBSD" ] ; then if [ $SYSTEM = "FreeBSD" ]; then
echo "blobstore.sh cannot run on FreeBSD currently." echo "blobstore.sh cannot run on FreeBSD currently."
exit 0 exit 0
fi fi
testdir=$(readlink -f $(dirname $0)) testdir=$(readlink -f $(dirname $0))
@ -16,8 +16,8 @@ $rootdir/scripts/gen_nvme.sh > $testdir/blobcli.conf
# generate random data file for import/export diff # generate random data file for import/export diff
dd if=/dev/urandom of=$testdir/test.pattern bs=1M count=1 dd if=/dev/urandom of=$testdir/test.pattern bs=1M count=1
(cd $testdir && (cd $testdir \
$rootdir/examples/blob/cli/blobcli -c $testdir/blobcli.conf -b Nvme0n1 -T $testdir/test.bs > $testdir/btest.out) && $rootdir/examples/blob/cli/blobcli -c $testdir/blobcli.conf -b Nvme0n1 -T $testdir/test.bs > $testdir/btest.out)
# the test script will import the test pattern generated by dd and then export # the test script will import the test pattern generated by dd and then export
# it to a file so we can compare and confirm basic read and write # it to a file so we can compare and confirm basic read and write


@ -12,8 +12,8 @@ VHOST_APP=("$_app_dir/vhost/vhost")
# Check if apps should execute under debug flags # Check if apps should execute under debug flags
if [[ -e $_root/include/spdk/config.h ]]; then if [[ -e $_root/include/spdk/config.h ]]; then
if [[ $(<"$_root/include/spdk/config.h") == *"#define SPDK_CONFIG_DEBUG"* ]] \ if [[ $(< "$_root/include/spdk/config.h") == *"#define SPDK_CONFIG_DEBUG"* ]] \
&& (( SPDK_AUTOTEST_DEBUG_APPS )); then && ((SPDK_AUTOTEST_DEBUG_APPS)); then
VHOST_FUZZ_APP+=("--log-flags=all") VHOST_FUZZ_APP+=("--log-flags=all")
ISCSI_APP+=("--log-flags=all") ISCSI_APP+=("--log-flags=all")
NVMF_APP+=("--log-flags=all") NVMF_APP+=("--log-flags=all")


@ -7,10 +7,10 @@ function xtrace_disable() {
XTRACE_DISABLED="yes" XTRACE_DISABLED="yes"
fi fi
set +x set +x
elif [ -z $XTRACE_NESTING_LEVEL ]; then elif [ -z $XTRACE_NESTING_LEVEL ]; then
XTRACE_NESTING_LEVEL=1 XTRACE_NESTING_LEVEL=1
else else
XTRACE_NESTING_LEVEL=$((++XTRACE_NESTING_LEVEL)) XTRACE_NESTING_LEVEL=$((++XTRACE_NESTING_LEVEL))
fi fi
} }
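xtrace_disable above keeps a nesting counter so that tracing is only restored by the outermost caller. A stripped-down sketch of the same idea; the function names here are invented and the bodies simplified compared to the real helpers:

# Stripped-down sketch of nested xtrace suppression (invented names, simplified bodies).
xtrace_off() {
    if [ -z "$XTRACE_NESTING_LEVEL" ]; then
        PREV_BASH_OPTS="$-" # remember whether -x was active
        set +x
        XTRACE_NESTING_LEVEL=1
    else
        XTRACE_NESTING_LEVEL=$((XTRACE_NESTING_LEVEL + 1))
    fi
}
xtrace_on() {
    XTRACE_NESTING_LEVEL=$((XTRACE_NESTING_LEVEL - 1))
    if ((XTRACE_NESTING_LEVEL == 0)); then
        unset XTRACE_NESTING_LEVEL
        if [[ $PREV_BASH_OPTS == *x* ]]; then set -x; fi
    fi
}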
@ -22,7 +22,7 @@ source "$rootdir/test/common/applications.sh"
if [[ -e $rootdir/test/common/build_config.sh ]]; then if [[ -e $rootdir/test/common/build_config.sh ]]; then
source "$rootdir/test/common/build_config.sh" source "$rootdir/test/common/build_config.sh"
elif [[ -e $rootdir/mk/config.mk ]]; then elif [[ -e $rootdir/mk/config.mk ]]; then
build_config=$(<"$rootdir/mk/config.mk") build_config=$(< "$rootdir/mk/config.mk")
source <(echo "${build_config//\?=/=}") source <(echo "${build_config//\?=/=}")
else else
source "$rootdir/CONFIG" source "$rootdir/CONFIG"
@ -39,8 +39,7 @@ function xtrace_enable() {
# Keep it as alias to avoid xtrace_enable backtrace always pointing to xtrace_restore. # Keep it as alias to avoid xtrace_enable backtrace always pointing to xtrace_restore.
# xtrace_enable will appear as called directly from the user script, from the same line # xtrace_enable will appear as called directly from the user script, from the same line
# that "called" xtrace_restore. # that "called" xtrace_restore.
alias xtrace_restore=\ alias xtrace_restore='if [ -z $XTRACE_NESTING_LEVEL ]; then
'if [ -z $XTRACE_NESTING_LEVEL ]; then
if [[ "$PREV_BASH_OPTS" == *"x"* ]]; then if [[ "$PREV_BASH_OPTS" == *"x"* ]]; then
XTRACE_DISABLED="no"; PREV_BASH_OPTS=""; set -x; xtrace_enable; XTRACE_DISABLED="no"; PREV_BASH_OPTS=""; set -x; xtrace_enable;
fi fi
@ -55,42 +54,78 @@ fi'
export RUN_NIGHTLY export RUN_NIGHTLY
# Set defaults for missing test config options # Set defaults for missing test config options
: ${SPDK_AUTOTEST_DEBUG_APPS:=0}; export SPDK_AUTOTEST_DEBUG_APPS : ${SPDK_AUTOTEST_DEBUG_APPS:=0}
: ${SPDK_RUN_VALGRIND=0}; export SPDK_RUN_VALGRIND export SPDK_AUTOTEST_DEBUG_APPS
: ${SPDK_RUN_FUNCTIONAL_TEST=0}; export SPDK_RUN_FUNCTIONAL_TEST : ${SPDK_RUN_VALGRIND=0}
: ${SPDK_TEST_UNITTEST=0}; export SPDK_TEST_UNITTEST export SPDK_RUN_VALGRIND
: ${SPDK_TEST_AUTOBUILD=0}; export SPDK_TEST_AUTOBUILD : ${SPDK_RUN_FUNCTIONAL_TEST=0}
: ${SPDK_TEST_ISAL=0}; export SPDK_TEST_ISAL export SPDK_RUN_FUNCTIONAL_TEST
: ${SPDK_TEST_ISCSI=0}; export SPDK_TEST_ISCSI : ${SPDK_TEST_UNITTEST=0}
: ${SPDK_TEST_ISCSI_INITIATOR=0}; export SPDK_TEST_ISCSI_INITIATOR export SPDK_TEST_UNITTEST
: ${SPDK_TEST_NVME=0}; export SPDK_TEST_NVME : ${SPDK_TEST_AUTOBUILD=0}
: ${SPDK_TEST_NVME_CLI=0}; export SPDK_TEST_NVME_CLI export SPDK_TEST_AUTOBUILD
: ${SPDK_TEST_NVME_CUSE=0}; export SPDK_TEST_NVME_CUSE : ${SPDK_TEST_ISAL=0}
: ${SPDK_TEST_NVMF=0}; export SPDK_TEST_NVMF export SPDK_TEST_ISAL
: ${SPDK_TEST_NVMF_TRANSPORT="rdma"}; export SPDK_TEST_NVMF_TRANSPORT : ${SPDK_TEST_ISCSI=0}
: ${SPDK_TEST_RBD=0}; export SPDK_TEST_RBD export SPDK_TEST_ISCSI
: ${SPDK_TEST_VHOST=0}; export SPDK_TEST_VHOST : ${SPDK_TEST_ISCSI_INITIATOR=0}
: ${SPDK_TEST_BLOCKDEV=0}; export SPDK_TEST_BLOCKDEV export SPDK_TEST_ISCSI_INITIATOR
: ${SPDK_TEST_IOAT=0}; export SPDK_TEST_IOAT : ${SPDK_TEST_NVME=0}
: ${SPDK_TEST_BLOBFS=0}; export SPDK_TEST_BLOBFS export SPDK_TEST_NVME
: ${SPDK_TEST_VHOST_INIT=0}; export SPDK_TEST_VHOST_INIT : ${SPDK_TEST_NVME_CLI=0}
: ${SPDK_TEST_PMDK=0}; export SPDK_TEST_PMDK export SPDK_TEST_NVME_CLI
: ${SPDK_TEST_LVOL=0}; export SPDK_TEST_LVOL : ${SPDK_TEST_NVME_CUSE=0}
: ${SPDK_TEST_JSON=0}; export SPDK_TEST_JSON export SPDK_TEST_NVME_CUSE
: ${SPDK_TEST_REDUCE=0}; export SPDK_TEST_REDUCE : ${SPDK_TEST_NVMF=0}
: ${SPDK_TEST_VPP=0}; export SPDK_TEST_VPP export SPDK_TEST_NVMF
: ${SPDK_RUN_ASAN=0}; export SPDK_RUN_ASAN : ${SPDK_TEST_NVMF_TRANSPORT="rdma"}
: ${SPDK_RUN_UBSAN=0}; export SPDK_RUN_UBSAN export SPDK_TEST_NVMF_TRANSPORT
: ${SPDK_RUN_INSTALLED_DPDK=0}; export SPDK_RUN_INSTALLED_DPDK : ${SPDK_TEST_RBD=0}
: ${SPDK_RUN_NON_ROOT=0}; export SPDK_RUN_NON_ROOT export SPDK_TEST_RBD
: ${SPDK_TEST_CRYPTO=0}; export SPDK_TEST_CRYPTO : ${SPDK_TEST_VHOST=0}
: ${SPDK_TEST_FTL=0}; export SPDK_TEST_FTL export SPDK_TEST_VHOST
: ${SPDK_TEST_OCF=0}; export SPDK_TEST_OCF : ${SPDK_TEST_BLOCKDEV=0}
: ${SPDK_TEST_FTL_EXTENDED=0}; export SPDK_TEST_FTL_EXTENDED export SPDK_TEST_BLOCKDEV
: ${SPDK_TEST_VMD=0}; export SPDK_TEST_VMD : ${SPDK_TEST_IOAT=0}
: ${SPDK_TEST_OPAL=0}; export SPDK_TEST_OPAL export SPDK_TEST_IOAT
: ${SPDK_AUTOTEST_X=true}; export SPDK_AUTOTEST_X : ${SPDK_TEST_BLOBFS=0}
: ${SPDK_TEST_RAID5=0}; export SPDK_TEST_RAID5 export SPDK_TEST_BLOBFS
: ${SPDK_TEST_VHOST_INIT=0}
export SPDK_TEST_VHOST_INIT
: ${SPDK_TEST_PMDK=0}
export SPDK_TEST_PMDK
: ${SPDK_TEST_LVOL=0}
export SPDK_TEST_LVOL
: ${SPDK_TEST_JSON=0}
export SPDK_TEST_JSON
: ${SPDK_TEST_REDUCE=0}
export SPDK_TEST_REDUCE
: ${SPDK_TEST_VPP=0}
export SPDK_TEST_VPP
: ${SPDK_RUN_ASAN=0}
export SPDK_RUN_ASAN
: ${SPDK_RUN_UBSAN=0}
export SPDK_RUN_UBSAN
: ${SPDK_RUN_INSTALLED_DPDK=0}
export SPDK_RUN_INSTALLED_DPDK
: ${SPDK_RUN_NON_ROOT=0}
export SPDK_RUN_NON_ROOT
: ${SPDK_TEST_CRYPTO=0}
export SPDK_TEST_CRYPTO
: ${SPDK_TEST_FTL=0}
export SPDK_TEST_FTL
: ${SPDK_TEST_OCF=0}
export SPDK_TEST_OCF
: ${SPDK_TEST_FTL_EXTENDED=0}
export SPDK_TEST_FTL_EXTENDED
: ${SPDK_TEST_VMD=0}
export SPDK_TEST_VMD
: ${SPDK_TEST_OPAL=0}
export SPDK_TEST_OPAL
: ${SPDK_AUTOTEST_X=true}
export SPDK_AUTOTEST_X
: ${SPDK_TEST_RAID5=0}
export SPDK_TEST_RAID5
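The long run of defaults above relies on the ":" builtin plus default-assignment parameter expansion, now split so that each assignment and its export sit on separate lines. A short sketch of the idiom with an invented variable name:

# Sketch of the default-assignment idiom (variable name is invented).
: "${SPDK_TEST_EXAMPLE=0}" # assign 0 only if the variable is currently unset
export SPDK_TEST_EXAMPLE
echo "SPDK_TEST_EXAMPLE=$SPDK_TEST_EXAMPLE"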
# Export PYTHONPATH with addition of RPC framework. New scripts can be created # Export PYTHONPATH with addition of RPC framework. New scripts can be created
# specific use cases for tests. # specific use cases for tests.
@ -194,7 +229,7 @@ for i in "$@"; do
done done
# start rpc.py coprocess if it's not started yet # start rpc.py coprocess if it's not started yet
if [[ -z $RPC_PIPE_PID ]] || ! kill -0 "$RPC_PIPE_PID" &>/dev/null; then if [[ -z $RPC_PIPE_PID ]] || ! kill -0 "$RPC_PIPE_PID" &> /dev/null; then
coproc RPC_PIPE { "$rootdir/scripts/rpc.py" --server; } coproc RPC_PIPE { "$rootdir/scripts/rpc.py" --server; }
exec {RPC_PIPE_OUTPUT}<&${RPC_PIPE[0]} {RPC_PIPE_INPUT}>&${RPC_PIPE[1]} exec {RPC_PIPE_OUTPUT}<&${RPC_PIPE[0]} {RPC_PIPE_INPUT}>&${RPC_PIPE[1]}
# all descriptors will automatically close together with this bash # all descriptors will automatically close together with this bash
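The hunk above keeps a single rpc.py --server coprocess alive and duplicates its pipe ends into named descriptors. A minimal sketch of that coproc pattern; plain cat stands in for the RPC server so the example is self-contained:

# Sketch: drive a long-lived coprocess over named descriptors (cat stands in for rpc.py --server).
coproc DEMO { cat; }
exec {DEMO_OUT}<&"${DEMO[0]}" {DEMO_IN}>&"${DEMO[1]}"
echo "ping" >&"$DEMO_IN"         # send one request line
IFS= read -r reply <&"$DEMO_OUT" # read one response line
echo "got: $reply"
exec {DEMO_IN}>&- {DEMO_OUT}<&-  # close our copies; the coprocess then sees EOF and exits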
@ -211,8 +246,8 @@ function get_config_params() {
xtrace_disable xtrace_disable
config_params='--enable-debug --enable-werror' config_params='--enable-debug --enable-werror'
if echo -e "#include <libunwind.h>\nint main(int argc, char *argv[]) {return 0;}\n" | \ if echo -e "#include <libunwind.h>\nint main(int argc, char *argv[]) {return 0;}\n" \
gcc -o /dev/null -lunwind -x c - 2>/dev/null; then | gcc -o /dev/null -lunwind -x c - 2> /dev/null; then
config_params+=' --enable-log-bt' config_params+=' --enable-log-bt'
fi fi
@ -221,13 +256,13 @@ function get_config_params() {
config_params+=' --with-rdma' config_params+=' --with-rdma'
fi fi
if [ $(uname -s) == "FreeBSD" ]; then if [ $(uname -s) == "FreeBSD" ]; then
intel="hw.model: Intel" intel="hw.model: Intel"
cpu_vendor=$(sysctl -a | grep hw.model | cut -c 1-15) cpu_vendor=$(sysctl -a | grep hw.model | cut -c 1-15)
else else
intel="GenuineIntel" intel="GenuineIntel"
cpu_vendor=$(grep -i 'vendor' /proc/cpuinfo --max-count=1) cpu_vendor=$(grep -i 'vendor' /proc/cpuinfo --max-count=1)
fi fi
if [[ "$cpu_vendor" != *"$intel"* ]]; then if [[ "$cpu_vendor" != *"$intel"* ]]; then
config_params+=" --without-idxd" config_params+=" --without-idxd"
else else
@ -268,7 +303,7 @@ function get_config_params() {
fi fi
fi fi
if [ -d /usr/include/rbd ] && [ -d /usr/include/rados ] && [ $SPDK_TEST_RBD -eq 1 ]; then if [ -d /usr/include/rbd ] && [ -d /usr/include/rados ] && [ $SPDK_TEST_RBD -eq 1 ]; then
config_params+=' --with-rbd' config_params+=' --with-rbd'
fi fi
@ -375,7 +410,7 @@ function rpc_cmd_simple_data_json() {
while read -r elem val; do while read -r elem val; do
jq_out["$elem"]=$val jq_out["$elem"]=$val
done < <(rpc_cmd "$@" | jq -jr "$jq") done < <(rpc_cmd "$@" | jq -jr "$jq")
(( ${#jq_out[@]} > 0 )) || return 1 ((${#jq_out[@]} > 0)) || return 1
} }
# invert error code of any command and also trigger ERR on 0 (unlike bash ! prefix) # invert error code of any command and also trigger ERR on 0 (unlike bash ! prefix)
@ -428,7 +463,7 @@ function timing_finish() {
--nametype 'Step:' \ --nametype 'Step:' \
--countname seconds \ --countname seconds \
$output_dir/timing.txt \ $output_dir/timing.txt \
>$output_dir/timing.svg > $output_dir/timing.svg
fi fi
} }
@ -439,8 +474,8 @@ function create_test_list() {
# Follow up with search in test directory recursively. # Follow up with search in test directory recursively.
completion+=$(grep -rshI --include="*.sh" --exclude="autotest_common.sh" -e "run_test " $rootdir/test) completion+=$(grep -rshI --include="*.sh" --exclude="autotest_common.sh" -e "run_test " $rootdir/test)
printf "%s" "$completion" | grep -v "#" \ printf "%s" "$completion" | grep -v "#" \
| sed 's/^.*run_test/run_test/' | awk '{print $2}' | \ | sed 's/^.*run_test/run_test/' | awk '{print $2}' \
sed 's/\"//g' | sort > $output_dir/all_tests.txt || true | sed 's/\"//g' | sort > $output_dir/all_tests.txt || true
xtrace_restore xtrace_restore
} }
@ -464,15 +499,14 @@ function gdb_attach() {
function process_core() { function process_core() {
ret=0 ret=0
while IFS= read -r -d '' core; while IFS= read -r -d '' core; do
do
exe=$(eu-readelf -n "$core" | grep psargs | sed "s/.*psargs: \([^ \'\" ]*\).*/\1/") exe=$(eu-readelf -n "$core" | grep psargs | sed "s/.*psargs: \([^ \'\" ]*\).*/\1/")
if [[ ! -f "$exe" ]]; then if [[ ! -f "$exe" ]]; then
exe=$(eu-readelf -n "$core" | grep -oP -m1 "$exe.+") exe=$(eu-readelf -n "$core" | grep -oP -m1 "$exe.+")
fi fi
echo "exe for $core is $exe" echo "exe for $core is $exe"
if [[ -n "$exe" ]]; then if [[ -n "$exe" ]]; then
if hash gdb &>/dev/null; then if hash gdb &> /dev/null; then
gdb -batch -ex "thread apply all bt full" $exe $core gdb -batch -ex "thread apply all bt full" $exe $core
fi fi
cp $exe $output_dir cp $exe $output_dir
@ -521,7 +555,7 @@ function waitforlisten() {
xtrace_disable xtrace_disable
local ret=0 local ret=0
local i local i
for (( i = 40; i != 0; i-- )); do for ((i = 40; i != 0; i--)); do
# if the process is no longer running, then exit the script # if the process is no longer running, then exit the script
# since it means the application crashed # since it means the application crashed
if ! kill -s 0 $1; then if ! kill -s 0 $1; then
@ -530,7 +564,7 @@ function waitforlisten() {
break break
fi fi
if $rootdir/scripts/rpc.py -t 1 -s "$rpc_addr" rpc_get_methods &>/dev/null; then if $rootdir/scripts/rpc.py -t 1 -s "$rpc_addr" rpc_get_methods &> /dev/null; then
break break
fi fi
@ -538,7 +572,7 @@ function waitforlisten() {
done done
xtrace_restore xtrace_restore
if (( i == 0 )); then if ((i == 0)); then
echo "ERROR: timeout while waiting for process (pid: $1) to start listening on '$rpc_addr'" echo "ERROR: timeout while waiting for process (pid: $1) to start listening on '$rpc_addr'"
ret=1 ret=1
fi fi
@ -549,7 +583,7 @@ function waitfornbd() {
local nbd_name=$1 local nbd_name=$1
local i local i
for ((i=1; i<=20; i++)); do for ((i = 1; i <= 20; i++)); do
if grep -q -w $nbd_name /proc/partitions; then if grep -q -w $nbd_name /proc/partitions; then
break break
else else
@ -562,7 +596,7 @@ function waitfornbd() {
# here trying to read the first block of the nbd block device to a temp # here trying to read the first block of the nbd block device to a temp
# file. Note that dd returns success when reading an empty file, so we # file. Note that dd returns success when reading an empty file, so we
# need to check the size of the output file instead. # need to check the size of the output file instead.
for ((i=1; i<=20; i++)); do for ((i = 1; i <= 20; i++)); do
dd if=/dev/$nbd_name of=/tmp/nbdtest bs=4096 count=1 iflag=direct dd if=/dev/$nbd_name of=/tmp/nbdtest bs=4096 count=1 iflag=direct
size=$(stat -c %s /tmp/nbdtest) size=$(stat -c %s /tmp/nbdtest)
rm -f /tmp/nbdtest rm -f /tmp/nbdtest
@ -580,7 +614,7 @@ function waitforbdev() {
local bdev_name=$1 local bdev_name=$1
local i local i
for ((i=1; i<=20; i++)); do for ((i = 1; i <= 20; i++)); do
if $rpc_py bdev_get_bdevs | jq -r '.[] .name' | grep -qw $bdev_name; then if $rpc_py bdev_get_bdevs | jq -r '.[] .name' | grep -qw $bdev_name; then
return 0 return 0
fi fi
@ -611,7 +645,7 @@ function make_filesystem() {
if [ $i -ge 15 ]; then if [ $i -ge 15 ]; then
return 1 return 1
fi fi
i=$((i+1)) i=$((i + 1))
sleep 1 sleep 1
done done
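make_filesystem above, like the waitfor* helpers elsewhere in this file, retries a probe a bounded number of times before giving up. A generic sketch of that loop; wait_for is an invented helper name and the probe shown in the comment is just an example:

# Generic sketch of the bounded-retry pattern (wait_for is an invented helper).
wait_for() { # usage: wait_for <attempts> <delay_seconds> <probe command...>
    local attempts=$1 delay=$2 i
    shift 2
    for ((i = 1; i <= attempts; i++)); do
        "$@" && return 0 # probe succeeded
        sleep "$delay"
    done
    return 1 # probe never succeeded within the budget
}
# Example: wait up to 20 seconds for nbd0 to appear in /proc/partitions.
# wait_for 20 1 grep -qw nbd0 /proc/partitions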
@ -715,7 +749,7 @@ function _start_stub() {
# but ASLR can still be unreliable in some cases. # but ASLR can still be unreliable in some cases.
# We will reenable it again after multi-process testing is complete in kill_stub(). # We will reenable it again after multi-process testing is complete in kill_stub().
# Save current setting so it can be restored upon calling kill_stub(). # Save current setting so it can be restored upon calling kill_stub().
_randomize_va_space=$(</proc/sys/kernel/randomize_va_space) _randomize_va_space=$(< /proc/sys/kernel/randomize_va_space)
echo 0 > /proc/sys/kernel/randomize_va_space echo 0 > /proc/sys/kernel/randomize_va_space
$rootdir/test/app/stub/stub $1 & $rootdir/test/app/stub/stub $1 &
stubpid=$! stubpid=$!
@ -739,7 +773,7 @@ function kill_stub() {
if [[ -e /proc/$stubpid ]]; then if [[ -e /proc/$stubpid ]]; then
kill $1 $stubpid kill $1 $stubpid
wait $stubpid wait $stubpid
fi 2>/dev/null || : fi 2> /dev/null || :
rm -f /var/run/spdk_stub0 rm -f /var/run/spdk_stub0
# Re-enable ASLR now that we are done with multi-process testing # Re-enable ASLR now that we are done with multi-process testing
# Note: "1" enables ASLR w/o randomizing data segments, "2" adds data segment # Note: "1" enables ASLR w/o randomizing data segments, "2" adds data segment
@ -811,12 +845,12 @@ function print_backtrace() {
local bt="" cmdline=() local bt="" cmdline=()
if [[ -f $src ]]; then if [[ -f $src ]]; then
bt=$(nl -w 4 -ba -nln $src | grep -B 5 -A 5 "^${line_nr}[^0-9]" | \ bt=$(nl -w 4 -ba -nln $src | grep -B 5 -A 5 "^${line_nr}[^0-9]" \
sed "s/^/ /g" | sed "s/^ $line_nr /=> $line_nr /g") | sed "s/^/ /g" | sed "s/^ $line_nr /=> $line_nr /g")
fi fi
# If extdebug set the BASH_ARGC[i], try to fetch all the args # If extdebug set the BASH_ARGC[i], try to fetch all the args
if (( BASH_ARGC[i] > 0 )); then if ((BASH_ARGC[i] > 0)); then
# Use argc as index to reverse the stack # Use argc as index to reverse the stack
local argc=${BASH_ARGC[i]} arg local argc=${BASH_ARGC[i]} arg
for arg in "${args[@]::BASH_ARGC[i]}"; do for arg in "${args[@]::BASH_ARGC[i]}"; do
@ -825,7 +859,10 @@ function print_backtrace() {
args=("${args[@]:BASH_ARGC[i]}") args=("${args[@]:BASH_ARGC[i]}")
fi fi
echo "in $src:$line_nr -> $func($(IFS=","; printf '%s\n' "${cmdline[*]:-[]}"))" echo "in $src:$line_nr -> $func($(
IFS=","
printf '%s\n' "${cmdline[*]:-[]}"
))"
echo " ..." echo " ..."
echo "${bt:-backtrace unavailable}" echo "${bt:-backtrace unavailable}"
echo " ..." echo " ..."
@ -836,8 +873,7 @@ function print_backtrace() {
return 0 return 0
} }
function discover_bdevs() function discover_bdevs() {
{
local rootdir=$1 local rootdir=$1
local config_file=$2 local config_file=$2
local cfg_type=$3 local cfg_type=$3
@ -856,11 +892,11 @@ function discover_bdevs()
# Start the bdev service to query for the list of available # Start the bdev service to query for the list of available
# bdevs. # bdevs.
$rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -i 0 \ $rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -i 0 \
$cfg_type $config_file &>/dev/null & $cfg_type $config_file &> /dev/null &
stubpid=$! stubpid=$!
while ! [ -e /var/run/spdk_bdev0 ]; do while ! [ -e /var/run/spdk_bdev0 ]; do
# If this counter drops to zero, errexit will be caught to abort the test # If this counter drops to zero, errexit will be caught to abort the test
(( wait_for_spdk_bdev-- )) ((wait_for_spdk_bdev--))
sleep 1 sleep 1
done done
@ -873,8 +909,7 @@ function discover_bdevs()
rm -f /var/run/spdk_bdev0 rm -f /var/run/spdk_bdev0
} }
function waitforserial() function waitforserial() {
{
local i=0 local i=0
local nvme_device_counter=1 local nvme_device_counter=1
if [[ -n "$2" ]]; then if [[ -n "$2" ]]; then
@ -883,7 +918,7 @@ function waitforserial()
while [ $(lsblk -l -o NAME,SERIAL | grep -c $1) -lt $nvme_device_counter ]; do while [ $(lsblk -l -o NAME,SERIAL | grep -c $1) -lt $nvme_device_counter ]; do
[ $i -lt 15 ] || break [ $i -lt 15 ] || break
i=$((i+1)) i=$((i + 1))
echo "Waiting for devices" echo "Waiting for devices"
sleep 1 sleep 1
done done
@ -892,15 +927,14 @@ function waitforserial()
return 1 return 1
fi fi
return 0 return 0
} }
function waitforserial_disconnect() function waitforserial_disconnect() {
{
local i=0 local i=0
while lsblk -o NAME,SERIAL | grep -q -w $1; do while lsblk -o NAME,SERIAL | grep -q -w $1; do
[ $i -lt 15 ] || break [ $i -lt 15 ] || break
i=$((i+1)) i=$((i + 1))
echo "Waiting for disconnect devices" echo "Waiting for disconnect devices"
sleep 1 sleep 1
done done
@ -912,12 +946,11 @@ function waitforserial_disconnect()
return 0 return 0
} }
function waitforblk() function waitforblk() {
{
local i=0 local i=0
while ! lsblk -l -o NAME | grep -q -w $1; do while ! lsblk -l -o NAME | grep -q -w $1; do
[ $i -lt 15 ] || break [ $i -lt 15 ] || break
i=$((i+1)) i=$((i + 1))
sleep 1 sleep 1
done done
@ -928,12 +961,11 @@ function waitforblk()
return 0 return 0
} }
function waitforblk_disconnect() function waitforblk_disconnect() {
{
local i=0 local i=0
while lsblk -l -o NAME | grep -q -w $1; do while lsblk -l -o NAME | grep -q -w $1; do
[ $i -lt 15 ] || break [ $i -lt 15 ] || break
i=$((i+1)) i=$((i + 1))
sleep 1 sleep 1
done done
@ -944,12 +976,11 @@ function waitforblk_disconnect()
return 0 return 0
} }
function waitforfile() function waitforfile() {
{
local i=0 local i=0
while [ ! -e $1 ]; do while [ ! -e $1 ]; do
[ $i -lt 200 ] || break [ $i -lt 200 ] || break
i=$((i+1)) i=$((i + 1))
sleep 0.1 sleep 0.1
done done
@ -960,8 +991,7 @@ function waitforfile()
return 0 return 0
} }
function fio_config_gen() function fio_config_gen() {
{
local config_file=$1 local config_file=$1
local workload=$2 local workload=$2
local bdev_type=$3 local bdev_type=$3
@ -991,9 +1021,9 @@ EOL
if [ "$workload" == "verify" ]; then if [ "$workload" == "verify" ]; then
cat <<- EOL >> $config_file cat <<- EOL >> $config_file
verify=sha1 verify=sha1
verify_backlog=1024 verify_backlog=1024
rw=randwrite rw=randwrite
EOL EOL
# To avoid potential data race issue due to the AIO device # To avoid potential data race issue due to the AIO device
@ -1011,8 +1041,7 @@ EOL
fi fi
} }
function fio_bdev() function fio_bdev() {
{
# Setup fio binary cmd line # Setup fio binary cmd line
local fio_dir=$CONFIG_FIO_SOURCE_DIR local fio_dir=$CONFIG_FIO_SOURCE_DIR
local bdev_plugin="$rootdir/examples/bdev/fio_plugin/fio_plugin" local bdev_plugin="$rootdir/examples/bdev/fio_plugin/fio_plugin"
@ -1024,8 +1053,7 @@ function fio_bdev()
LD_PRELOAD="$asan_lib $bdev_plugin" "$fio_dir"/fio "$@" LD_PRELOAD="$asan_lib $bdev_plugin" "$fio_dir"/fio "$@"
} }
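fio_bdev above preloads the SPDK bdev ioengine into an external fio binary so that fio jobs can target SPDK bdevs directly. A minimal sketch of that invocation; the fio and plugin paths, the job name, and the target bdev are assumed examples:

# Sketch: run an external fio with the SPDK bdev ioengine preloaded (paths and names are assumed examples).
fio_bin=/usr/src/fio/fio
bdev_plugin=./examples/bdev/fio_plugin/fio_plugin
LD_PRELOAD="$bdev_plugin" "$fio_bin" --ioengine=spdk_bdev \
    --spdk_json_conf=./bdev.json --name=rw_verify --rw=randrw --bs=4k \
    --iodepth=8 --runtime=10 --time_based=1 --filename=Malloc0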
function fio_nvme() function fio_nvme() {
{
# Setup fio binary cmd line # Setup fio binary cmd line
local fio_dir=$CONFIG_FIO_SOURCE_DIR local fio_dir=$CONFIG_FIO_SOURCE_DIR
local nvme_plugin="$rootdir/examples/nvme/fio_plugin/fio_plugin" local nvme_plugin="$rootdir/examples/nvme/fio_plugin/fio_plugin"
@ -1036,8 +1064,7 @@ function fio_nvme()
LD_PRELOAD="$asan_lib $nvme_plugin" "$fio_dir"/fio "$@" LD_PRELOAD="$asan_lib $nvme_plugin" "$fio_dir"/fio "$@"
} }
function get_lvs_free_mb() function get_lvs_free_mb() {
{
local lvs_uuid=$1 local lvs_uuid=$1
local lvs_info local lvs_info
local fc local fc
@ -1047,12 +1074,11 @@ function get_lvs_free_mb()
cs=$(jq ".[] | select(.uuid==\"$lvs_uuid\") .cluster_size" <<< "$lvs_info") cs=$(jq ".[] | select(.uuid==\"$lvs_uuid\") .cluster_size" <<< "$lvs_info")
# Change to MB's # Change to MB's
free_mb=$((fc*cs/1024/1024)) free_mb=$((fc * cs / 1024 / 1024))
echo "$free_mb" echo "$free_mb"
} }
function get_bdev_size() function get_bdev_size() {
{
local bdev_name=$1 local bdev_name=$1
local bdev_info local bdev_info
local bs local bs
@ -1062,12 +1088,11 @@ function get_bdev_size()
nb=$(jq ".[] .num_blocks" <<< "$bdev_info") nb=$(jq ".[] .num_blocks" <<< "$bdev_info")
# Change to MB's # Change to MB's
bdev_size=$((bs*nb/1024/1024)) bdev_size=$((bs * nb / 1024 / 1024))
echo "$bdev_size" echo "$bdev_size"
} }
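get_bdev_size above multiplies block_size by num_blocks and scales the product down to MiB. A one-shot sketch of the same computation against canned JSON; the bdev in the sample is made up:

# Sketch: block_size * num_blocks scaled to MiB (the JSON sample is made up).
bdev_info='[{"name":"Nvme0n1","block_size":512,"num_blocks":2097152}]'
bs=$(jq '.[0].block_size' <<< "$bdev_info")
nb=$(jq '.[0].num_blocks' <<< "$bdev_info")
echo "$((bs * nb / 1024 / 1024)) MiB" # -> 1024 MiB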
function autotest_cleanup() function autotest_cleanup() {
{
$rootdir/scripts/setup.sh reset $rootdir/scripts/setup.sh reset
$rootdir/scripts/setup.sh cleanup $rootdir/scripts/setup.sh cleanup
if [ $(uname -s) = "Linux" ]; then if [ $(uname -s) = "Linux" ]; then
@ -1080,8 +1105,7 @@ function autotest_cleanup()
rm -rf "$asan_suppression_file" rm -rf "$asan_suppression_file"
} }
function freebsd_update_contigmem_mod() function freebsd_update_contigmem_mod() {
{
if [ $(uname) = FreeBSD ]; then if [ $(uname) = FreeBSD ]; then
kldunload contigmem.ko || true kldunload contigmem.ko || true
if [ -n "$WITH_DPDK_DIR" ]; then if [ -n "$WITH_DPDK_DIR" ]; then
@ -1099,7 +1123,7 @@ function freebsd_update_contigmem_mod()
fi fi
} }
function get_nvme_name_from_bdf { function get_nvme_name_from_bdf() {
blkname=() blkname=()
nvme_devs=$(lsblk -d --output NAME | grep "^nvme") || true nvme_devs=$(lsblk -d --output NAME | grep "^nvme") || true
@ -1120,7 +1144,7 @@ function get_nvme_name_from_bdf {
printf '%s\n' "${blkname[@]}" printf '%s\n' "${blkname[@]}"
} }
function opal_revert_cleanup { function opal_revert_cleanup() {
$rootdir/app/spdk_tgt/spdk_tgt & $rootdir/app/spdk_tgt/spdk_tgt &
spdk_tgt_pid=$! spdk_tgt_pid=$!
waitforlisten $spdk_tgt_pid waitforlisten $spdk_tgt_pid
@ -1138,14 +1162,14 @@ function opal_revert_cleanup {
# Get BDF addresses of all NVMe drives currently attached to # Get BDF addresses of all NVMe drives currently attached to
# uio-pci-generic or vfio-pci # uio-pci-generic or vfio-pci
function get_nvme_bdfs() { function get_nvme_bdfs() {
xtrace_disable xtrace_disable
jq -r .config[].params.traddr <<< $(scripts/gen_nvme.sh --json) jq -r .config[].params.traddr <<< $(scripts/gen_nvme.sh --json)
xtrace_restore xtrace_restore
} }
# Same as function above, but just get the first disks BDF address # Same as function above, but just get the first disks BDF address
function get_first_nvme_bdf() { function get_first_nvme_bdf() {
head -1 <<< $(get_nvme_bdfs) head -1 <<< $(get_nvme_bdfs)
} }
set -o errtrace set -o errtrace

File diff suppressed because it is too large


@ -63,11 +63,13 @@ function run_bdevperf() {
test_type=$1 test_type=$1
case "$test_type" in case "$test_type" in
qat ) qat)
pmd=1;; pmd=1
isal ) ;;
pmd=2;; isal)
* ) pmd=2
;;
*)
echo "invalid pmd name" echo "invalid pmd name"
exit 1 exit 1
;; ;;


@ -10,7 +10,7 @@ device=$1
use_append=$2 use_append=$2
rpc_py=$rootdir/scripts/rpc.py rpc_py=$rootdir/scripts/rpc.py
for (( i=0; i<${#tests[@]}; i++ )) do for ((i = 0; i < ${#tests[@]}; i++)); do
timing_enter "${tests[$i]}" timing_enter "${tests[$i]}"
"$rootdir/test/bdev/bdevperf/bdevperf" -z -T ftl0 ${tests[$i]} --json <(gen_ftl_nvme_conf) & "$rootdir/test/bdev/bdevperf/bdevperf" -z -T ftl0 ${tests[$i]} --json <(gen_ftl_nvme_conf) &
bdevperf_pid=$! bdevperf_pid=$!


@ -1,24 +1,24 @@
# Common utility functions to be sourced by the libftl test scripts # Common utility functions to be sourced by the libftl test scripts
function get_chunk_size() { function get_chunk_size() {
$rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" | $rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" \
grep 'Logical blks per chunk' | sed 's/[^0-9]//g' | grep 'Logical blks per chunk' | sed 's/[^0-9]//g'
} }
function get_num_group() { function get_num_group() {
$rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" | $rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" \
grep 'Groups' | sed 's/[^0-9]//g' | grep 'Groups' | sed 's/[^0-9]//g'
} }
function get_num_pu() { function get_num_pu() {
$rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" | $rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" \
grep 'PUs' | sed 's/[^0-9]//g' | grep 'PUs' | sed 's/[^0-9]//g'
} }
function has_separate_md() { function has_separate_md() {
local md_type local md_type
md_type=$($rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" | \ md_type=$($rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:$1" \
grep 'Metadata Transferred' | cut -d: -f2) | grep 'Metadata Transferred' | cut -d: -f2)
if [[ "$md_type" =~ Separate ]]; then if [[ "$md_type" =~ Separate ]]; then
return 0 return 0
else else
@ -48,7 +48,7 @@ function create_nv_cache_bdev() {
} }
function gen_ftl_nvme_conf() { function gen_ftl_nvme_conf() {
jq . <<-JSON jq . <<- JSON
{ {
"subsystems": [ "subsystems": [
{ {


@ -14,7 +14,7 @@ while getopts ':u:c:' opt; do
?) echo "Usage: $0 [-u UUID] [-c NV_CACHE_PCI_BDF] OCSSD_PCI_BDF" && exit 1 ;; ?) echo "Usage: $0 [-u UUID] [-c NV_CACHE_PCI_BDF] OCSSD_PCI_BDF" && exit 1 ;;
esac esac
done done
shift $((OPTIND -1)) shift $((OPTIND - 1))
device=$1 device=$1
@ -37,7 +37,8 @@ pu_count=$((num_group * num_pu))
# Write one band worth of data + one extra chunk # Write one band worth of data + one extra chunk
data_size=$((chunk_size * (pu_count + 1))) data_size=$((chunk_size * (pu_count + 1)))
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) & svcpid=$! "$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) &
svcpid=$!
waitforlisten $svcpid waitforlisten $svcpid
if [ -n "$nv_cache" ]; then if [ -n "$nv_cache" ]; then
@ -49,7 +50,7 @@ $rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1 -n 1
ftl_construct_args="bdev_ftl_create -b ftl0 -d nvme0n1 -o" ftl_construct_args="bdev_ftl_create -b ftl0 -d nvme0n1 -o"
[ -n "$nvc_bdev" ] && ftl_construct_args+=" -c $nvc_bdev" [ -n "$nvc_bdev" ] && ftl_construct_args+=" -c $nvc_bdev"
[ -n "$uuid" ] && ftl_construct_args+=" -u $uuid" [ -n "$uuid" ] && ftl_construct_args+=" -u $uuid"
$rpc_py $ftl_construct_args $rpc_py $ftl_construct_args
@ -69,7 +70,8 @@ $rpc_py nbd_stop_disk /dev/nbd0
kill -9 $svcpid kill -9 $svcpid
rm -f /dev/shm/spdk_tgt_trace.pid$svcpid rm -f /dev/shm/spdk_tgt_trace.pid$svcpid
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) -L ftl_init & svcpid=$! "$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) -L ftl_init &
svcpid=$!
waitforlisten $svcpid waitforlisten $svcpid
$rpc_py load_config < $testdir/config/ftl.json $rpc_py load_config < $testdir/config/ftl.json
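The FTL scripts above all follow the same lifecycle: start spdk_tgt in the background (shfmt now keeps the svcpid assignment on its own line), wait for its RPC socket, do the work, then kill it. A condensed sketch of that flow; it assumes the common helpers (gen_ftl_nvme_conf, waitforlisten, killprocess) are already sourced:

# Condensed sketch of the background-target lifecycle (assumes the common helpers are sourced).
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) &
svcpid=$! # the assignment now lives on its own line after shfmt
trap 'killprocess $svcpid; exit 1' SIGINT SIGTERM EXIT
waitforlisten $svcpid # block until the RPC socket answers
# ... issue $rpc_py calls against the running target here ...
killprocess $svcpid
trap - SIGINT SIGTERM EXIT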


@ -35,7 +35,8 @@ export FTL_JSON_CONF=$testdir/config/ftl.json
trap "fio_kill; exit 1" SIGINT SIGTERM EXIT trap "fio_kill; exit 1" SIGINT SIGTERM EXIT
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) & svcpid=$! "$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) &
svcpid=$!
waitforlisten $svcpid waitforlisten $svcpid
$rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie $rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
@ -50,7 +51,7 @@ fi
waitforbdev ftl0 waitforbdev ftl0
( (
echo '{"subsystems": ['; echo '{"subsystems": ['
$rpc_py save_subsystem_config -n bdev $rpc_py save_subsystem_config -n bdev
echo ']}' echo ']}'
) > $FTL_JSON_CONF ) > $FTL_JSON_CONF


@ -31,7 +31,7 @@ PCI_WHITELIST="$device" PCI_BLACKLIST="" DRIVER_OVERRIDE="" ./scripts/setup.sh
# Use first regular NVMe disk (non-OC) as non-volatile cache # Use first regular NVMe disk (non-OC) as non-volatile cache
nvme_disks=$($rootdir/scripts/gen_nvme.sh --json | jq -r \ nvme_disks=$($rootdir/scripts/gen_nvme.sh --json | jq -r \
".config[] | select(.params.traddr != \"$device\").params.traddr") ".config[] | select(.params.traddr != \"$device\").params.traddr")
for disk in $nvme_disks; do for disk in $nvme_disks; do
if has_separate_md $disk; then if has_separate_md $disk; then
@ -62,7 +62,8 @@ run_test "ftl_json" $testdir/json.sh $device
if [ $SPDK_TEST_FTL_EXTENDED -eq 1 ]; then if [ $SPDK_TEST_FTL_EXTENDED -eq 1 ]; then
run_test "ftl_fio_basic" $testdir/fio.sh $device basic run_test "ftl_fio_basic" $testdir/fio.sh $device basic
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) & svcpid=$! "$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) &
svcpid=$!
trap 'killprocess $svcpid; exit 1' SIGINT SIGTERM EXIT trap 'killprocess $svcpid; exit 1' SIGINT SIGTERM EXIT


@ -15,7 +15,8 @@ json_kill() {
trap "json_kill; exit 1" SIGINT SIGTERM EXIT trap "json_kill; exit 1" SIGINT SIGTERM EXIT
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) & svcpid=$! "$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) &
svcpid=$!
waitforlisten $svcpid waitforlisten $svcpid
# Create new bdev from json configuration # Create new bdev from json configuration


@ -16,7 +16,7 @@ while getopts ':u:c:' opt; do
?) echo "Usage: $0 [-u UUID] [-c NV_CACHE_PCI_BDF] OCSSD_PCI_BDF" && exit 1 ;; ?) echo "Usage: $0 [-u UUID] [-c NV_CACHE_PCI_BDF] OCSSD_PCI_BDF" && exit 1 ;;
esac esac
done done
shift $((OPTIND -1)) shift $((OPTIND - 1))
device=$1 device=$1
num_group=$(get_num_group $device) num_group=$(get_num_group $device)
num_pu=$(get_num_pu $device) num_pu=$(get_num_pu $device)
@ -37,7 +37,8 @@ restore_kill() {
trap "restore_kill; exit 1" SIGINT SIGTERM EXIT trap "restore_kill; exit 1" SIGINT SIGTERM EXIT
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) & svcpid=$! "$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) &
svcpid=$!
# Wait until spdk_tgt starts # Wait until spdk_tgt starts
waitforlisten $svcpid waitforlisten $svcpid
@ -49,7 +50,7 @@ $rpc_py bdev_nvme_attach_controller -b nvme0 -a $device -t pcie
$rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1 -n 1 $rpc_py bdev_ocssd_create -c nvme0 -b nvme0n1 -n 1
ftl_construct_args="bdev_ftl_create -b ftl0 -d nvme0n1" ftl_construct_args="bdev_ftl_create -b ftl0 -d nvme0n1"
[ -n "$uuid" ] && ftl_construct_args+=" -u $uuid" [ -n "$uuid" ] && ftl_construct_args+=" -u $uuid"
[ -n "$nv_cache" ] && ftl_construct_args+=" -c $nvc_bdev" [ -n "$nv_cache" ] && ftl_construct_args+=" -c $nvc_bdev"
$rpc_py $ftl_construct_args $rpc_py $ftl_construct_args
@ -73,7 +74,8 @@ md5sum $mount_dir/testfile > $testdir/testfile.md5
umount $mount_dir umount $mount_dir
killprocess $svcpid killprocess $svcpid
"$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) -L ftl_init & svcpid=$! "$rootdir/app/spdk_tgt/spdk_tgt" --json <(gen_ftl_nvme_conf) -L ftl_init &
svcpid=$!
# Wait until spdk_tgt starts # Wait until spdk_tgt starts
waitforlisten $svcpid waitforlisten $svcpid


@ -27,12 +27,12 @@ done
timing_enter autofuzz timing_enter autofuzz
if [ "$TEST_MODULE" == "nvmf" ]; then if [ "$TEST_MODULE" == "nvmf" ]; then
allowed_transports=( "${allowed_nvme_transports[@]}" ) allowed_transports=("${allowed_nvme_transports[@]}")
if [ $TEST_TRANSPORT == "rdma" ]; then if [ $TEST_TRANSPORT == "rdma" ]; then
config_params="$config_params --with-rdma" config_params="$config_params --with-rdma"
fi fi
elif [ "$TEST_MODULE" == "vhost" ]; then elif [ "$TEST_MODULE" == "vhost" ]; then
allowed_transports=( "${allowed_vhost_transports[@]}" ) allowed_transports=("${allowed_vhost_transports[@]}")
config_params="$config_params --with-vhost --with-virtio" config_params="$config_params --with-vhost --with-virtio"
else else
echo "Invalid module specified. Please specify either nvmf or vhost." echo "Invalid module specified. Please specify either nvmf or vhost."
@ -30,15 +30,16 @@ TEST_TIMEOUT=1200
# This argument is used in addition to the test arguments in autotest_common.sh # This argument is used in addition to the test arguments in autotest_common.sh
for i in "$@"; do for i in "$@"; do
case "$i" in case "$i" in
--timeout=*) --timeout=*)
TEST_TIMEOUT="${i#*=}" TEST_TIMEOUT="${i#*=}"
esac ;;
esac
done done
timing_enter start_iscsi_tgt timing_enter start_iscsi_tgt
"${ISCSI_APP[@]}" -m $ISCSI_TEST_CORE_MASK &>$output_dir/iscsi_autofuzz_tgt_output.txt & "${ISCSI_APP[@]}" -m $ISCSI_TEST_CORE_MASK &> $output_dir/iscsi_autofuzz_tgt_output.txt &
iscsipid=$! iscsipid=$!
trap 'killprocess $iscsipid; exit 1' SIGINT SIGTERM EXIT trap 'killprocess $iscsipid; exit 1' SIGINT SIGTERM EXIT
@ -58,7 +59,7 @@ sleep 1
trap 'killprocess $iscsipid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT trap 'killprocess $iscsipid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
$rootdir/test/app/fuzz/iscsi_fuzz/iscsi_fuzz -m 0xF0 -T $TARGET_IP -t $TEST_TIMEOUT 2>$output_dir/iscsi_autofuzz_logs.txt $rootdir/test/app/fuzz/iscsi_fuzz/iscsi_fuzz -m 0xF0 -T $TARGET_IP -t $TEST_TIMEOUT 2> $output_dir/iscsi_autofuzz_logs.txt
$rpc_py iscsi_delete_target_node 'iqn.2016-06.io.spdk:disk1' $rpc_py iscsi_delete_target_node 'iqn.2016-06.io.spdk:disk1'
@ -11,10 +11,11 @@ TEST_TIMEOUT=1200
# This argument is used in addition to the test arguments in autotest_common.sh # This argument is used in addition to the test arguments in autotest_common.sh
for i in "$@"; do for i in "$@"; do
case "$i" in case "$i" in
--timeout=*) --timeout=*)
TEST_TIMEOUT="${i#*=}" TEST_TIMEOUT="${i#*=}"
esac ;;
esac
done done
nvmftestinit nvmftestinit
@ -24,7 +25,7 @@ timing_enter nvmf_fuzz_test
echo "[Nvme]" > $testdir/nvmf_fuzz.conf echo "[Nvme]" > $testdir/nvmf_fuzz.conf
echo " TransportID \"trtype:$TEST_TRANSPORT adrfam:IPv4 subnqn:nqn.2016-06.io.spdk:cnode1 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT\" Nvme0" >> $testdir/nvmf_fuzz.conf echo " TransportID \"trtype:$TEST_TRANSPORT adrfam:IPv4 subnqn:nqn.2016-06.io.spdk:cnode1 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT\" Nvme0" >> $testdir/nvmf_fuzz.conf
"${NVMF_APP[@]}" -m 0xF &>"$output_dir/nvmf_autofuzz_tgt_output.txt" & "${NVMF_APP[@]}" -m 0xF &> "$output_dir/nvmf_autofuzz_tgt_output.txt" &
nvmfpid=$! nvmfpid=$!
trap 'process_shm --id $NVMF_APP_SHM_ID; rm -f $testdir/nvmf_fuzz.conf; killprocess $nvmfpid; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT trap 'process_shm --id $NVMF_APP_SHM_ID; rm -f $testdir/nvmf_fuzz.conf; killprocess $nvmfpid; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT
@ -39,7 +40,7 @@ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
# Note that we chose a consistent seed to ensure that this test is consistent in nightly builds. # Note that we chose a consistent seed to ensure that this test is consistent in nightly builds.
$rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -t $TEST_TIMEOUT -C $testdir/nvmf_fuzz.conf -N -a 2>$output_dir/nvmf_autofuzz_logs.txt $rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -t $TEST_TIMEOUT -C $testdir/nvmf_fuzz.conf -N -a 2> $output_dir/nvmf_autofuzz_logs.txt
rm -f $testdir/nvmf_fuzz.conf rm -f $testdir/nvmf_fuzz.conf
$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1 $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
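The fuzz hunks above consistently add a single space between a redirection operator and its target (&> file, 2> file) while keeping the operator attached to its file descriptor. A hedged, standalone sketch of that spacing; the directory and file names are placeholders:

output_dir=$(mktemp -d)
echo "tgt output" &> "$output_dir/tgt_output.txt" # both streams, space before the target
ls /nonexistent 2> "$output_dir/errors.txt" || true # stderr only, same spacing
rm -r "$output_dir"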
@ -16,10 +16,11 @@ fuzz_specific_rpc_py="$rootdir/test/app/fuzz/common/fuzz_rpc.py -s $FUZZ_RPC_SOC
# This argument is used in addition to the test arguments in autotest_common.sh # This argument is used in addition to the test arguments in autotest_common.sh
for i in "$@"; do for i in "$@"; do
case "$i" in case "$i" in
--timeout=*) --timeout=*)
TEST_TIMEOUT="${i#*=}" TEST_TIMEOUT="${i#*=}"
esac ;;
esac
done done
timing_enter vhost_fuzz_test timing_enter vhost_fuzz_test
@ -29,39 +30,39 @@ timing_enter setup
$rootdir/scripts/setup.sh $rootdir/scripts/setup.sh
timing_exit setup timing_exit setup
"${VHOST_APP[@]}" &>"$output_dir/vhost_fuzz_tgt_output.txt" & "${VHOST_APP[@]}" &> "$output_dir/vhost_fuzz_tgt_output.txt" &
vhostpid=$! vhostpid=$!
waitforlisten $vhostpid waitforlisten $vhostpid
trap 'killprocess $vhostpid; exit 1' SIGINT SIGTERM exit trap 'killprocess $vhostpid; exit 1' SIGINT SIGTERM exit
"${VHOST_FUZZ_APP[@]}" -t $TEST_TIMEOUT 2>"$output_dir/vhost_autofuzz_output1.txt" & "${VHOST_FUZZ_APP[@]}" -t $TEST_TIMEOUT 2> "$output_dir/vhost_autofuzz_output1.txt" &
fuzzpid=$! fuzzpid=$!
waitforlisten $fuzzpid $FUZZ_RPC_SOCK waitforlisten $fuzzpid $FUZZ_RPC_SOCK
trap 'killprocess $vhostpid; killprocess $fuzzpid; exit 1' SIGINT SIGTERM exit trap 'killprocess $vhostpid; killprocess $fuzzpid; exit 1' SIGINT SIGTERM exit
if [ "$TEST_TRANSPORT" == "bdev" ] || [ "$TEST_TRANSPORT" == "all" ]; then if [ "$TEST_TRANSPORT" == "bdev" ] || [ "$TEST_TRANSPORT" == "all" ]; then
$vhost_rpc_py bdev_malloc_create -b Malloc0 64 512 $vhost_rpc_py bdev_malloc_create -b Malloc0 64 512
$vhost_rpc_py vhost_create_blk_controller Vhost.1 Malloc0 $vhost_rpc_py vhost_create_blk_controller Vhost.1 Malloc0
# test the vhost blk controller with valid data buffers. # test the vhost blk controller with valid data buffers.
$fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/Vhost.1 -b -v $fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/Vhost.1 -b -v
fi fi
if [ "$TEST_TRANSPORT" == "scsi" ] || [ "$TEST_TRANSPORT" == "all" ]; then if [ "$TEST_TRANSPORT" == "scsi" ] || [ "$TEST_TRANSPORT" == "all" ]; then
$vhost_rpc_py bdev_malloc_create -b Malloc1 64 512 $vhost_rpc_py bdev_malloc_create -b Malloc1 64 512
$vhost_rpc_py vhost_create_scsi_controller naa.VhostScsi0.1 $vhost_rpc_py vhost_create_scsi_controller naa.VhostScsi0.1
$vhost_rpc_py vhost_scsi_controller_add_target naa.VhostScsi0.1 0 Malloc1 $vhost_rpc_py vhost_scsi_controller_add_target naa.VhostScsi0.1 0 Malloc1
$vhost_rpc_py bdev_malloc_create -b Malloc2 64 512 $vhost_rpc_py bdev_malloc_create -b Malloc2 64 512
$vhost_rpc_py vhost_create_scsi_controller naa.VhostScsi0.2 $vhost_rpc_py vhost_create_scsi_controller naa.VhostScsi0.2
$vhost_rpc_py vhost_scsi_controller_add_target naa.VhostScsi0.2 0 Malloc2 $vhost_rpc_py vhost_scsi_controller_add_target naa.VhostScsi0.2 0 Malloc2
# test the vhost scsi I/O queue with valid data buffers on a valid lun. # test the vhost scsi I/O queue with valid data buffers on a valid lun.
$fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/naa.VhostScsi0.1 -l -v $fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/naa.VhostScsi0.1 -l -v
# test the vhost scsi management queue with valid data buffers. # test the vhost scsi management queue with valid data buffers.
$fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/naa.VhostScsi0.2 -v -m $fuzz_specific_rpc_py fuzz_vhost_create_dev -s $(pwd)/naa.VhostScsi0.2 -v -m
fi fi
# The test won't actually begin until this option is passed in. # The test won't actually begin until this option is passed in.
@ -78,8 +78,8 @@ function iscsitestinit() {
function waitforiscsidevices() { function waitforiscsidevices() {
local num=$1 local num=$1
for ((i=1; i<=20; i++)); do for ((i = 1; i <= 20; i++)); do
n=$( iscsiadm -m session -P 3 | grep -c "Attached scsi disk sd[a-z]*" || true) n=$(iscsiadm -m session -P 3 | grep -c "Attached scsi disk sd[a-z]*" || true)
if [ $n -ne $num ]; then if [ $n -ne $num ]; then
sleep 0.1 sleep 0.1
else else
@ -107,7 +107,7 @@ function start_vpp() {
# for VPP side maximal size of MTU for TCP is 1460 and tests doesn't work # for VPP side maximal size of MTU for TCP is 1460 and tests doesn't work
# stable with larger packets # stable with larger packets
MTU=1460 MTU=1460
MTU_W_HEADER=$((MTU+20)) MTU_W_HEADER=$((MTU + 20))
ip link set dev $INITIATOR_INTERFACE mtu $MTU ip link set dev $INITIATOR_INTERFACE mtu $MTU
ethtool -K $INITIATOR_INTERFACE tso off ethtool -K $INITIATOR_INTERFACE tso off
ethtool -k $INITIATOR_INTERFACE ethtool -k $INITIATOR_INTERFACE
@ -119,8 +119,8 @@ function start_vpp() {
session { evt_qs_memfd_seg } \ session { evt_qs_memfd_seg } \
socksvr { socket-name /run/vpp-api.sock } \ socksvr { socket-name /run/vpp-api.sock } \
plugins { \ plugins { \
plugin default { disable } \ plugin default { disable } \
plugin dpdk_plugin.so { enable } \ plugin dpdk_plugin.so { enable } \
} & } &
vpp_pid=$! vpp_pid=$!
@ -131,13 +131,13 @@ function start_vpp() {
# Wait until VPP starts responding # Wait until VPP starts responding
xtrace_disable xtrace_disable
counter=40 counter=40
while [ $counter -gt 0 ] ; do while [ $counter -gt 0 ]; do
vppctl show version | grep -E "vpp v[0-9]+\.[0-9]+" && break vppctl show version | grep -E "vpp v[0-9]+\.[0-9]+" && break
counter=$(( counter - 1 )) counter=$((counter - 1))
sleep 0.5 sleep 0.5
done done
xtrace_restore xtrace_restore
if [ $counter -eq 0 ] ; then if [ $counter -eq 0 ]; then
return 1 return 1
fi fi
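The start_vpp hunks above gather several arithmetic and loop rules in one place: while [ ... ]; do keeps a single space before the semicolon, counters are updated with spaces around the operator inside $(( )), and the C-style for header shown earlier in this file becomes for ((i = 1; i <= 20; i++)); do. A minimal runnable loop in the same spirit; the retry count and sleep interval are arbitrary:

counter=3
while [ $counter -gt 0 ]; do
	counter=$((counter - 1)) # operator spacing inside the arithmetic expansion
	sleep 0.1
done
for ((i = 1; i <= 3; i++)); do
	echo "attempt $i"
done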
@ -171,8 +171,8 @@ function start_vpp() {
sleep 3 sleep 3
# SC1010: ping -M do - in this case do is an option not bash special word # SC1010: ping -M do - in this case do is an option not bash special word
# shellcheck disable=SC1010 # shellcheck disable=SC1010
ping -c 1 $TARGET_IP -s $(( MTU - 28 )) -M do ping -c 1 $TARGET_IP -s $((MTU - 28)) -M do
vppctl ping $INITIATOR_IP repeat 1 size $(( MTU - (28 + 8) )) verbose | grep -E "$MTU_W_HEADER bytes from $INITIATOR_IP" vppctl ping $INITIATOR_IP repeat 1 size $((MTU - (28 + 8))) verbose | grep -E "$MTU_W_HEADER bytes from $INITIATOR_IP"
} }
function kill_vpp() { function kill_vpp() {
@ -187,7 +187,7 @@ function kill_vpp() {
} }
function initiator_json_config() { function initiator_json_config() {
# Prepare config file for iSCSI initiator # Prepare config file for iSCSI initiator
jq . <<-JSON jq . <<- JSON
{ {
"subsystems": [ "subsystems": [
{ {
@ -69,7 +69,7 @@ echo "Error injection test done"
if [ -z "$NO_NVME" ]; then if [ -z "$NO_NVME" ]; then
bdev_size=$(get_bdev_size Nvme0n1) bdev_size=$(get_bdev_size Nvme0n1)
split_size=$((bdev_size/2)) split_size=$((bdev_size / 2))
if [ $split_size -gt 10000 ]; then if [ $split_size -gt 10000 ]; then
split_size=10000 split_size=10000
fi fi
@ -75,7 +75,7 @@ parted -s /dev/$dev mklabel msdos
parted -s /dev/$dev mkpart primary '0%' '100%' parted -s /dev/$dev mkpart primary '0%' '100%'
sleep 1 sleep 1
function filesystem_test { function filesystem_test() {
fstype=$1 fstype=$1
make_filesystem ${fstype} /dev/${dev}1 make_filesystem ${fstype} /dev/${dev}1
@ -82,7 +82,7 @@ $rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
malloc_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) " malloc_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
malloc_bdevs+="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)" malloc_bdevs+="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
$rpc_py bdev_raid_create -n raid0 -z 64 -r 0 -b "$malloc_bdevs" $rpc_py bdev_raid_create -n raid0 -z 64 -r 0 -b "$malloc_bdevs"
bdev=$( $rpc_py bdev_malloc_create 1024 512 ) bdev=$($rpc_py bdev_malloc_create 1024 512)
# "raid0:0" ==> use raid0 blockdev for LUN0 # "raid0:0" ==> use raid0 blockdev for LUN0
# "1:2" ==> map PortalGroup1 to InitiatorGroup2 # "1:2" ==> map PortalGroup1 to InitiatorGroup2
# "64" ==> iSCSI queue depth 64 # "64" ==> iSCSI queue depth 64
@ -131,7 +131,6 @@ $rpc_py bdev_malloc_delete ${bdev}
fio_status=0 fio_status=0
wait $fio_pid || fio_status=$? wait $fio_pid || fio_status=$?
if [ $fio_status -eq 0 ]; then if [ $fio_status -eq 0 ]; then
echo "iscsi hotplug test: fio successful - expected failure" echo "iscsi hotplug test: fio successful - expected failure"
exit 1 exit 1
@ -49,7 +49,7 @@ sleep 1
trap 'killprocess $iscsipid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT trap 'killprocess $iscsipid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
$rootdir/test/app/fuzz/iscsi_fuzz/iscsi_fuzz -m 0xF0 -T $TARGET_IP -t 30 2>$output_dir/iscsi_autofuzz_logs.txt $rootdir/test/app/fuzz/iscsi_fuzz/iscsi_fuzz -m 0xF0 -T $TARGET_IP -t 30 2> $output_dir/iscsi_autofuzz_logs.txt
$rpc_py iscsi_delete_target_node 'iqn.2016-06.io.spdk:disk1' $rpc_py iscsi_delete_target_node 'iqn.2016-06.io.spdk:disk1'
@ -40,9 +40,9 @@ trap 'killprocess $pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
"$rootdir/test/bdev/bdevperf/bdevperf" --json <(initiator_json_config) -q 128 -o 4096 -w verify -t 5 -s 512 "$rootdir/test/bdev/bdevperf/bdevperf" --json <(initiator_json_config) -q 128 -o 4096 -w verify -t 5 -s 512
if [ $RUN_NIGHTLY -eq 1 ]; then if [ $RUN_NIGHTLY -eq 1 ]; then
"$rootdir/test/bdev/bdevperf/bdevperf" --json <(initiator_json_config) -q 128 -o 4096 -w unmap -t 5 -s 512 "$rootdir/test/bdev/bdevperf/bdevperf" --json <(initiator_json_config) -q 128 -o 4096 -w unmap -t 5 -s 512
"$rootdir/test/bdev/bdevperf/bdevperf" --json <(initiator_json_config) -q 128 -o 4096 -w flush -t 5 -s 512 "$rootdir/test/bdev/bdevperf/bdevperf" --json <(initiator_json_config) -q 128 -o 4096 -w flush -t 5 -s 512
"$rootdir/test/bdev/bdevperf/bdevperf" --json <(initiator_json_config) -q 128 -o 4096 -w reset -t 10 -s 512 "$rootdir/test/bdev/bdevperf/bdevperf" --json <(initiator_json_config) -q 128 -o 4096 -w reset -t 10 -s 512
fi fi
trap - SIGINT SIGTERM EXIT trap - SIGINT SIGTERM EXIT
@ -47,12 +47,12 @@ function rpc_validate_ip() {
echo "Add new IP succeeded." echo "Add new IP succeeded."
else else
echo "Add new IP failed. Expected to succeed..." echo "Add new IP failed. Expected to succeed..."
exit 1; exit 1
fi fi
# Add same IP again # Add same IP again
if $cmd; then if $cmd; then
echo "Same IP existed. Expected to fail..." echo "Same IP existed. Expected to fail..."
exit 1; exit 1
fi fi
cmd="$rpc_py -s $1 net_interface_delete_ip_address 1 $MIGRATION_ADDRESS" cmd="$rpc_py -s $1 net_interface_delete_ip_address 1 $MIGRATION_ADDRESS"
@ -60,12 +60,12 @@ function rpc_validate_ip() {
echo "Delete existing IP succeeded." echo "Delete existing IP succeeded."
else else
echo "Delete existing IP failed. Expected to succeed..." echo "Delete existing IP failed. Expected to succeed..."
exit 1; exit 1
fi fi
# Delete same IP again # Delete same IP again
if $cmd; then if $cmd; then
echo "No required IP existed. Expected to fail..." echo "No required IP existed. Expected to fail..."
exit 1; exit 1
fi fi
} }
@ -69,7 +69,7 @@ sleep 1
timing_enter discovery timing_enter discovery
iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
waitforiscsidevices $(( NUM_LVS * NUM_LVOL )) waitforiscsidevices $((NUM_LVS * NUM_LVOL))
timing_exit discovery timing_exit discovery
timing_enter fio timing_enter fio
@ -11,7 +11,7 @@ if [ ! -x $FIO_PATH/fio ]; then
error "Invalid path of fio binary" error "Invalid path of fio binary"
fi fi
function run_spdk_iscsi_fio(){ function run_spdk_iscsi_fio() {
$FIO_PATH/fio $testdir/perf.job "$@" --output-format=json $FIO_PATH/fio $testdir/perf.job "$@" --output-format=json
} }
@ -23,8 +23,7 @@ iscsiadm -m node --login -p $IP_T:$ISCSI_PORT
waitforiscsidevices 1 waitforiscsidevices 1
disks=($(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}')) disks=($(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}'))
for (( i=0; i < ${#disks[@]}; i++ )) for ((i = 0; i < ${#disks[@]}; i++)); do
do
filename+=$(printf /dev/%s: "${disks[i]}") filename+=$(printf /dev/%s: "${disks[i]}")
waitforfile $filename waitforfile $filename
echo noop > /sys/block/${disks[i]}/queue/scheduler echo noop > /sys/block/${disks[i]}/queue/scheduler
@ -20,9 +20,11 @@ NUM_JOBS=1
ISCSI_TGT_CM=0x02 ISCSI_TGT_CM=0x02
# Performance test for iscsi_tgt, run on devices with proper hardware support (target and inititator) # Performance test for iscsi_tgt, run on devices with proper hardware support (target and inititator)
function usage() function usage() {
{ [[ -n $2 ]] && (
[[ -n $2 ]] && ( echo "$2"; echo ""; ) echo "$2"
echo ""
)
echo "Usage: $(basename $1) [options]" echo "Usage: $(basename $1) [options]"
echo "-h, --help Print help and exit" echo "-h, --help Print help and exit"
echo " --fiopath=PATH Path to fio directory on initiator. [default=$FIO_PATH]" echo " --fiopath=PATH Path to fio directory on initiator. [default=$FIO_PATH]"
@ -36,19 +38,31 @@ function usage()
while getopts 'h-:' optchar; do while getopts 'h-:' optchar; do
case "$optchar" in case "$optchar" in
-) -)
case "$OPTARG" in case "$OPTARG" in
help) usage $0; exit 0 ;; help)
fiopath=*) FIO_BIN="${OPTARG#*=}" ;; usage $0
disk_no=*) DISKNO="${OPTARG#*=}" ;; exit 0
target_ip=*) TARGET_IP="${OPTARG#*=}" ;; ;;
initiator_ip=*) INITIATOR_IP="${OPTARG#*=}" ;; fiopath=*) FIO_BIN="${OPTARG#*=}" ;;
init_mgmnt_ip=*) IP_I_SSH="${OPTARG#*=}" ;; disk_no=*) DISKNO="${OPTARG#*=}" ;;
iscsi_tgt_mask=*) ISCSI_TGT_CM="${OPTARG#*=}" ;; target_ip=*) TARGET_IP="${OPTARG#*=}" ;;
*) usage $0 echo "Invalid argument '$OPTARG'"; exit 1 ;; initiator_ip=*) INITIATOR_IP="${OPTARG#*=}" ;;
esac init_mgmnt_ip=*) IP_I_SSH="${OPTARG#*=}" ;;
;; iscsi_tgt_mask=*) ISCSI_TGT_CM="${OPTARG#*=}" ;;
h) usage $0; exit 0 ;; *)
*) usage $0 "Invalid argument '$optchar'"; exit 1 ;; usage $0 echo "Invalid argument '$OPTARG'"
exit 1
;;
esac
;;
h)
usage $0
exit 0
;;
*)
usage $0 "Invalid argument '$optchar'"
exit 1
;;
esac esac
done done
@ -68,7 +82,7 @@ if [ $EUID -ne 0 ]; then
error "INFO: This script must be run with root privileges" error "INFO: This script must be run with root privileges"
fi fi
function ssh_initiator(){ function ssh_initiator() {
ssh -i $HOME/.ssh/spdk_vhost_id_rsa root@$IP_I_SSH "$@" ssh -i $HOME/.ssh/spdk_vhost_id_rsa root@$IP_I_SSH "$@"
} }
@ -100,8 +114,7 @@ fi
$rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT $rpc_py iscsi_create_portal_group $PORTAL_TAG $TARGET_IP:$ISCSI_PORT
$rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK $rpc_py iscsi_create_initiator_group $INITIATOR_TAG $INITIATOR_NAME $NETMASK
for (( i=0; i < DISKNO; i++ )) for ((i = 0; i < DISKNO; i++)); do
do
$rpc_py iscsi_create_target_node Target${i} Target${i}_alias "${bdevs[i]}:0" "$PORTAL_TAG:$INITIATOR_TAG" 64 -d $rpc_py iscsi_create_target_node Target${i} Target${i}_alias "${bdevs[i]}:0" "$PORTAL_TAG:$INITIATOR_TAG" 64 -d
done done
@ -110,7 +123,7 @@ rm -f $testdir/perf.job
timing_exit iscsi_config timing_exit iscsi_config
timing_enter iscsi_initiator timing_enter iscsi_initiator
ssh_initiator bash -s - < $testdir/iscsi_initiator.sh $FIO_PATH $TARGET_IP ssh_initiator bash -s - $FIO_PATH $TARGET_IP < $testdir/iscsi_initiator.sh
timing_exit iscsi_initiator timing_exit iscsi_initiator
ssh_initiator "cat perf_output/iscsi_fio.json" > $iscsi_fio_results ssh_initiator "cat perf_output/iscsi_fio.json" > $iscsi_fio_results
@ -28,16 +28,16 @@ function run_fio() {
end_io_count=$(jq -r '.bdevs[0].num_read_ops' <<< "$iostats") end_io_count=$(jq -r '.bdevs[0].num_read_ops' <<< "$iostats")
end_bytes_read=$(jq -r '.bdevs[0].bytes_read' <<< "$iostats") end_bytes_read=$(jq -r '.bdevs[0].bytes_read' <<< "$iostats")
IOPS_RESULT=$(((end_io_count-start_io_count)/run_time)) IOPS_RESULT=$(((end_io_count - start_io_count) / run_time))
BANDWIDTH_RESULT=$(((end_bytes_read-start_bytes_read)/run_time)) BANDWIDTH_RESULT=$(((end_bytes_read - start_bytes_read) / run_time))
} }
function verify_qos_limits() { function verify_qos_limits() {
local result=$1 local result=$1
local limit=$2 local limit=$2
[ "$(bc <<< "$result > $limit*0.85")" -eq 1 ] && \ [ "$(bc <<< "$result > $limit*0.85")" -eq 1 ] \
[ "$(bc <<< "$result < $limit*1.05")" -eq 1 ] && [ "$(bc <<< "$result < $limit*1.05")" -eq 1 ]
} }
if [ -z "$TARGET_IP" ]; then if [ -z "$TARGET_IP" ]; then
@ -87,19 +87,19 @@ trap 'iscsicleanup; killprocess $pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTER
run_fio Malloc0 run_fio Malloc0
# Set IOPS/bandwidth limit to 50% of the actual unrestrained performance. # Set IOPS/bandwidth limit to 50% of the actual unrestrained performance.
IOPS_LIMIT=$((IOPS_RESULT/2)) IOPS_LIMIT=$((IOPS_RESULT / 2))
BANDWIDTH_LIMIT=$((BANDWIDTH_RESULT/2)) BANDWIDTH_LIMIT=$((BANDWIDTH_RESULT / 2))
# Set READ bandwidth limit to 50% of the RW bandwidth limit to be able # Set READ bandwidth limit to 50% of the RW bandwidth limit to be able
# to differentiate those two. # to differentiate those two.
READ_BANDWIDTH_LIMIT=$((BANDWIDTH_LIMIT/2)) READ_BANDWIDTH_LIMIT=$((BANDWIDTH_LIMIT / 2))
# Also round them down to nearest multiple of either 1000 IOPS or 1MB BW # Also round them down to nearest multiple of either 1000 IOPS or 1MB BW
# which are the minimal QoS granularities # which are the minimal QoS granularities
IOPS_LIMIT=$((IOPS_LIMIT/1000*1000)) IOPS_LIMIT=$((IOPS_LIMIT / 1000 * 1000))
BANDWIDTH_LIMIT_MB=$((BANDWIDTH_LIMIT/1024/1024)) BANDWIDTH_LIMIT_MB=$((BANDWIDTH_LIMIT / 1024 / 1024))
BANDWIDTH_LIMIT=$((BANDWIDTH_LIMIT_MB*1024*1024)) BANDWIDTH_LIMIT=$((BANDWIDTH_LIMIT_MB * 1024 * 1024))
READ_BANDWIDTH_LIMIT_MB=$((READ_BANDWIDTH_LIMIT/1024/1024)) READ_BANDWIDTH_LIMIT_MB=$((READ_BANDWIDTH_LIMIT / 1024 / 1024))
READ_BANDWIDTH_LIMIT=$((READ_BANDWIDTH_LIMIT_MB*1024*1024)) READ_BANDWIDTH_LIMIT=$((READ_BANDWIDTH_LIMIT_MB * 1024 * 1024))
# Limit the I/O rate by RPC, then confirm the observed rate matches. # Limit the I/O rate by RPC, then confirm the observed rate matches.
$rpc_py bdev_set_qos_limit Malloc0 --rw_ios_per_sec $IOPS_LIMIT $rpc_py bdev_set_qos_limit Malloc0 --rw_ios_per_sec $IOPS_LIMIT
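The QoS hunk above combines the operator-spacing rule with a little real arithmetic: measured values are halved and then rounded down to the nearest 1000 IOPS or 1 MiB, the minimal QoS granularities. A standalone sketch of that calculation with assumed starting numbers:

IOPS_RESULT=23456 # assume this unrestrained measurement
IOPS_LIMIT=$((IOPS_RESULT / 2))          # 11728
IOPS_LIMIT=$((IOPS_LIMIT / 1000 * 1000)) # rounded down to 11000
BANDWIDTH_RESULT=$((300 * 1024 * 1024)) # assume 300 MiB/s was measured
BANDWIDTH_LIMIT_MB=$((BANDWIDTH_RESULT / 2 / 1024 / 1024)) # 150
BANDWIDTH_LIMIT=$((BANDWIDTH_LIMIT_MB * 1024 * 1024))
echo "$IOPS_LIMIT $BANDWIDTH_LIMIT_MB $BANDWIDTH_LIMIT"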
@ -37,10 +37,10 @@ rbd_bdev="$($rpc_py bdev_rbd_create $RBD_POOL $RBD_NAME 4096)"
$rpc_py bdev_get_bdevs $rpc_py bdev_get_bdevs
$rpc_py bdev_rbd_resize $rbd_bdev 2000 $rpc_py bdev_rbd_resize $rbd_bdev 2000
num_block=$($rpc_py bdev_get_bdevs|grep num_blocks|sed 's/[^[:digit:]]//g') num_block=$($rpc_py bdev_get_bdevs | grep num_blocks | sed 's/[^[:digit:]]//g')
# get the bdev size in MiB. # get the bdev size in MiB.
total_size=$(( num_block * 4096/ 1048576 )) total_size=$((num_block * 4096 / 1048576))
if [ $total_size != 2000 ];then if [ $total_size != 2000 ]; then
echo "resize failed." echo "resize failed."
exit 1 exit 1
fi fi
@ -8,13 +8,13 @@ source $rootdir/test/iscsi_tgt/common.sh
function waitfortcp() { function waitfortcp() {
local addr="$2" local addr="$2"
if hash ip &>/dev/null; then if hash ip &> /dev/null; then
local have_ip_cmd=true local have_ip_cmd=true
else else
local have_ip_cmd=false local have_ip_cmd=false
fi fi
if hash ss &>/dev/null; then if hash ss &> /dev/null; then
local have_ss_cmd=true local have_ss_cmd=true
else else
local have_ss_cmd=false local have_ss_cmd=false
@ -25,7 +25,7 @@ function waitfortcp() {
xtrace_disable xtrace_disable
local ret=0 local ret=0
local i local i
for (( i = 40; i != 0; i-- )); do for ((i = 40; i != 0; i--)); do
# if the process is no longer running, then exit the script # if the process is no longer running, then exit the script
# since it means the application crashed # since it means the application crashed
if ! kill -s 0 $1; then if ! kill -s 0 $1; then
@ -55,7 +55,7 @@ function waitfortcp() {
done done
xtrace_restore xtrace_restore
if (( i == 0 )); then if ((i == 0)); then
echo "ERROR: timeout while waiting for process (pid: $1) to start listening on '$addr'" echo "ERROR: timeout while waiting for process (pid: $1) to start listening on '$addr'"
ret=1 ret=1
fi fi
@ -77,8 +77,8 @@ if [ -z "$TEST_TYPE" ]; then
fi fi
if [ "$TEST_TYPE" != "posix" ] && [ "$TEST_TYPE" != "vpp" ]; then if [ "$TEST_TYPE" != "posix" ] && [ "$TEST_TYPE" != "vpp" ]; then
echo "No correct sock implmentation specified" echo "No correct sock implmentation specified"
exit 1 exit 1
fi fi
HELLO_SOCK_APP="${TARGET_NS_CMD[*]} $rootdir/examples/sock/hello_world/hello_sock" HELLO_SOCK_APP="${TARGET_NS_CMD[*]} $rootdir/examples/sock/hello_world/hello_sock"
@ -94,14 +94,15 @@ timing_enter sock_client
echo "Testing client path" echo "Testing client path"
# start echo server using socat # start echo server using socat
$SOCAT_APP tcp-l:$ISCSI_PORT,fork,bind=$INITIATOR_IP exec:'/bin/cat' & server_pid=$! $SOCAT_APP tcp-l:$ISCSI_PORT,fork,bind=$INITIATOR_IP exec:'/bin/cat' &
server_pid=$!
trap 'killprocess $server_pid;iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT trap 'killprocess $server_pid;iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
waitfortcp $server_pid $INITIATOR_IP:$ISCSI_PORT waitfortcp $server_pid $INITIATOR_IP:$ISCSI_PORT
# send message using hello_sock client # send message using hello_sock client
message="**MESSAGE:This is a test message from the client**" message="**MESSAGE:This is a test message from the client**"
response=$( echo $message | $HELLO_SOCK_APP -H $INITIATOR_IP -P $ISCSI_PORT -N $TEST_TYPE) response=$(echo $message | $HELLO_SOCK_APP -H $INITIATOR_IP -P $ISCSI_PORT -N $TEST_TYPE)
if ! echo "$response" | grep -q "$message"; then if ! echo "$response" | grep -q "$message"; then
exit 1 exit 1
@ -120,13 +121,14 @@ timing_exit sock_client
timing_enter sock_server timing_enter sock_server
# start echo server using hello_sock echo server # start echo server using hello_sock echo server
$HELLO_SOCK_APP -H $TARGET_IP -P $ISCSI_PORT -S -N $TEST_TYPE & server_pid=$! $HELLO_SOCK_APP -H $TARGET_IP -P $ISCSI_PORT -S -N $TEST_TYPE &
server_pid=$!
trap 'killprocess $server_pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT trap 'killprocess $server_pid; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
waitforlisten $server_pid waitforlisten $server_pid
# send message to server using socat # send message to server using socat
message="**MESSAGE:This is a test message to the server**" message="**MESSAGE:This is a test message to the server**"
response=$( echo $message | $SOCAT_APP - tcp:$TARGET_IP:$ISCSI_PORT 2>/dev/null ) response=$(echo $message | $SOCAT_APP - tcp:$TARGET_IP:$ISCSI_PORT 2> /dev/null)
if [ "$message" != "$response" ]; then if [ "$message" != "$response" ]; then
exit 1 exit 1
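The sock test above also picks up the command-substitution rule: the padding just inside $( ) is removed, alongside the usual redirect spacing (&> /dev/null, 2> /dev/null). A tiny runnable sketch; the message text is arbitrary and cat stands in for the socat/hello_sock round trip:

message="**MESSAGE:This is a test message**"
# Pre-format style: response=$( echo $message | cat )
response=$(echo "$message" | cat) # no padding inside the command substitution
if hash ss &> /dev/null; then
	echo "ss is available"
fi
[ "$message" = "$response" ] && echo "round trip OK"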
@ -51,7 +51,7 @@ echo "iscsi_tgt is listening. Running tests..."
timing_exit start_iscsi_tgt timing_exit start_iscsi_tgt
mkdir -p ${TRACE_TMP_FOLDER} mkdir -p ${TRACE_TMP_FOLDER}
./app/trace_record/spdk_trace_record -s iscsi -p ${iscsi_pid} -f ${TRACE_RECORD_OUTPUT} -q 1>${TRACE_RECORD_NOTICE_LOG} & ./app/trace_record/spdk_trace_record -s iscsi -p ${iscsi_pid} -f ${TRACE_RECORD_OUTPUT} -q 1> ${TRACE_RECORD_NOTICE_LOG} &
record_pid=$! record_pid=$!
echo "Trace record pid: $record_pid" echo "Trace record pid: $record_pid"
@ -71,7 +71,7 @@ sleep 1
iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT iscsiadm -m discovery -t sendtargets -p $TARGET_IP:$ISCSI_PORT
iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT iscsiadm -m node --login -p $TARGET_IP:$ISCSI_PORT
waitforiscsidevices $(( CONNECTION_NUMBER + 1 )) waitforiscsidevices $((CONNECTION_NUMBER + 1))
trap 'iscsicleanup; killprocess $iscsi_pid; killprocess $record_pid; delete_tmp_files; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT trap 'iscsicleanup; killprocess $iscsi_pid; killprocess $record_pid; delete_tmp_files; iscsitestfini $1 $2; exit 1' SIGINT SIGTERM EXIT
@ -112,23 +112,23 @@ len_arr_record_num=${#arr_record_num[@]}
len_arr_trace_tool_num=${#arr_trace_tool_num[@]} len_arr_trace_tool_num=${#arr_trace_tool_num[@]}
#lcore num check #lcore num check
if [ $len_arr_record_num -ne $len_arr_trace_tool_num ]; then if [ $len_arr_record_num -ne $len_arr_trace_tool_num ]; then
echo "trace record test on iscsi: failure on lcore number check" echo "trace record test on iscsi: failure on lcore number check"
set -e set -e
exit 1 exit 1
fi fi
#trace entries num check #trace entries num check
for i in $(seq 0 $((len_arr_record_num - 1))); do for i in $(seq 0 $((len_arr_record_num - 1))); do
if [ ${arr_record_num[$i]} -le ${NUM_TRACE_ENTRIES} ]; then if [ ${arr_record_num[$i]} -le ${NUM_TRACE_ENTRIES} ]; then
echo "trace record test on iscsi: failure on inefficient entries number check" echo "trace record test on iscsi: failure on inefficient entries number check"
set -e set -e
exit 1 exit 1
fi fi
if [ ${arr_record_num[$i]} -ne ${arr_trace_tool_num[$i]} ]; then if [ ${arr_record_num[$i]} -ne ${arr_trace_tool_num[$i]} ]; then
echo "trace record test on iscsi: failure on entries number check" echo "trace record test on iscsi: failure on entries number check"
set -e set -e
exit 1 exit 1
fi fi
done done
trap - SIGINT SIGTERM EXIT trap - SIGINT SIGTERM EXIT
@ -14,13 +14,13 @@ if [[ $SPDK_TEST_VHOST -ne 1 && $SPDK_TEST_VHOST_INIT -eq 1 ]]; then
echo " Setting SPDK_TEST_VHOST=1 for duration of current script." echo " Setting SPDK_TEST_VHOST=1 for duration of current script."
fi fi
if (( SPDK_TEST_BLOCKDEV + \ if ((SPDK_TEST_BLOCKDEV + \
SPDK_TEST_ISCSI + SPDK_TEST_ISCSI + \
SPDK_TEST_NVMF + SPDK_TEST_NVMF + \
SPDK_TEST_VHOST + SPDK_TEST_VHOST + \
SPDK_TEST_VHOST_INIT + SPDK_TEST_VHOST_INIT + \
SPDK_TEST_PMDK + SPDK_TEST_PMDK + \
SPDK_TEST_RBD == 0 )); then SPDK_TEST_RBD == 0)); then
echo "WARNING: No tests are enabled so not running JSON configuration tests" echo "WARNING: No tests are enabled so not running JSON configuration tests"
exit 0 exit 0
fi fi
@ -58,41 +58,41 @@ function tgt_check_notification_types() {
} }
function tgt_check_notifications() { function tgt_check_notifications() {
local event_line event ev_type ev_ctx local event_line event ev_type ev_ctx
local rc="" local rc=""
while read -r event_line; do while read -r event_line; do
# remove ID # remove ID
event="${event_line%:*}" event="${event_line%:*}"
ev_type=${event%:*} ev_type=${event%:*}
ev_ctx=${event#*:} ev_ctx=${event#*:}
ex_ev_type=${1%%:*} ex_ev_type=${1%%:*}
ex_ev_ctx=${1#*:} ex_ev_ctx=${1#*:}
last_event_id=${event_line##*:} last_event_id=${event_line##*:}
# set rc=false in case of failure so all errors can be printed # set rc=false in case of failure so all errors can be printed
if (( $# == 0 )); then if (($# == 0)); then
echo "ERROR: got extra event: $event_line" echo "ERROR: got extra event: $event_line"
rc=false rc=false
continue continue
elif ! echo "$ev_type" | grep -E -q "^${ex_ev_type}\$" || ! echo "$ev_ctx" | grep -E -q "^${ex_ev_ctx}\$"; then elif ! echo "$ev_type" | grep -E -q "^${ex_ev_type}\$" || ! echo "$ev_ctx" | grep -E -q "^${ex_ev_ctx}\$"; then
echo "ERROR: expected event '$1' but got '$event' (whole event line: $event_line)" echo "ERROR: expected event '$1' but got '$event' (whole event line: $event_line)"
rc=false rc=false
fi fi
shift shift
done < <(tgt_rpc notify_get_notifications -i ${last_event_id} | jq -r '.[] | "\(.type):\(.ctx):\(.id)"') done < <(tgt_rpc notify_get_notifications -i ${last_event_id} | jq -r '.[] | "\(.type):\(.ctx):\(.id)"')
$rc $rc
if (( $# != 0 )); then if (($# != 0)); then
echo "ERROR: missing events:" echo "ERROR: missing events:"
echo "$@" echo "$@"
return 1 return 1
fi fi
} }
# $1 - target / initiator # $1 - target / initiator
@ -102,7 +102,7 @@ function json_config_test_start_app() {
shift shift
[[ -n "${#app_socket[$app]}" ]] # Check app type [[ -n "${#app_socket[$app]}" ]] # Check app type
[[ -z "${app_pid[$app]}" ]] # Assert if app is not running [[ -z "${app_pid[$app]}" ]] # Assert if app is not running
local app_extra_params="" local app_extra_params=""
if [[ $SPDK_TEST_VHOST -eq 1 || $SPDK_TEST_VHOST_INIT -eq 1 ]]; then if [[ $SPDK_TEST_VHOST -eq 1 || $SPDK_TEST_VHOST_INIT -eq 1 ]]; then
@ -129,8 +129,8 @@ function json_config_test_shutdown_app() {
# spdk_kill_instance RPC will trigger ASAN # spdk_kill_instance RPC will trigger ASAN
kill -SIGINT ${app_pid[$app]} kill -SIGINT ${app_pid[$app]}
for (( i=0; i<30; i++ )); do for ((i = 0; i < 30; i++)); do
if ! kill -0 ${app_pid[$app]} 2>/dev/null; then if ! kill -0 ${app_pid[$app]} 2> /dev/null; then
app_pid[$app]= app_pid[$app]=
break break
fi fi
@ -152,7 +152,7 @@ function create_bdev_subsystem_config() {
if [[ $SPDK_TEST_BLOCKDEV -eq 1 ]]; then if [[ $SPDK_TEST_BLOCKDEV -eq 1 ]]; then
local lvol_store_base_bdev=Nvme0n1 local lvol_store_base_bdev=Nvme0n1
if ! tgt_rpc get_bdevs --name ${lvol_store_base_bdev} >/dev/null; then if ! tgt_rpc get_bdevs --name ${lvol_store_base_bdev} > /dev/null; then
if [[ $(uname -s) = Linux ]]; then if [[ $(uname -s) = Linux ]]; then
lvol_store_base_bdev=aio_disk lvol_store_base_bdev=aio_disk
echo "WARNING: No NVMe drive found. Using '$lvol_store_base_bdev' instead." echo "WARNING: No NVMe drive found. Using '$lvol_store_base_bdev' instead."
@ -191,7 +191,7 @@ function create_bdev_subsystem_config() {
# This AIO bdev must be large enough to be used as LVOL store # This AIO bdev must be large enough to be used as LVOL store
dd if=/dev/zero of=/tmp/sample_aio bs=1024 count=102400 dd if=/dev/zero of=/tmp/sample_aio bs=1024 count=102400
tgt_rpc bdev_aio_create /tmp/sample_aio aio_disk 1024 tgt_rpc bdev_aio_create /tmp/sample_aio aio_disk 1024
expected_notifications+=( bdev_register:aio_disk ) expected_notifications+=(bdev_register:aio_disk)
fi fi
# For LVOLs use split to check for proper order of initialization. # For LVOLs use split to check for proper order of initialization.
@ -200,8 +200,8 @@ function create_bdev_subsystem_config() {
tgt_rpc bdev_lvol_create_lvstore -c 1048576 ${lvol_store_base_bdev}p0 lvs_test tgt_rpc bdev_lvol_create_lvstore -c 1048576 ${lvol_store_base_bdev}p0 lvs_test
tgt_rpc bdev_lvol_create -l lvs_test lvol0 32 tgt_rpc bdev_lvol_create -l lvs_test lvol0 32
tgt_rpc bdev_lvol_create -l lvs_test -t lvol1 32 tgt_rpc bdev_lvol_create -l lvs_test -t lvol1 32
tgt_rpc bdev_lvol_snapshot lvs_test/lvol0 snapshot0 tgt_rpc bdev_lvol_snapshot lvs_test/lvol0 snapshot0
tgt_rpc bdev_lvol_clone lvs_test/snapshot0 clone0 tgt_rpc bdev_lvol_clone lvs_test/snapshot0 clone0
expected_notifications+=( expected_notifications+=(
"bdev_register:$RE_UUID" "bdev_register:$RE_UUID"
@ -231,13 +231,13 @@ function create_bdev_subsystem_config() {
rm -f $pmem_pool_file rm -f $pmem_pool_file
tgt_rpc create_pmem_pool $pmem_pool_file 128 4096 tgt_rpc create_pmem_pool $pmem_pool_file 128 4096
tgt_rpc bdev_pmem_create -n pmem1 $pmem_pool_file tgt_rpc bdev_pmem_create -n pmem1 $pmem_pool_file
expected_notifications+=( bdev_register:pmem1 ) expected_notifications+=(bdev_register:pmem1)
fi fi
if [[ $SPDK_TEST_RBD -eq 1 ]]; then if [[ $SPDK_TEST_RBD -eq 1 ]]; then
rbd_setup 127.0.0.1 rbd_setup 127.0.0.1
tgt_rpc bdev_rbd_create $RBD_POOL $RBD_NAME 4096 tgt_rpc bdev_rbd_create $RBD_POOL $RBD_NAME 4096
expected_notifications+=( bdev_register:Ceph0 ) expected_notifications+=(bdev_register:Ceph0)
fi fi
tgt_check_notifications "${expected_notifications[@]}" tgt_check_notifications "${expected_notifications[@]}"
@ -249,9 +249,9 @@ function cleanup_bdev_subsystem_config() {
timing_enter "${FUNCNAME[0]}" timing_enter "${FUNCNAME[0]}"
if [[ $SPDK_TEST_BLOCKDEV -eq 1 ]]; then if [[ $SPDK_TEST_BLOCKDEV -eq 1 ]]; then
tgt_rpc bdev_lvol_delete lvs_test/clone0 tgt_rpc bdev_lvol_delete lvs_test/clone0
tgt_rpc bdev_lvol_delete lvs_test/lvol0 tgt_rpc bdev_lvol_delete lvs_test/lvol0
tgt_rpc bdev_lvol_delete lvs_test/snapshot0 tgt_rpc bdev_lvol_delete lvs_test/snapshot0
tgt_rpc bdev_lvol_delete_lvstore -l lvs_test tgt_rpc bdev_lvol_delete_lvstore -l lvs_test
fi fi
@ -278,16 +278,16 @@ function create_vhost_subsystem_config() {
tgt_rpc bdev_malloc_create 64 1024 --name MallocForVhost0 tgt_rpc bdev_malloc_create 64 1024 --name MallocForVhost0
tgt_rpc bdev_split_create MallocForVhost0 8 tgt_rpc bdev_split_create MallocForVhost0 8
tgt_rpc vhost_create_scsi_controller VhostScsiCtrlr0 tgt_rpc vhost_create_scsi_controller VhostScsiCtrlr0
tgt_rpc vhost_scsi_controller_add_target VhostScsiCtrlr0 0 MallocForVhost0p3 tgt_rpc vhost_scsi_controller_add_target VhostScsiCtrlr0 0 MallocForVhost0p3
tgt_rpc vhost_scsi_controller_add_target VhostScsiCtrlr0 -1 MallocForVhost0p4 tgt_rpc vhost_scsi_controller_add_target VhostScsiCtrlr0 -1 MallocForVhost0p4
tgt_rpc vhost_controller_set_coalescing VhostScsiCtrlr0 1 100 tgt_rpc vhost_controller_set_coalescing VhostScsiCtrlr0 1 100
tgt_rpc vhost_create_blk_controller VhostBlkCtrlr0 MallocForVhost0p5 tgt_rpc vhost_create_blk_controller VhostBlkCtrlr0 MallocForVhost0p5
# FIXME: enable after vhost-nvme is properly implemented against the latest rte_vhost (DPDK 19.05+) # FIXME: enable after vhost-nvme is properly implemented against the latest rte_vhost (DPDK 19.05+)
# tgt_rpc vhost_create_nvme_controller VhostNvmeCtrlr0 16 # tgt_rpc vhost_create_nvme_controller VhostNvmeCtrlr0 16
# tgt_rpc vhost_nvme_controller_add_ns VhostNvmeCtrlr0 MallocForVhost0p6 # tgt_rpc vhost_nvme_controller_add_ns VhostNvmeCtrlr0 MallocForVhost0p6
timing_exit "${FUNCNAME[0]}" timing_exit "${FUNCNAME[0]}"
} }
@ -315,9 +315,9 @@ function create_nvmf_subsystem_config() {
tgt_rpc bdev_malloc_create 4 1024 --name MallocForNvmf1 tgt_rpc bdev_malloc_create 4 1024 --name MallocForNvmf1
tgt_rpc nvmf_create_transport -t RDMA -u 8192 -c 0 tgt_rpc nvmf_create_transport -t RDMA -u 8192 -c 0
tgt_rpc nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 tgt_rpc nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
tgt_rpc nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 MallocForNvmf0 tgt_rpc nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 MallocForNvmf0
tgt_rpc nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 MallocForNvmf1 tgt_rpc nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 MallocForNvmf1
tgt_rpc nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t RDMA -a $NVMF_FIRST_TARGET_IP -s "$NVMF_PORT" tgt_rpc nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t RDMA -a $NVMF_FIRST_TARGET_IP -s "$NVMF_PORT"
timing_exit "${FUNCNAME[0]}" timing_exit "${FUNCNAME[0]}"
@ -326,14 +326,12 @@ function create_nvmf_subsystem_config() {
function create_virtio_initiator_config() { function create_virtio_initiator_config() {
timing_enter "${FUNCNAME[0]}" timing_enter "${FUNCNAME[0]}"
initiator_rpc bdev_virtio_attach_controller -t user -a /var/tmp/VhostScsiCtrlr0 -d scsi VirtioScsiCtrlr0 initiator_rpc bdev_virtio_attach_controller -t user -a /var/tmp/VhostScsiCtrlr0 -d scsi VirtioScsiCtrlr0
initiator_rpc bdev_virtio_attach_controller -t user -a /var/tmp/VhostBlkCtrlr0 -d blk VirtioBlk0 initiator_rpc bdev_virtio_attach_controller -t user -a /var/tmp/VhostBlkCtrlr0 -d blk VirtioBlk0
# TODO: initiator_rpc bdev_virtio_attach_controller -t user -a /var/tmp/VhostNvmeCtrlr0 -d nvme VirtioNvme0 # TODO: initiator_rpc bdev_virtio_attach_controller -t user -a /var/tmp/VhostNvmeCtrlr0 -d nvme VirtioNvme0
timing_exit "${FUNCNAME[0]}" timing_exit "${FUNCNAME[0]}"
} }
function json_config_test_init() {
function json_config_test_init()
{
timing_enter "${FUNCNAME[0]}" timing_enter "${FUNCNAME[0]}"
timing_enter json_config_setup_target timing_enter json_config_setup_target
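The hunk above also normalizes the function definition style: a brace that used to sit on its own line below function json_config_test_init() is pulled up onto the definition line, matching the function name() { form used elsewhere in the tree. A minimal sketch of the before/after shape with a placeholder body:

# Pre-format layout:
# function json_config_test_init()
# {
#	...
# }
function example_init() { # brace now on the definition line
	echo "init done"
}
example_init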
@ -343,7 +341,7 @@ function json_config_test_init()
# Load nvme configuration. The load_config will issue framework_start_init automatically # Load nvme configuration. The load_config will issue framework_start_init automatically
( (
echo '{"subsystems": ['; echo '{"subsystems": ['
$rootdir/scripts/gen_nvme.sh --json | jq -r "del(.config[] | select(.params.name!=\"Nvme0\"))" $rootdir/scripts/gen_nvme.sh --json | jq -r "del(.config[] | select(.params.name!=\"Nvme0\"))"
echo ']}' echo ']}'
) | tgt_rpc load_config ) | tgt_rpc load_config
@ -412,13 +410,13 @@ function json_config_clear() {
# It causes that configuration may not be fully cleaned at this moment and # It causes that configuration may not be fully cleaned at this moment and
# we should to wait a while. (See github issue #789) # we should to wait a while. (See github issue #789)
count=100 count=100
while [ $count -gt 0 ] ; do while [ $count -gt 0 ]; do
$rootdir/scripts/rpc.py -s "${app_socket[$1]}" save_config | $config_filter -method delete_global_parameters | $config_filter -method check_empty && break $rootdir/scripts/rpc.py -s "${app_socket[$1]}" save_config | $config_filter -method delete_global_parameters | $config_filter -method check_empty && break
count=$(( count -1 )) count=$((count - 1))
sleep 0.1 sleep 0.1
done done
if [ $count -eq 0 ] ; then if [ $count -eq 0 ]; then
return 1 return 1
fi fi
} }
@ -465,7 +463,7 @@ fi
echo "INFO: changing configuration and checking if this can be detected..." echo "INFO: changing configuration and checking if this can be detected..."
# Self test to check if configuration diff can be detected. # Self test to check if configuration diff can be detected.
tgt_rpc bdev_malloc_delete MallocBdevForConfigChangeCheck tgt_rpc bdev_malloc_delete MallocBdevForConfigChangeCheck
if $rootdir/test/json_config/json_diff.sh <(tgt_rpc save_config) "${configs_path[target]}" >/dev/null; then if $rootdir/test/json_config/json_diff.sh <(tgt_rpc save_config) "${configs_path[target]}" > /dev/null; then
echo "ERROR: intentional configuration difference not detected!" echo "ERROR: intentional configuration difference not detected!"
false false
else else
@ -31,7 +31,7 @@ function test_construct_lvs() {
[ "$cluster_size" = "$LVS_DEFAULT_CLUSTER_SIZE" ] [ "$cluster_size" = "$LVS_DEFAULT_CLUSTER_SIZE" ]
total_clusters=$(jq -r '.[0].total_data_clusters' <<< "$lvs") total_clusters=$(jq -r '.[0].total_data_clusters' <<< "$lvs")
[ "$(jq -r '.[0].free_clusters' <<< "$lvs")" = "$total_clusters" ] [ "$(jq -r '.[0].free_clusters' <<< "$lvs")" = "$total_clusters" ]
[ "$(( total_clusters * cluster_size ))" = "$LVS_DEFAULT_CAPACITY" ] [ "$((total_clusters * cluster_size))" = "$LVS_DEFAULT_CAPACITY" ]
# remove the lvs and verify it's gone # remove the lvs and verify it's gone
rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid" rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
@ -100,11 +100,11 @@ function test_construct_lvs_different_cluster_size() {
# use the second malloc for some more lvs creation negative tests # use the second malloc for some more lvs creation negative tests
malloc2_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS) malloc2_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
# capacity bigger than malloc's # capacity bigger than malloc's
rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c $(( MALLOC_SIZE + 1 )) && false rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c $((MALLOC_SIZE + 1)) && false
# capacity equal to malloc's (no space left for metadata) # capacity equal to malloc's (no space left for metadata)
rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c $MALLOC_SIZE && false rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c $MALLOC_SIZE && false
# capacity smaller than malloc's, but still no space left for metadata # capacity smaller than malloc's, but still no space left for metadata
rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c $(( MALLOC_SIZE - 1 )) && false rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c $((MALLOC_SIZE - 1)) && false
# cluster size smaller than the minimum (8192) # cluster size smaller than the minimum (8192)
rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c 8191 && false rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test -c 8191 && false
@ -137,7 +137,7 @@ function test_construct_lvs_clear_methods() {
malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS) malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
# first try to provide invalid clear method # first try to provide invalid clear method
rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test --clear-method invalid123 && false rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs2_test --clear-method invalid123 && false
# no lvs should be created # no lvs should be created
lvol_stores=$(rpc_cmd bdev_lvol_get_lvstores) lvol_stores=$(rpc_cmd bdev_lvol_get_lvstores)
@ -154,7 +154,7 @@ function test_construct_lvs_clear_methods() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ] [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ] [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( LVS_DEFAULT_CAPACITY / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CAPACITY / MALLOC_BS))" ]
# clean up # clean up
rpc_cmd bdev_lvol_delete "$lvol_uuid" rpc_cmd bdev_lvol_delete "$lvol_uuid"
@ -181,10 +181,10 @@ function test_construct_lvol_fio_clear_method_none() {
get_lvs_jq bdev_lvol_get_lvstores -u "$lvstore_uuid" get_lvs_jq bdev_lvol_get_lvstores -u "$lvstore_uuid"
lvol_uuid=$(rpc_cmd bdev_lvol_create \ lvol_uuid=$(rpc_cmd bdev_lvol_create \
-c "$clear_method" \ -c "$clear_method" \
-u "$lvstore_uuid" \ -u "$lvstore_uuid" \
"$lvol_name" \ "$lvol_name" \
$(( jq_out["cluster_size"] / 1024**2 ))) $((jq_out["cluster_size"] / 1024 ** 2)))
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" "$nbd_name" nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" "$nbd_name"
run_fio_test "$nbd_name" 0 "${jq_out["cluster_size"]}" write 0xdd run_fio_test "$nbd_name" 0 "${jq_out["cluster_size"]}" write 0xdd
@ -203,12 +203,12 @@ function test_construct_lvol_fio_clear_method_none() {
metadata_pages=$(calc "1 + ${jq_out["total_data_clusters"]} + ceil(5 + ceil(${jq_out["total_data_clusters"]} / 8) / 4096) * 3") metadata_pages=$(calc "1 + ${jq_out["total_data_clusters"]} + ceil(5 + ceil(${jq_out["total_data_clusters"]} / 8) / 4096) * 3")
last_metadata_lba=$(( metadata_pages * 4096 / MALLOC_BS )) last_metadata_lba=$((metadata_pages * 4096 / MALLOC_BS))
offset_metadata_end=$(( last_metadata_lba * MALLOC_BS )) offset_metadata_end=$((last_metadata_lba * MALLOC_BS))
last_cluster_of_metadata=$(calc "ceil($metadata_pages / ${jq_out["cluster_size"]} / 4096)") last_cluster_of_metadata=$(calc "ceil($metadata_pages / ${jq_out["cluster_size"]} / 4096)")
last_cluster_of_metadata=$(( last_cluster_of_metadata == 0 ? 1 : last_cluster_of_metadata )) last_cluster_of_metadata=$((last_cluster_of_metadata == 0 ? 1 : last_cluster_of_metadata))
offset=$(( last_cluster_of_metadata * jq_out["cluster_size"] )) offset=$((last_cluster_of_metadata * jq_out["cluster_size"]))
size_metadata_end=$(( offset - offset_metadata_end )) size_metadata_end=$((offset - offset_metadata_end))
# Check if data on area between end of metadata and first cluster of lvol bdev remained unchaged. # Check if data on area between end of metadata and first cluster of lvol bdev remained unchaged.
run_fio_test "$nbd_name" "$offset_metadata_end" "$size_metadata_end" "read" 0x00 run_fio_test "$nbd_name" "$offset_metadata_end" "$size_metadata_end" "read" 0x00
@ -233,17 +233,17 @@ function test_construct_lvol_fio_clear_method_unmap() {
malloc_dev=$(rpc_cmd bdev_malloc_create 256 "$MALLOC_BS") malloc_dev=$(rpc_cmd bdev_malloc_create 256 "$MALLOC_BS")
nbd_start_disks "$DEFAULT_RPC_ADDR" "$malloc_dev" "$nbd_name" nbd_start_disks "$DEFAULT_RPC_ADDR" "$malloc_dev" "$nbd_name"
run_fio_test "$nbd_name" 0 $(( 256 * 1024**2 )) write 0xdd run_fio_test "$nbd_name" 0 $((256 * 1024 ** 2)) write 0xdd
nbd_stop_disks "$DEFAULT_RPC_ADDR" "$nbd_name" nbd_stop_disks "$DEFAULT_RPC_ADDR" "$nbd_name"
lvstore_uuid=$(rpc_cmd bdev_lvol_create_lvstore --clear-method none "$malloc_dev" "$lvstore_name") lvstore_uuid=$(rpc_cmd bdev_lvol_create_lvstore --clear-method none "$malloc_dev" "$lvstore_name")
get_lvs_jq bdev_lvol_get_lvstores -u "$lvstore_uuid" get_lvs_jq bdev_lvol_get_lvstores -u "$lvstore_uuid"
lvol_uuid=$(rpc_cmd bdev_lvol_create \ lvol_uuid=$(rpc_cmd bdev_lvol_create \
-c "$clear_method" \ -c "$clear_method" \
-u "$lvstore_uuid" \ -u "$lvstore_uuid" \
"$lvol_name" \ "$lvol_name" \
$(( jq_out["cluster_size"] / 1024**2 ))) $((jq_out["cluster_size"] / 1024 ** 2)))
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" "$nbd_name" nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" "$nbd_name"
run_fio_test "$nbd_name" 0 "${jq_out["cluster_size"]}" read 0xdd run_fio_test "$nbd_name" 0 "${jq_out["cluster_size"]}" read 0xdd
@ -262,12 +262,12 @@ function test_construct_lvol_fio_clear_method_unmap() {
metadata_pages=$(calc "1 + ${jq_out["total_data_clusters"]} + ceil(5 + ceil(${jq_out["total_data_clusters"]} / 8) / 4096) * 3") metadata_pages=$(calc "1 + ${jq_out["total_data_clusters"]} + ceil(5 + ceil(${jq_out["total_data_clusters"]} / 8) / 4096) * 3")
last_metadata_lba=$(( metadata_pages * 4096 / MALLOC_BS )) last_metadata_lba=$((metadata_pages * 4096 / MALLOC_BS))
offset_metadata_end=$(( last_metadata_lba * MALLOC_BS )) offset_metadata_end=$((last_metadata_lba * MALLOC_BS))
last_cluster_of_metadata=$(calc "ceil($metadata_pages / ${jq_out["cluster_size"]} / 4096)") last_cluster_of_metadata=$(calc "ceil($metadata_pages / ${jq_out["cluster_size"]} / 4096)")
last_cluster_of_metadata=$(( last_cluster_of_metadata == 0 ? 1 : last_cluster_of_metadata )) last_cluster_of_metadata=$((last_cluster_of_metadata == 0 ? 1 : last_cluster_of_metadata))
offset=$(( last_cluster_of_metadata * jq_out["cluster_size"] )) offset=$((last_cluster_of_metadata * jq_out["cluster_size"]))
size_metadata_end=$(( offset - offset_metadata_end )) size_metadata_end=$((offset - offset_metadata_end))
# Check if data on area between end of metadata and first cluster of lvol bdev remained unchaged. # Check if data on area between end of metadata and first cluster of lvol bdev remained unchaged.
run_fio_test "$nbd_name" "$offset_metadata_end" "$size_metadata_end" "read" 0xdd run_fio_test "$nbd_name" "$offset_metadata_end" "$size_metadata_end" "read" 0xdd
@ -294,7 +294,7 @@ function test_construct_lvol() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ] [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ] [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( LVS_DEFAULT_CAPACITY / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CAPACITY / MALLOC_BS))" ]
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ] [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
# clean up and create another lvol, this time use lvs alias instead of uuid # clean up and create another lvol, this time use lvs alias instead of uuid
@ -307,7 +307,7 @@ function test_construct_lvol() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ] [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ] [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( LVS_DEFAULT_CAPACITY / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CAPACITY / MALLOC_BS))" ]
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ] [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
# clean up # clean up
@ -326,10 +326,10 @@ function test_construct_multi_lvols() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test) lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# create 4 lvols # create 4 lvols
lvol_size_mb=$(( LVS_DEFAULT_CAPACITY_MB / 4 )) lvol_size_mb=$((LVS_DEFAULT_CAPACITY_MB / 4))
# round down lvol size to the nearest cluster size boundary # round down lvol size to the nearest cluster size boundary
lvol_size_mb=$(( lvol_size_mb / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB )) lvol_size_mb=$((lvol_size_mb / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
for i in $(seq 1 4); do for i in $(seq 1 4); do
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" "lvol_test${i}" "$lvol_size_mb") lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" "lvol_test${i}" "$lvol_size_mb")
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
@ -338,7 +338,7 @@ function test_construct_multi_lvols() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ] [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test${i}" ] [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test${i}" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
done done
lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]') lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
@ -361,7 +361,7 @@ function test_construct_multi_lvols() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ] [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test${i}" ] [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test${i}" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
done done
lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]') lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
@ -394,13 +394,13 @@ function test_construct_lvols_conflict_alias() {
lvol1=$(rpc_cmd bdev_get_bdevs -b "$lvol1_uuid") lvol1=$(rpc_cmd bdev_get_bdevs -b "$lvol1_uuid")
# use a different size for second malloc to keep those differentiable # use a different size for second malloc to keep those differentiable
malloc2_size_mb=$(( MALLOC_SIZE_MB / 2 )) malloc2_size_mb=$((MALLOC_SIZE_MB / 2))
# create an lvol store 2 # create an lvol store 2
malloc2_name=$(rpc_cmd bdev_malloc_create $malloc2_size_mb $MALLOC_BS) malloc2_name=$(rpc_cmd bdev_malloc_create $malloc2_size_mb $MALLOC_BS)
lvs2_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs_test2) lvs2_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc2_name" lvs_test2)
lvol2_size_mb=$(round_down $(( LVS_DEFAULT_CAPACITY_MB / 2 )) ) lvol2_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
# create an lvol on lvs2 # create an lvol on lvs2
lvol2_uuid=$(rpc_cmd bdev_lvol_create -l lvs_test2 lvol_test "$lvol2_size_mb") lvol2_uuid=$(rpc_cmd bdev_lvol_create -l lvs_test2 lvol_test "$lvol2_size_mb")
@ -474,7 +474,7 @@ function test_construct_lvol_alias_conflict() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test) lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# create valid lvol # create valid lvol
lvol_size_mb=$(round_down $(( LVS_DEFAULT_CAPACITY_MB / 2 )) ) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
lvol1_uuid=$(rpc_cmd bdev_lvol_create -l lvs_test lvol_test "$lvol_size_mb") lvol1_uuid=$(rpc_cmd bdev_lvol_create -l lvs_test lvol_test "$lvol_size_mb")
lvol1=$(rpc_cmd bdev_get_bdevs -b "$lvol1_uuid") lvol1=$(rpc_cmd bdev_get_bdevs -b "$lvol1_uuid")
@ -500,8 +500,8 @@ function test_construct_nested_lvol() {
# create a nested lvs # create a nested lvs
nested_lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$lvol_uuid" nested_lvs) nested_lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$lvol_uuid" nested_lvs)
nested_lvol_size_mb=$(( LVS_DEFAULT_CAPACITY_MB - LVS_DEFAULT_CLUSTER_SIZE_MB )) nested_lvol_size_mb=$((LVS_DEFAULT_CAPACITY_MB - LVS_DEFAULT_CLUSTER_SIZE_MB))
nested_lvol_size=$(( nested_lvol_size_mb * 1024 * 1024 )) nested_lvol_size=$((nested_lvol_size_mb * 1024 * 1024))
# create a nested lvol # create a nested lvol
nested_lvol1_uuid=$(rpc_cmd bdev_lvol_create -u "$nested_lvs_uuid" nested_lvol1 "$nested_lvol_size_mb") nested_lvol1_uuid=$(rpc_cmd bdev_lvol_create -u "$nested_lvs_uuid" nested_lvol1 "$nested_lvol_size_mb")
@ -511,7 +511,7 @@ function test_construct_nested_lvol() {
[ "$(jq -r '.[0].uuid' <<< "$nested_lvol1")" = "$nested_lvol1_uuid" ] [ "$(jq -r '.[0].uuid' <<< "$nested_lvol1")" = "$nested_lvol1_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$nested_lvol1")" = "nested_lvs/nested_lvol1" ] [ "$(jq -r '.[0].aliases[0]' <<< "$nested_lvol1")" = "nested_lvs/nested_lvol1" ]
[ "$(jq -r '.[0].block_size' <<< "$nested_lvol1")" = "$MALLOC_BS" ] [ "$(jq -r '.[0].block_size' <<< "$nested_lvol1")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$nested_lvol1")" = "$(( nested_lvol_size / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$nested_lvol1")" = "$((nested_lvol_size / MALLOC_BS))" ]
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$nested_lvol1")" = "$nested_lvs_uuid" ] [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$nested_lvol1")" = "$nested_lvs_uuid" ]
	# try to create another nested lvol on a lvs that's already full	# try to create another nested lvol on a lvs that's already full
@ -3,10 +3,10 @@ MALLOC_BS=512
AIO_SIZE_MB=400 AIO_SIZE_MB=400
AIO_BS=4096 AIO_BS=4096
LVS_DEFAULT_CLUSTER_SIZE_MB=4 LVS_DEFAULT_CLUSTER_SIZE_MB=4
LVS_DEFAULT_CLUSTER_SIZE=$(( LVS_DEFAULT_CLUSTER_SIZE_MB * 1024 * 1024 )) LVS_DEFAULT_CLUSTER_SIZE=$((LVS_DEFAULT_CLUSTER_SIZE_MB * 1024 * 1024))
# reserve some MBs for lvolstore metadata # reserve some MBs for lvolstore metadata
LVS_DEFAULT_CAPACITY_MB=$(( MALLOC_SIZE_MB - LVS_DEFAULT_CLUSTER_SIZE_MB )) LVS_DEFAULT_CAPACITY_MB=$((MALLOC_SIZE_MB - LVS_DEFAULT_CLUSTER_SIZE_MB))
LVS_DEFAULT_CAPACITY=$(( LVS_DEFAULT_CAPACITY_MB * 1024 * 1024 )) LVS_DEFAULT_CAPACITY=$((LVS_DEFAULT_CAPACITY_MB * 1024 * 1024))
function get_bdev_jq() { function get_bdev_jq() {
rpc_cmd_simple_data_json bdev "$@" rpc_cmd_simple_data_json bdev "$@"
@ -28,7 +28,7 @@ function round_down() {
if [ -n "$2" ]; then if [ -n "$2" ]; then
CLUSTER_SIZE_MB=$2 CLUSTER_SIZE_MB=$2
fi fi
echo $(( $1 / CLUSTER_SIZE_MB * CLUSTER_SIZE_MB )) echo $(($1 / CLUSTER_SIZE_MB * CLUSTER_SIZE_MB))
} }
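For reference, a minimal usage sketch of round_down as shown above (illustrative only, assuming the cluster size falls back to the 4 MiB LVS_DEFAULT_CLUSTER_SIZE_MB defined earlier in this file when no second argument is given):
# round 103 MiB down to a multiple of the assumed 4 MiB default cluster size -> 100
lvol_size_mb=$(round_down 103)
# a second argument overrides the cluster size, e.g. 32 MiB -> 96
lvol2_size_mb=$(round_down 103 32)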
function run_fio_test() {	function run_fio_test() {
@ -38,7 +38,7 @@ function test_hotremove_lvol_store_multiple_lvols() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test) lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# calculate lvol size # calculate lvol size
lvol_size_mb=$( round_down $(( (MALLOC_SIZE_MB- LVS_DEFAULT_CLUSTER_SIZE_MB) / 4 )) ) lvol_size_mb=$(round_down $(((MALLOC_SIZE_MB - LVS_DEFAULT_CLUSTER_SIZE_MB) / 4)))
# create 4 lvols # create 4 lvols
for i in $(seq 1 4); do for i in $(seq 1 4); do
@ -115,7 +115,7 @@ function test_bdev_lvol_delete_lvstore_with_clones() {
[[ ${jq_out["name"]} == "$lvstore_name" ]] [[ ${jq_out["name"]} == "$lvstore_name" ]]
[[ ${jq_out["base_bdev"]} == "$malloc_dev" ]] [[ ${jq_out["base_bdev"]} == "$malloc_dev" ]]
size=$(( jq_out["free_clusters"] * jq_out["cluster_size"] / 4 / 1024**2 )) size=$((jq_out["free_clusters"] * jq_out["cluster_size"] / 4 / 1024 ** 2))
bdev_uuid=$(rpc_cmd bdev_lvol_create -u "$lvstore_uuid" "$lbd_name" "$size") bdev_uuid=$(rpc_cmd bdev_lvol_create -u "$lvstore_uuid" "$lbd_name" "$size")
@ -169,7 +169,7 @@ function test_unregister_lvol_bdev() {
[[ ${jq_out["name"]} == "$lvstore_name" ]] [[ ${jq_out["name"]} == "$lvstore_name" ]]
[[ ${jq_out["base_bdev"]} == "$malloc_dev" ]] [[ ${jq_out["base_bdev"]} == "$malloc_dev" ]]
size=$(( jq_out["free_clusters"] * jq_out["cluster_size"] / 4 / 1024**2 )) size=$((jq_out["free_clusters"] * jq_out["cluster_size"] / 4 / 1024 ** 2))
	bdev_uuid=$(rpc_cmd bdev_lvol_create -u "$lvstore_uuid" "$lbd_name" "$size")	bdev_uuid=$(rpc_cmd bdev_lvol_create -u "$lvstore_uuid" "$lbd_name" "$size")
@ -13,8 +13,8 @@ function test_rename_positive() {
bdev_aliases=("lvs_test/lvol_test"{0..3}) bdev_aliases=("lvs_test/lvol_test"{0..3})
# Calculate size and create two lvol bdevs on top # Calculate size and create two lvol bdevs on top
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 4 )) ) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
# Create 4 lvol bdevs on top of previously created lvol store # Create 4 lvol bdevs on top of previously created lvol store
bdev_uuids=() bdev_uuids=()
@ -23,7 +23,7 @@ function test_rename_positive() {
lvol=$(rpc_cmd bdev_get_bdevs -b $lvol_uuid) lvol=$(rpc_cmd bdev_get_bdevs -b $lvol_uuid)
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ] [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
[ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases[i]}'"]')" ] [ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases[i]}'"]')" ]
bdev_uuids+=("$lvol_uuid") bdev_uuids+=("$lvol_uuid")
done done
@ -46,13 +46,13 @@ function test_rename_positive() {
cluster_size=$(jq -r '.[0].cluster_size' <<< "$lvs") cluster_size=$(jq -r '.[0].cluster_size' <<< "$lvs")
[ "$cluster_size" = "$LVS_DEFAULT_CLUSTER_SIZE" ] [ "$cluster_size" = "$LVS_DEFAULT_CLUSTER_SIZE" ]
total_clusters=$(jq -r '.[0].total_data_clusters' <<< "$lvs") total_clusters=$(jq -r '.[0].total_data_clusters' <<< "$lvs")
[ "$(( total_clusters * cluster_size ))" = "$LVS_DEFAULT_CAPACITY" ] [ "$((total_clusters * cluster_size))" = "$LVS_DEFAULT_CAPACITY" ]
for i in "${!bdev_uuids[@]}"; do for i in "${!bdev_uuids[@]}"; do
lvol=$(rpc_cmd bdev_get_bdevs -b "${bdev_uuids[i]}") lvol=$(rpc_cmd bdev_get_bdevs -b "${bdev_uuids[i]}")
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ] [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
[ "$(jq -r '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases[i]}'"]')" ] [ "$(jq -r '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases[i]}'"]')" ]
done done
@ -68,7 +68,7 @@ function test_rename_positive() {
lvol=$(rpc_cmd bdev_get_bdevs -b "${bdev_uuids[i]}") lvol=$(rpc_cmd bdev_get_bdevs -b "${bdev_uuids[i]}")
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ] [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
[ "$(jq -r '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${new_bdev_aliases[i]}'"]')" ] [ "$(jq -r '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${new_bdev_aliases[i]}'"]')" ]
done done
@ -104,8 +104,8 @@ function test_rename_lvs_negative() {
bdev_aliases_2=("lvs_test2/lvol_test_2_"{0..3}) bdev_aliases_2=("lvs_test2/lvol_test_2_"{0..3})
# Calculate size and create two lvol bdevs on top # Calculate size and create two lvol bdevs on top
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 4 )) ) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
	# Create 4 lvol bdevs on top of each lvol store	# Create 4 lvol bdevs on top of each lvol store
bdev_uuids_1=() bdev_uuids_1=()
@ -115,7 +115,7 @@ function test_rename_lvs_negative() {
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid1" ] [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid1" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
[ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases_1[i]}'"]')" ] [ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases_1[i]}'"]')" ]
bdev_uuids_1+=("$lvol_uuid") bdev_uuids_1+=("$lvol_uuid")
@ -123,7 +123,7 @@ function test_rename_lvs_negative() {
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid2" ] [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid2" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
[ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases_2[i]}'"]')" ] [ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases_2[i]}'"]')" ]
bdev_uuids_2+=("$lvol_uuid") bdev_uuids_2+=("$lvol_uuid")
done done
@ -148,13 +148,13 @@ function test_rename_lvs_negative() {
lvol=$(rpc_cmd bdev_get_bdevs -b "${bdev_uuids_1[i]}") lvol=$(rpc_cmd bdev_get_bdevs -b "${bdev_uuids_1[i]}")
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid1" ] [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid1" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
[ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases_1[i]}'"]')" ] [ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases_1[i]}'"]')" ]
lvol=$(rpc_cmd bdev_get_bdevs -b "${bdev_uuids_2[i]}") lvol=$(rpc_cmd bdev_get_bdevs -b "${bdev_uuids_2[i]}")
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid2" ] [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid2" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
[ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases_2[i]}'"]')" ] [ "$(jq '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["'${bdev_aliases_2[i]}'"]')" ]
done done
@ -181,8 +181,8 @@ function test_lvol_rename_negative() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test) lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Calculate lvol bdev size # Calculate lvol bdev size
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 2 )) ) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
# Create two lvol bdevs on top of previously created lvol store # Create two lvol bdevs on top of previously created lvol store
lvol_uuid1=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test1 "$lvol_size_mb") lvol_uuid1=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test1 "$lvol_size_mb")
@ -196,7 +196,7 @@ function test_lvol_rename_negative() {
lvol=$(rpc_cmd bdev_get_bdevs -b $lvol_uuid1) lvol=$(rpc_cmd bdev_get_bdevs -b $lvol_uuid1)
[ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ] [ "$(jq -r '.[0].driver_specific.lvol.lvol_store_uuid' <<< "$lvol")" = "$lvs_uuid" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
[ "$(jq -r '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["lvs_test/lvol_test1"]')" ] [ "$(jq -r '.[0].aliases|sort' <<< "$lvol")" = "$(jq '.|sort' <<< '["lvs_test/lvol_test1"]')" ]
	rpc_cmd bdev_lvol_delete lvs_test/lvol_test1	rpc_cmd bdev_lvol_delete lvs_test/lvol_test1
@ -13,8 +13,8 @@ function test_resize_lvol() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test) lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# calculate lvol size # calculate lvol size
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 4 )) ) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
# create an lvol on top # create an lvol on top
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb") lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
@ -23,28 +23,28 @@ function test_resize_lvol() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ] [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ] [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
# resize the lvol to twice its original size # resize the lvol to twice its original size
lvol_size_mb=$(( lvol_size_mb * 2 )) lvol_size_mb=$((lvol_size_mb * 2))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
rpc_cmd bdev_lvol_resize "$lvol_uuid" "$lvol_size_mb" rpc_cmd bdev_lvol_resize "$lvol_uuid" "$lvol_size_mb"
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
# resize the lvol to four times its original size, use its name instead of uuid # resize the lvol to four times its original size, use its name instead of uuid
lvol_size_mb=$(( lvol_size_mb * 2 )) lvol_size_mb=$((lvol_size_mb * 2))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
rpc_cmd bdev_lvol_resize lvs_test/lvol_test "$lvol_size_mb" rpc_cmd bdev_lvol_resize lvs_test/lvol_test "$lvol_size_mb"
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
# resize the lvol to 0 using lvol bdev alias # resize the lvol to 0 using lvol bdev alias
lvol_size_mb=0 lvol_size_mb=0
lvol_size=0 lvol_size=0
rpc_cmd bdev_lvol_resize "lvs_test/lvol_test" "$lvol_size_mb" rpc_cmd bdev_lvol_resize "lvs_test/lvol_test" "$lvol_size_mb"
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
# clean up # clean up
rpc_cmd bdev_lvol_delete "$lvol_uuid" rpc_cmd bdev_lvol_delete "$lvol_uuid"
@ -70,13 +70,13 @@ function test_resize_lvol_negative() {
rpc_cmd bdev_lvol_resize "$dummy_uuid" 0 && false rpc_cmd bdev_lvol_resize "$dummy_uuid" 0 && false
# just make sure the size of the real lvol did not change # just make sure the size of the real lvol did not change
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( LVS_DEFAULT_CAPACITY / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CAPACITY / MALLOC_BS))" ]
# try to resize an lvol to a size bigger than lvs # try to resize an lvol to a size bigger than lvs
rpc_cmd bdev_lvol_resize "$lvol_uuid" "$MALLOC_SIZE_MB" && false rpc_cmd bdev_lvol_resize "$lvol_uuid" "$MALLOC_SIZE_MB" && false
# just make sure the size of the real lvol did not change # just make sure the size of the real lvol did not change
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( LVS_DEFAULT_CAPACITY / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CAPACITY / MALLOC_BS))" ]
# clean up # clean up
rpc_cmd bdev_lvol_delete "$lvol_uuid" rpc_cmd bdev_lvol_delete "$lvol_uuid"
@ -93,8 +93,8 @@ function test_resize_lvol_with_io_traffic() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test) lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# calculate lvol size # calculate lvol size
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 2 )) ) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
# create an lvol on top # create an lvol on top
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb") lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
@ -103,26 +103,26 @@ function test_resize_lvol_with_io_traffic() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ] [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ] [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test/lvol_test" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
# prepare to do some I/O # prepare to do some I/O
trap 'nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0; exit 1' SIGINT SIGTERM EXIT trap 'nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0; exit 1' SIGINT SIGTERM EXIT
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0 nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
# write to the entire lvol # write to the entire lvol
count=$(( lvol_size / LVS_DEFAULT_CLUSTER_SIZE )) count=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE))
dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count
# writing beyond lvol size should fail # writing beyond lvol size should fail
offset=$(( lvol_size / LVS_DEFAULT_CLUSTER_SIZE + 1 )) offset=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE + 1))
dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" seek=$offset count=1 && false dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" seek=$offset count=1 && false
# resize the lvol to twice its original size # resize the lvol to twice its original size
lvol_size_mb=$(( lvol_size_mb * 2 )) lvol_size_mb=$((lvol_size_mb * 2))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
rpc_cmd bdev_lvol_resize "$lvol_uuid" "$lvol_size_mb" rpc_cmd bdev_lvol_resize "$lvol_uuid" "$lvol_size_mb"
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / MALLOC_BS))" ]
# writing beyond the original lvol size should now succeed, we need # writing beyond the original lvol size should now succeed, we need
# to restart NBD though as it may still use the old, cached size # to restart NBD though as it may still use the old, cached size
@ -137,7 +137,7 @@ function test_resize_lvol_with_io_traffic() {
# resize lvol down to a single cluster # resize lvol down to a single cluster
rpc_cmd bdev_lvol_resize "$lvol_uuid" "$LVS_DEFAULT_CLUSTER_SIZE_MB" rpc_cmd bdev_lvol_resize "$lvol_uuid" "$LVS_DEFAULT_CLUSTER_SIZE_MB"
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( LVS_DEFAULT_CLUSTER_SIZE / MALLOC_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((LVS_DEFAULT_CLUSTER_SIZE / MALLOC_BS))" ]
# make sure we can't write beyond the first cluster # make sure we can't write beyond the first cluster
trap 'nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0; exit 1' SIGINT SIGTERM EXIT trap 'nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0; exit 1' SIGINT SIGTERM EXIT
@ -168,7 +168,7 @@ function test_destroy_after_bdev_lvol_resize_positive() {
[[ ${jq_out["uuid"]} == "$lvstore_uuid" ]] [[ ${jq_out["uuid"]} == "$lvstore_uuid" ]]
[[ ${jq_out["name"]} == "$lvstore_name" ]] [[ ${jq_out["name"]} == "$lvstore_name" ]]
bdev_size=$(round_down $(( LVS_DEFAULT_CAPACITY_MB / 4 ))) bdev_size=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
bdev_uuid=$(rpc_cmd bdev_lvol_create -u "$lvstore_uuid" "$lbd_name" "$bdev_size") bdev_uuid=$(rpc_cmd bdev_lvol_create -u "$lvstore_uuid" "$lbd_name" "$bdev_size")
# start resizing in the following fashion: # start resizing in the following fashion:
@ -179,20 +179,20 @@ function test_destroy_after_bdev_lvol_resize_positive() {
# - size is equal 0 MiB # - size is equal 0 MiB
local resize local resize
for resize in \ for resize in \
"$bdev_size" \ "$bdev_size" \
$(( bdev_size + 4 )) \ $((bdev_size + 4)) \
$(( bdev_size * 2 )) \ $((bdev_size * 2)) \
$(( bdev_size * 3 )) \ $((bdev_size * 3)) \
$(( bdev_size * 4 - 4 )) \ $((bdev_size * 4 - 4)) \
0; do 0; do
resize=$(round_down $(( resize / 4 ))) resize=$(round_down $((resize / 4)))
rpc_cmd bdev_lvol_resize "$bdev_uuid" "$resize" rpc_cmd bdev_lvol_resize "$bdev_uuid" "$resize"
get_bdev_jq bdev_get_bdevs -b "$bdev_uuid" get_bdev_jq bdev_get_bdevs -b "$bdev_uuid"
[[ ${jq_out["name"]} == "$bdev_uuid" ]] [[ ${jq_out["name"]} == "$bdev_uuid" ]]
[[ ${jq_out["name"]} == "${jq_out["uuid"]}" ]] [[ ${jq_out["name"]} == "${jq_out["uuid"]}" ]]
(( jq_out["block_size"] == MALLOC_BS )) ((jq_out["block_size"] == MALLOC_BS))
(( jq_out["num_blocks"] * jq_out["block_size"] == resize * 1024**2 )) ((jq_out["num_blocks"] * jq_out["block_size"] == resize * 1024 ** 2))
done done
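As a quick illustration of the size assertion in the loop above (a sketch only, using the MALLOC_BS=512 block size from common.sh and a hypothetical 12 MiB resize value):
resize=12                          # MiB, hypothetical example value
echo $((resize * 1024 ** 2 / 512)) # prints 24576, the num_blocks the check expects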
	# cleanup	# cleanup
@ -11,20 +11,20 @@ function test_snapshot_compare_with_lvol_bdev() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test) lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Create two lvol bdevs # Create two lvol bdevs
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 6 )) ) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 6)))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid1=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test1 "$lvol_size_mb" -t) lvol_uuid1=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test1 "$lvol_size_mb" -t)
lvol_uuid2=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test2 "$lvol_size_mb") lvol_uuid2=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test2 "$lvol_size_mb")
	# Fill thin provisioned lvol bdev with 50% of its space	# Fill thin provisioned lvol bdev with 50% of its space
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid1" /dev/nbd0 nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid1" /dev/nbd0
count=$(( lvol_size / LVS_DEFAULT_CLUSTER_SIZE / 2 )) count=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE / 2))
dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0 nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
# Fill whole thick provisioned lvol bdev # Fill whole thick provisioned lvol bdev
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid2" /dev/nbd0 nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid2" /dev/nbd0
count=$(( lvol_size / LVS_DEFAULT_CLUSTER_SIZE )) count=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE))
dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0 nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
@ -35,7 +35,7 @@ function test_snapshot_compare_with_lvol_bdev() {
nbd_start_disks "$DEFAULT_RPC_ADDR" "$snapshot_uuid1" /dev/nbd0 nbd_start_disks "$DEFAULT_RPC_ADDR" "$snapshot_uuid1" /dev/nbd0
# Try to perform write operation on created snapshot # Try to perform write operation on created snapshot
# Check if filling snapshot of lvol bdev fails # Check if filling snapshot of lvol bdev fails
count=$(( lvol_size / LVS_DEFAULT_CLUSTER_SIZE )) count=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE))
dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count && false dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count && false
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0 nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
@ -52,7 +52,7 @@ function test_snapshot_compare_with_lvol_bdev() {
cmp "$lvol_nbd2" "$snapshot_nbd2" cmp "$lvol_nbd2" "$snapshot_nbd2"
# Fill second half of thin provisioned lvol bdev # Fill second half of thin provisioned lvol bdev
count=$(( lvol_size / LVS_DEFAULT_CLUSTER_SIZE / 2 )) count=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE / 2))
dd if=/dev/urandom of="$lvol_nbd1" oflag=direct seek=$count bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count dd if=/dev/urandom of="$lvol_nbd1" oflag=direct seek=$count bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count
# Compare thin provisioned lvol bdev with its snapshot and check if it fails # Compare thin provisioned lvol bdev with its snapshot and check if it fails
@ -77,7 +77,6 @@ function test_snapshot_compare_with_lvol_bdev() {
check_leftover_devices check_leftover_devices
} }
# Check that when writing to lvol bdev # Check that when writing to lvol bdev
# creating snapshot ends with success # creating snapshot ends with success
function test_create_snapshot_with_io() { function test_create_snapshot_with_io() {
@ -85,8 +84,8 @@ function test_create_snapshot_with_io() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test) lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Create lvol bdev # Create lvol bdev
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 2 )) ) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t) lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t)
@ -111,14 +110,13 @@ function test_create_snapshot_with_io() {
check_leftover_devices check_leftover_devices
} }
# Check that creating snapshot of snapshot will fail # Check that creating snapshot of snapshot will fail
function test_create_snapshot_of_snapshot() { function test_create_snapshot_of_snapshot() {
malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS) malloc_name=$(rpc_cmd bdev_malloc_create $MALLOC_SIZE_MB $MALLOC_BS)
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test) lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Create lvol bdev # Create lvol bdev
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 3 )) ) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 3)))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb") lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
@ -149,8 +147,8 @@ function test_clone_snapshot_relations() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test) lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Calculate size and create lvol bdev # Calculate size and create lvol bdev
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 6 )) ) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 6)))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb") lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
@ -176,7 +174,7 @@ function test_clone_snapshot_relations() {
# Perform write operation to first clone # Perform write operation to first clone
# Change first half of its space # Change first half of its space
nbd_start_disks "$DEFAULT_RPC_ADDR" "$clone_uuid1" /dev/nbd0 nbd_start_disks "$DEFAULT_RPC_ADDR" "$clone_uuid1" /dev/nbd0
fill_size=$(( lvol_size / 2 )) fill_size=$((lvol_size / 2))
run_fio_test /dev/nbd0 0 $fill_size "write" "0xaa" run_fio_test /dev/nbd0 0 $fill_size "write" "0xaa"
# Compare snapshot with second clone. Data on both bdevs should be the same # Compare snapshot with second clone. Data on both bdevs should be the same
@ -229,14 +227,14 @@ function test_clone_inflate() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test) lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Calculate size and create lvol bdev # Calculate size and create lvol bdev
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 4 )) ) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb") lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
# Fill lvol bdev with 100% of its space # Fill lvol bdev with 100% of its space
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0 nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
run_fio_test /dev/nbd0 0 $(( lvol_size_mb * 1024 * 1024 )) "write" "0xcc" run_fio_test /dev/nbd0 0 $((lvol_size_mb * 1024 * 1024)) "write" "0xcc"
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0 nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
# Create snapshots of lvol bdev # Create snapshots of lvol bdev
@ -249,9 +247,9 @@ function test_clone_inflate() {
# Fill part of clone with data of known pattern # Fill part of clone with data of known pattern
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0 nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
first_fill=0 first_fill=0
second_fill=$(( lvol_size_mb * 1024 * 1024 * 3 / 4 )) second_fill=$((lvol_size_mb * 1024 * 1024 * 3 / 4))
run_fio_test /dev/nbd0 $first_fill $(( 1024 * 1024 )) "write" "0xdd" run_fio_test /dev/nbd0 $first_fill $((1024 * 1024)) "write" "0xdd"
run_fio_test /dev/nbd0 $second_fill $(( 1024 * 1024 )) "write" "0xdd" run_fio_test /dev/nbd0 $second_fill $((1024 * 1024)) "write" "0xdd"
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0 nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
# Do inflate # Do inflate
@ -264,10 +262,10 @@ function test_clone_inflate() {
# Check data consistency # Check data consistency
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0 nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
run_fio_test /dev/nbd0 $first_fill $(( 1024 * 1024 )) "read" "0xdd" run_fio_test /dev/nbd0 $first_fill $((1024 * 1024)) "read" "0xdd"
run_fio_test /dev/nbd0 $(( (first_fill + 1) * 1024 * 1024 )) $(( second_fill - 1024 * 1024 )) "read" "0xcc" run_fio_test /dev/nbd0 $(((first_fill + 1) * 1024 * 1024)) $((second_fill - 1024 * 1024)) "read" "0xcc"
run_fio_test /dev/nbd0 $second_fill $(( 1024 * 1024 )) "read" "0xdd" run_fio_test /dev/nbd0 $second_fill $((1024 * 1024)) "read" "0xdd"
run_fio_test /dev/nbd0 $(( second_fill + 1024 * 1024 )) $(( lvol_size_mb * 1024 * 1024 - ( second_fill + 1024 * 1024 ) )) "read" "0xcc" run_fio_test /dev/nbd0 $((second_fill + 1024 * 1024)) $((lvol_size_mb * 1024 * 1024 - (second_fill + 1024 * 1024))) "read" "0xcc"
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0 nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
# Clean up # Clean up
@ -285,7 +283,7 @@ function test_clone_decouple_parent() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test) lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Calculate size and create lvol bdev # Calculate size and create lvol bdev
lvol_size_mb=$(( 5 * LVS_DEFAULT_CLUSTER_SIZE_MB )) lvol_size_mb=$((5 * LVS_DEFAULT_CLUSTER_SIZE_MB))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t) lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t)
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
@ -295,17 +293,17 @@ function test_clone_decouple_parent() {
# Fill first four out of 5 clusters of clone with data of known pattern # Fill first four out of 5 clusters of clone with data of known pattern
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0 nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
begin_fill=0 begin_fill=0
end_fill=$(( lvol_size_mb * 4 * 1024 * 1024 / 5 )) end_fill=$((lvol_size_mb * 4 * 1024 * 1024 / 5))
run_fio_test /dev/nbd0 $begin_fill $end_fill "write" "0xdd" run_fio_test /dev/nbd0 $begin_fill $end_fill "write" "0xdd"
# Create snapshot (snapshot<-lvol_bdev) # Create snapshot (snapshot<-lvol_bdev)
snapshot_uuid=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test lvol_snapshot) snapshot_uuid=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test lvol_snapshot)
# Fill second and fourth cluster of clone with data of known pattern # Fill second and fourth cluster of clone with data of known pattern
start_fill=$(( lvol_size_mb * 1024 * 1024 / 5 )) start_fill=$((lvol_size_mb * 1024 * 1024 / 5))
fill_range=$start_fill fill_range=$start_fill
run_fio_test /dev/nbd0 $start_fill $fill_range "write" "0xcc" run_fio_test /dev/nbd0 $start_fill $fill_range "write" "0xcc"
start_fill=$(( lvol_size_mb * 3 * 1024 * 1024 / 5 )) start_fill=$((lvol_size_mb * 3 * 1024 * 1024 / 5))
run_fio_test /dev/nbd0 $start_fill $fill_range "write" "0xcc" run_fio_test /dev/nbd0 $start_fill $fill_range "write" "0xcc"
# Create snapshot (snapshot<-snapshot2<-lvol_bdev) # Create snapshot (snapshot<-snapshot2<-lvol_bdev)
@ -316,9 +314,9 @@ function test_clone_decouple_parent() {
run_fio_test /dev/nbd0 $start_fill $fill_range "write" "0xee" run_fio_test /dev/nbd0 $start_fill $fill_range "write" "0xee"
# Check data consistency # Check data consistency
pattern=( "0xdd" "0xee" "0xdd" "0xcc" "0x00" ) pattern=("0xdd" "0xee" "0xdd" "0xcc" "0x00")
for i in "${!pattern[@]}"; do for i in "${!pattern[@]}"; do
start_fill=$(( lvol_size_mb * i * 1024 * 1024 / 5 )) start_fill=$((lvol_size_mb * i * 1024 * 1024 / 5))
run_fio_test /dev/nbd0 $start_fill $fill_range "read" "${pattern[i]}" run_fio_test /dev/nbd0 $start_fill $fill_range "read" "${pattern[i]}"
done done
@ -341,7 +339,7 @@ function test_clone_decouple_parent() {
# Check data consistency # Check data consistency
for i in "${!pattern[@]}"; do for i in "${!pattern[@]}"; do
start_fill=$(( lvol_size_mb * i * 1024 * 1024 / 5 )) start_fill=$((lvol_size_mb * i * 1024 * 1024 / 5))
run_fio_test /dev/nbd0 $start_fill $fill_range "read" "${pattern[i]}" run_fio_test /dev/nbd0 $start_fill $fill_range "read" "${pattern[i]}"
done done
@ -361,7 +359,7 @@ function test_clone_decouple_parent() {
# Check data consistency # Check data consistency
for i in "${!pattern[@]}"; do for i in "${!pattern[@]}"; do
start_fill=$(( lvol_size_mb * i * 1024 * 1024 / 5 )) start_fill=$((lvol_size_mb * i * 1024 * 1024 / 5))
run_fio_test /dev/nbd0 $start_fill $fill_range "read" "${pattern[i]}" run_fio_test /dev/nbd0 $start_fill $fill_range "read" "${pattern[i]}"
done done
@ -378,7 +376,7 @@ function test_lvol_bdev_readonly() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test) lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Calculate size and create lvol bdev # Calculate size and create lvol bdev
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 2 )) ) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb") lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
@ -413,8 +411,8 @@ function test_delete_snapshot_with_clone() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test) lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Calculate size and create lvol bdev # Calculate size and create lvol bdev
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 2 )) ) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb") lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
@ -427,7 +425,7 @@ function test_delete_snapshot_with_clone() {
snapshot_uuid=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test lvol_snapshot) snapshot_uuid=$(rpc_cmd bdev_lvol_snapshot lvs_test/lvol_test lvol_snapshot)
# Fill first half of lvol bdev # Fill first half of lvol bdev
half_size=$(( lvol_size / 2 - 1 )) half_size=$((lvol_size / 2 - 1))
run_fio_test /dev/nbd0 0 $half_size "write" "0xee" run_fio_test /dev/nbd0 0 $half_size "write" "0xee"
# Check if snapshot was unchanged # Check if snapshot was unchanged
@ -447,7 +445,7 @@ function test_delete_snapshot_with_clone() {
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq '.[].driver_specific.lvol.clone' <<< "$lvol")" = "false" ] [ "$(jq '.[].driver_specific.lvol.clone' <<< "$lvol")" = "false" ]
run_fio_test /dev/nbd0 0 $half_size "read" "0xee" run_fio_test /dev/nbd0 0 $half_size "read" "0xee"
run_fio_test /dev/nbd0 $(( half_size + 1 )) $half_size "read" "0xcc" run_fio_test /dev/nbd0 $((half_size + 1)) $half_size "read" "0xcc"
# Clean up # Clean up
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0 nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
@ -463,8 +461,8 @@ function test_delete_snapshot_with_snapshot() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test) lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Calculate size and create lvol bdev # Calculate size and create lvol bdev
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 5 )) ) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 5)))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb") lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb")
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
@ -479,9 +477,9 @@ function test_delete_snapshot_with_snapshot() {
[ "$(jq '.[].driver_specific.lvol.base_snapshot' <<< "$lvol")" = '"lvol_snapshot"' ] [ "$(jq '.[].driver_specific.lvol.base_snapshot' <<< "$lvol")" = '"lvol_snapshot"' ]
# Fill second 1/3 of lvol bdev # Fill second 1/3 of lvol bdev
first_part=$(( lvol_size / 3 )) first_part=$((lvol_size / 3))
second_part=$(( lvol_size * 2 / 3 )) second_part=$((lvol_size * 2 / 3))
run_fio_test /dev/nbd0 $first_part $(( second_part - first_part )) "write" "0xee" run_fio_test /dev/nbd0 $first_part $((second_part - first_part)) "write" "0xee"
# Check if snapshot was unchanged # Check if snapshot was unchanged
nbd_start_disks "$DEFAULT_RPC_ADDR" "$snapshot_uuid" /dev/nbd1 nbd_start_disks "$DEFAULT_RPC_ADDR" "$snapshot_uuid" /dev/nbd1
@ -502,22 +500,22 @@ function test_delete_snapshot_with_snapshot() {
# Verify snapshots # Verify snapshots
run_fio_test /dev/nbd1 0 $size "read" "0xcc" run_fio_test /dev/nbd1 0 $size "read" "0xcc"
nbd_start_disks "$DEFAULT_RPC_ADDR" "$snapshot_uuid2" /dev/nbd2 nbd_start_disks "$DEFAULT_RPC_ADDR" "$snapshot_uuid2" /dev/nbd2
run_fio_test /dev/nbd2 0 $(( first_part - 1 )) "read" "0xcc" run_fio_test /dev/nbd2 0 $((first_part - 1)) "read" "0xcc"
run_fio_test /dev/nbd2 $first_part $(( second_part - first_part )) "read" "0xee" run_fio_test /dev/nbd2 $first_part $((second_part - first_part)) "read" "0xee"
run_fio_test /dev/nbd2 $second_part $(( lvol_size - second_part )) "read" "0xcc" run_fio_test /dev/nbd2 $second_part $((lvol_size - second_part)) "read" "0xcc"
# Verify lvol bdev # Verify lvol bdev
run_fio_test /dev/nbd0 $first_part $(( second_part - first_part )) "read" "0xee" run_fio_test /dev/nbd0 $first_part $((second_part - first_part)) "read" "0xee"
run_fio_test /dev/nbd0 $second_part $(( lvol_size - second_part )) "read" "0xcc" run_fio_test /dev/nbd0 $second_part $((lvol_size - second_part)) "read" "0xcc"
[ "$(jq '.[].driver_specific.lvol.clone' <<< "$lvol")" = "true" ] [ "$(jq '.[].driver_specific.lvol.clone' <<< "$lvol")" = "true" ]
[ "$(jq '.[].driver_specific.lvol.base_snapshot' <<< "$lvol")" = '"lvol_snapshot2"' ] [ "$(jq '.[].driver_specific.lvol.base_snapshot' <<< "$lvol")" = '"lvol_snapshot2"' ]
# Fill third part of lvol bdev # Fill third part of lvol bdev
run_fio_test /dev/nbd0 $second_part $(( lvol_size - second_part )) "write" "0xdd" run_fio_test /dev/nbd0 $second_part $((lvol_size - second_part)) "write" "0xdd"
# Verify snapshots # Verify snapshots
run_fio_test /dev/nbd1 0 $size "read" "0xcc" run_fio_test /dev/nbd1 0 $size "read" "0xcc"
run_fio_test /dev/nbd0 $second_part $(( lvol_size - second_part )) "read" "0xdd" run_fio_test /dev/nbd0 $second_part $((lvol_size - second_part)) "read" "0xdd"
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd2 nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd2
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd1 nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd1
@ -530,8 +528,8 @@ function test_delete_snapshot_with_snapshot() {
[ "$(jq '.[].driver_specific.lvol.clone' <<< "$lvol")" = "true" ] [ "$(jq '.[].driver_specific.lvol.clone' <<< "$lvol")" = "true" ]
[ "$(jq '.[].driver_specific.lvol.base_snapshot' <<< "$lvol")" = '"lvol_snapshot"' ] [ "$(jq '.[].driver_specific.lvol.base_snapshot' <<< "$lvol")" = '"lvol_snapshot"' ]
[ "$(jq '.[].driver_specific.lvol.clones|sort' <<< "$snapshot")" = "$(jq '.|sort' <<< '["lvol_test"]')" ] [ "$(jq '.[].driver_specific.lvol.clones|sort' <<< "$snapshot")" = "$(jq '.|sort' <<< '["lvol_test"]')" ]
run_fio_test /dev/nbd0 $first_part $(( second_part - first_part )) "read" "0xee" run_fio_test /dev/nbd0 $first_part $((second_part - first_part)) "read" "0xee"
run_fio_test /dev/nbd0 $second_part $(( lvol_size - second_part )) "read" "0xdd" run_fio_test /dev/nbd0 $second_part $((lvol_size - second_part)) "read" "0xdd"
# Clean up # Clean up
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0 nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
@ -561,7 +559,7 @@ function test_bdev_lvol_delete_ordering() {
[[ ${jq_out["name"]} == "$lvstore_name" ]] [[ ${jq_out["name"]} == "$lvstore_name" ]]
[[ ${jq_out["base_bdev"]} == "$malloc_dev" ]] [[ ${jq_out["base_bdev"]} == "$malloc_dev" ]]
size=$(( jq_out["free_clusters"] * jq_out["cluster_size"] / 4 / 1024**2 )) size=$((jq_out["free_clusters"] * jq_out["cluster_size"] / 4 / 1024 ** 2))
	bdev_uuid=$(rpc_cmd bdev_lvol_create -t -u "$lvstore_uuid" "$lbd_name" "$size")	bdev_uuid=$(rpc_cmd bdev_lvol_create -t -u "$lvstore_uuid" "$lbd_name" "$size")
@ -24,14 +24,14 @@ function test_tasting() {
rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid" && false
# Create a valid lvs # Create a valid lvs
lvs1_cluster_size=$(( 1 * 1024 * 1024 )) lvs1_cluster_size=$((1 * 1024 * 1024))
lvs2_cluster_size=$(( 32 * 1024 * 1024 )) lvs2_cluster_size=$((32 * 1024 * 1024))
lvs_uuid1=$(rpc_cmd bdev_lvol_create_lvstore aio_bdev0 lvs_test1 -c $lvs1_cluster_size) lvs_uuid1=$(rpc_cmd bdev_lvol_create_lvstore aio_bdev0 lvs_test1 -c $lvs1_cluster_size)
lvs_uuid2=$(rpc_cmd bdev_lvol_create_lvstore aio_bdev1 lvs_test2 -c $lvs2_cluster_size) lvs_uuid2=$(rpc_cmd bdev_lvol_create_lvstore aio_bdev1 lvs_test2 -c $lvs2_cluster_size)
# Create 5 lvols on first lvs # Create 5 lvols on first lvs
lvol_size_mb=$(round_down $(( LVS_DEFAULT_CAPACITY_MB / 10 ))) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 10)))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
for i in $(seq 1 5); do for i in $(seq 1 5); do
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid1" "lvol_test${i}" "$lvol_size_mb") lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid1" "lvol_test${i}" "$lvol_size_mb")
@ -41,12 +41,12 @@ function test_tasting() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ] [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test1/lvol_test${i}" ] [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test1/lvol_test${i}" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$AIO_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$AIO_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / AIO_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / AIO_BS))" ]
done done
# Create 5 lvols on second lvs # Create 5 lvols on second lvs
lvol2_size_mb=$(round_down $(( ( AIO_SIZE_MB - 16 ) / 5 )) 32) lvol2_size_mb=$(round_down $(((AIO_SIZE_MB - 16) / 5)) 32)
lvol2_size=$(( lvol2_size_mb * 1024 * 1024 )) lvol2_size=$((lvol2_size_mb * 1024 * 1024))
for i in $(seq 1 5); do for i in $(seq 1 5); do
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid2" "lvol_test${i}" "$lvol2_size_mb") lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid2" "lvol_test${i}" "$lvol2_size_mb")
@ -56,7 +56,7 @@ function test_tasting() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ] [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test2/lvol_test${i}" ] [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test2/lvol_test${i}" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$AIO_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$AIO_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol2_size / AIO_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol2_size / AIO_BS))" ]
done done
old_lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]') old_lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
@ -78,11 +78,11 @@ function test_tasting() {
new_lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]') new_lvols=$(rpc_cmd bdev_get_bdevs | jq -r '[ .[] | select(.product_name == "Logical Volume") ]')
[ "$(jq length <<< "$new_lvols")" == "10" ] [ "$(jq length <<< "$new_lvols")" == "10" ]
new_lvs=$(rpc_cmd bdev_lvol_get_lvstores | jq .) new_lvs=$(rpc_cmd bdev_lvol_get_lvstores | jq .)
if ! diff <(jq '. | sort' <<<"$old_lvs") <(jq '. | sort' <<<"$new_lvs"); then if ! diff <(jq '. | sort' <<< "$old_lvs") <(jq '. | sort' <<< "$new_lvs"); then
echo "ERROR: old and loaded lvol store is not the same" echo "ERROR: old and loaded lvol store is not the same"
return 1 return 1
fi fi
if ! diff <(jq '. | sort' <<<"$old_lvols") <(jq '. | sort' <<<"$new_lvols"); then if ! diff <(jq '. | sort' <<< "$old_lvols") <(jq '. | sort' <<< "$new_lvols"); then
echo "ERROR: old and loaded lvols are not the same" echo "ERROR: old and loaded lvols are not the same"
return 1 return 1
fi fi
@ -96,7 +96,7 @@ function test_tasting() {
[ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ] [ "$(jq -r '.[0].uuid' <<< "$lvol")" = "$lvol_uuid" ]
[ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test1/lvol_test${i}" ] [ "$(jq -r '.[0].aliases[0]' <<< "$lvol")" = "lvs_test1/lvol_test${i}" ]
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$AIO_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$AIO_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$(( lvol_size / AIO_BS ))" ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = "$((lvol_size / AIO_BS))" ]
done done
for i in $(seq 1 10); do for i in $(seq 1 10); do
@ -132,7 +132,7 @@ function test_delete_lvol_store_persistent_positive() {
get_bdev_jq bdev_get_bdevs -b "$bdev_aio_name" get_bdev_jq bdev_get_bdevs -b "$bdev_aio_name"
[[ ${jq_out["name"]} == "$bdev_aio_name" ]] [[ ${jq_out["name"]} == "$bdev_aio_name" ]]
[[ ${jq_out["product_name"]} == "AIO disk" ]] [[ ${jq_out["product_name"]} == "AIO disk" ]]
(( jq_out["block_size"] == bdev_block_size )) ((jq_out["block_size"] == bdev_block_size))
	lvstore_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$bdev_aio_name" "$lvstore_name")	lvstore_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$bdev_aio_name" "$lvstore_name")
@ -15,7 +15,7 @@ function test_thin_lvol_check_space() {
free_clusters_start="$(jq -r '.[0].free_clusters' <<< "$lvs")" free_clusters_start="$(jq -r '.[0].free_clusters' <<< "$lvs")"
# Create thin provision lvol bdev with size equals to lvol store space # Create thin provision lvol bdev with size equals to lvol store space
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB )) ) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB)))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t) lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t)
lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid") lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
@ -28,31 +28,31 @@ function test_thin_lvol_check_space() {
run_fio_test /dev/nbd0 0 $size "write" "0xcc" run_fio_test /dev/nbd0 0 $size "write" "0xcc"
lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid") lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
free_clusters_first_fio="$(jq -r '.[0].free_clusters' <<< "$lvs")" free_clusters_first_fio="$(jq -r '.[0].free_clusters' <<< "$lvs")"
[ $(( free_clusters_first_fio + 1 )) == $free_clusters_start ] [ $((free_clusters_first_fio + 1)) == $free_clusters_start ]
	# Write data (lvs cluster size) to lvol bdev with offset set to one and a half of the cluster size	# Write data (lvs cluster size) to lvol bdev with offset set to one and a half of the cluster size
offset=$(( LVS_DEFAULT_CLUSTER_SIZE * 3 / 2 )) offset=$((LVS_DEFAULT_CLUSTER_SIZE * 3 / 2))
size=$LVS_DEFAULT_CLUSTER_SIZE size=$LVS_DEFAULT_CLUSTER_SIZE
run_fio_test /dev/nbd0 $offset $size "write" "0xcc" run_fio_test /dev/nbd0 $offset $size "write" "0xcc"
lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid") lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
free_clusters_second_fio="$(jq -r '.[0].free_clusters' <<< "$lvs")" free_clusters_second_fio="$(jq -r '.[0].free_clusters' <<< "$lvs")"
[ $(( free_clusters_second_fio + 3 )) == $free_clusters_start ] [ $((free_clusters_second_fio + 3)) == $free_clusters_start ]
# write data to lvol bdev to the end of its size # write data to lvol bdev to the end of its size
size=$(( LVS_DEFAULT_CLUSTER_SIZE * free_clusters_first_fio )) size=$((LVS_DEFAULT_CLUSTER_SIZE * free_clusters_first_fio))
offset=$(( 3 * LVS_DEFAULT_CLUSTER_SIZE )) offset=$((3 * LVS_DEFAULT_CLUSTER_SIZE))
run_fio_test /dev/nbd0 $offset $size "write" "0xcc" run_fio_test /dev/nbd0 $offset $size "write" "0xcc"
lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid") lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
# Check that lvol store free clusters number equals to 0 # Check that lvol store free clusters number equals to 0
free_clusters_third_fio="$(jq -r '.[0].free_clusters' <<< "$lvs")" free_clusters_third_fio="$(jq -r '.[0].free_clusters' <<< "$lvs")"
[ $(( free_clusters_third_fio )) == 0 ] [ $((free_clusters_third_fio)) == 0 ]
nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0 nbd_stop_disks "$DEFAULT_RPC_ADDR" /dev/nbd0
rpc_cmd bdev_lvol_delete "$lvol_uuid" rpc_cmd bdev_lvol_delete "$lvol_uuid"
rpc_cmd bdev_get_bdevs -b "$lvol_uuid" && false rpc_cmd bdev_get_bdevs -b "$lvol_uuid" && false
lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid") lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
free_clusters_end="$(jq -r '.[0].free_clusters' <<< "$lvs")" free_clusters_end="$(jq -r '.[0].free_clusters' <<< "$lvs")"
[ $(( free_clusters_end )) == $free_clusters_start ] [ $((free_clusters_end)) == $free_clusters_start ]
# Clean up # Clean up
rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid" rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid"
@ -71,10 +71,10 @@ function test_thin_lvol_check_zeroes() {
# Create thick and thin provisioned lvol bdevs with size equals to lvol store space # Create thick and thin provisioned lvol bdevs with size equals to lvol store space
lbd_name0=lvol_test0 lbd_name0=lvol_test0
lbd_name1=lvol_test1 lbd_name1=lvol_test1
lvol_size_mb=$(( LVS_DEFAULT_CAPACITY_MB )) lvol_size_mb=$((LVS_DEFAULT_CAPACITY_MB))
# Round down lvol size to the nearest cluster size boundary # Round down lvol size to the nearest cluster size boundary
lvol_size_mb=$(( lvol_size_mb / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB )) lvol_size_mb=$((lvol_size_mb / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid0=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" $lbd_name0 "$lvol_size_mb") lvol_uuid0=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" $lbd_name0 "$lvol_size_mb")
lvol_uuid1=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" $lbd_name1 "$lvol_size_mb" -t) lvol_uuid1=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" $lbd_name1 "$lvol_size_mb" -t)
@ -104,10 +104,10 @@ function test_thin_lvol_check_integrity() {
lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test) lvs_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_name" lvs_test)
# Create thin provisioned lvol bdev with size equals to lvol store space # Create thin provisioned lvol bdev with size equals to lvol store space
lvol_size_mb=$(( LVS_DEFAULT_CAPACITY_MB )) lvol_size_mb=$((LVS_DEFAULT_CAPACITY_MB))
# Round down lvol size to the nearest cluster size boundary # Round down lvol size to the nearest cluster size boundary
lvol_size_mb=$(( lvol_size_mb / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB )) lvol_size_mb=$((lvol_size_mb / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t) lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t)
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0 nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" /dev/nbd0
@ -127,8 +127,8 @@ function test_thin_lvol_resize() {
# Construct thin provisioned lvol bdevs on created lvol store # Construct thin provisioned lvol bdevs on created lvol store
# with size equal to 50% of lvol store # with size equal to 50% of lvol store
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 2 )) ) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 2)))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t) lvol_uuid=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test "$lvol_size_mb" -t)
# Fill all free space of lvol bdev with data # Fill all free space of lvol bdev with data
@ -140,15 +140,15 @@ function test_thin_lvol_resize() {
lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid") lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
free_clusters_start="$(jq -r '.[0].free_clusters' <<< "$lvs")" free_clusters_start="$(jq -r '.[0].free_clusters' <<< "$lvs")"
# Resize bdev to full size of lvs # Resize bdev to full size of lvs
lvol_size_full_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB )) ) lvol_size_full_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB)))
lvol_size_full=$(( lvol_size_full_mb * 1024 * 1024 )) lvol_size_full=$((lvol_size_full_mb * 1024 * 1024))
rpc_cmd bdev_lvol_resize $lvol_uuid $lvol_size_full_mb rpc_cmd bdev_lvol_resize $lvol_uuid $lvol_size_full_mb
# Check if bdev size changed (total_data_clusters*cluster_size # Check if bdev size changed (total_data_clusters*cluster_size
# equals to num_blocks*block_size) # equals to num_blocks*block_size)
lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid") lvol=$(rpc_cmd bdev_get_bdevs -b "$lvol_uuid")
[ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ] [ "$(jq -r '.[0].block_size' <<< "$lvol")" = "$MALLOC_BS" ]
[ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = $(( lvol_size_full / MALLOC_BS )) ] [ "$(jq -r '.[0].num_blocks' <<< "$lvol")" = $((lvol_size_full / MALLOC_BS)) ]
# Check if free_clusters on lvs remain unaffected # Check if free_clusters on lvs remain unaffected
lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid") lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
@ -167,13 +167,13 @@ function test_thin_lvol_resize() {
[ $free_clusters_start == 0 ] [ $free_clusters_start == 0 ]
# Resize bdev to 25% of lvs and check if it ended with success # Resize bdev to 25% of lvs and check if it ended with success
lvol_size_quarter_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB / 4 )) ) lvol_size_quarter_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB / 4)))
rpc_cmd bdev_lvol_resize $lvol_uuid $lvol_size_quarter_mb rpc_cmd bdev_lvol_resize $lvol_uuid $lvol_size_quarter_mb
# Check free clusters on lvs # Check free clusters on lvs
lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid") lvs=$(rpc_cmd bdev_lvol_get_lvstores -u "$lvs_uuid")
free_clusters_resize_quarter="$(jq -r '.[0].free_clusters' <<< "$lvs")" free_clusters_resize_quarter="$(jq -r '.[0].free_clusters' <<< "$lvs")"
free_clusters_expected=$(( (lvol_size_full_mb - lvol_size_quarter_mb) / LVS_DEFAULT_CLUSTER_SIZE_MB )) free_clusters_expected=$(((lvol_size_full_mb - lvol_size_quarter_mb) / LVS_DEFAULT_CLUSTER_SIZE_MB))
[ $free_clusters_resize_quarter == $free_clusters_expected ] [ $free_clusters_resize_quarter == $free_clusters_expected ]
rpc_cmd bdev_lvol_delete "$lvol_uuid" rpc_cmd bdev_lvol_delete "$lvol_uuid"
@ -187,16 +187,16 @@ function test_thin_overprovisioning() {
# Construct two thin provisioned lvol bdevs on created lvol store # Construct two thin provisioned lvol bdevs on created lvol store
# with size equal to free lvol store size # with size equal to free lvol store size
lvol_size_mb=$( round_down $(( LVS_DEFAULT_CAPACITY_MB )) ) lvol_size_mb=$(round_down $((LVS_DEFAULT_CAPACITY_MB)))
lvol_size=$(( lvol_size_mb * 1024 * 1024 )) lvol_size=$((lvol_size_mb * 1024 * 1024))
lvol_uuid1=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test1 "$lvol_size_mb" -t) lvol_uuid1=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test1 "$lvol_size_mb" -t)
lvol_uuid2=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test2 "$lvol_size_mb" -t) lvol_uuid2=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test2 "$lvol_size_mb" -t)
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid1" /dev/nbd0 nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid1" /dev/nbd0
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid2" /dev/nbd1 nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid2" /dev/nbd1
# Fill first bdev to 50% of its space with specific pattern # Fill first bdev to 50% of its space with specific pattern
fill_size=$(( lvol_size_mb * 5 / 10 / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB )) fill_size=$((lvol_size_mb * 5 / 10 / LVS_DEFAULT_CLUSTER_SIZE_MB * LVS_DEFAULT_CLUSTER_SIZE_MB))
fill_size=$(( fill_size * 1024 * 1024)) fill_size=$((fill_size * 1024 * 1024))
run_fio_test /dev/nbd0 0 $fill_size "write" "0xcc" run_fio_test /dev/nbd0 0 $fill_size "write" "0xcc"
# Fill second bdev up to 50% of its space # Fill second bdev up to 50% of its space
@ -205,7 +205,7 @@ function test_thin_overprovisioning() {
# Fill rest of second bdev # Fill rest of second bdev
# Check that error message occurred while filling second bdev with data # Check that error message occurred while filling second bdev with data
offset=$fill_size offset=$fill_size
fill_size_rest=$(( lvol_size - fill_size )) fill_size_rest=$((lvol_size - fill_size))
run_fio_test /dev/nbd1 "$offset" "$fill_size_rest" "write" "0xcc" && false run_fio_test /dev/nbd1 "$offset" "$fill_size_rest" "write" "0xcc" && false
# Check if data on first disk stayed unchanged # Check if data on first disk stayed unchanged

View File

@ -26,7 +26,7 @@ function confirm_abi_deps() {
return 1 return 1
fi fi
cat <<EOF > ${suppression_file} cat << EOF > ${suppression_file}
[suppress_variable] [suppress_variable]
name = SPDK_LOG_BDEV name = SPDK_LOG_BDEV
[suppress_variable] [suppress_variable]
@ -99,7 +99,7 @@ EOF
touch $fail_file touch $fail_file
fi fi
if [ "$new_so_maj" != "$old_so_maj" ] && [ "$new_so_min" != "0" ]; then if [ "$new_so_maj" != "$old_so_maj" ] && [ "$new_so_min" != "0" ]; then
echo "SO major version for $so_file was bumped. Please reset the minor version to 0." echo "SO major version for $so_file was bumped. Please reset the minor version to 0."
touch $fail_file touch $fail_file
fi fi
@ -107,7 +107,7 @@ EOF
continue continue
fi fi
processed_so=$((processed_so+1)) processed_so=$((processed_so + 1))
done done
rm -f $suppression_file rm -f $suppression_file
echo "Processed $processed_so objects." echo "Processed $processed_so objects."
@ -131,7 +131,7 @@ function replace_defined_variables() {
for dep in "${bad_values[@]}"; do for dep in "${bad_values[@]}"; do
dep_def_arr=($(grep -v "#" $libdeps_file | grep "${dep}" | cut -d "=" -f 2 | xargs)) dep_def_arr=($(grep -v "#" $libdeps_file | grep "${dep}" | cut -d "=" -f 2 | xargs))
new_values=($(replace_defined_variables "${dep_def_arr[@]}")) new_values=($(replace_defined_variables "${dep_def_arr[@]}"))
good_values=( "${good_values[@]}" "${new_values[@]}" ) good_values=("${good_values[@]}" "${new_values[@]}")
done done
echo ${good_values[*]} echo ${good_values[*]}
} }
@ -175,9 +175,9 @@ function confirm_deps() {
done done
IFS=$'\n' IFS=$'\n'
# Ignore any event_* dependencies. Those are based on the subsystem configuration and not readelf. # Ignore any event_* dependencies. Those are based on the subsystem configuration and not readelf.
lib_make_deps=( $(printf "%s\n" "${lib_make_deps[@]}" | sort | grep -v "event_") ) lib_make_deps=($(printf "%s\n" "${lib_make_deps[@]}" | sort | grep -v "event_"))
# Ignore the env_dpdk readelf dependency. We don't want people explicitly linking against it. # Ignore the env_dpdk readelf dependency. We don't want people explicitly linking against it.
dep_names=( $(printf "%s\n" "${dep_names[@]}" | sort | uniq | grep -v "env_dpdk") ) dep_names=($(printf "%s\n" "${dep_names[@]}" | sort | uniq | grep -v "env_dpdk"))
unset IFS unset IFS
diff=$(echo "${dep_names[@]}" "${lib_make_deps[@]}" | tr ' ' '\n' | sort | uniq -u) diff=$(echo "${dep_names[@]}" "${lib_make_deps[@]}" | tr ' ' '\n' | sort | uniq -u)
if [ "$diff" != "" ]; then if [ "$diff" != "" ]; then
@ -224,7 +224,10 @@ if grep -q 'CONFIG_VHOST_INTERNAL_LIB?=n' $rootdir/mk/config.mk; then
IGNORED_LIBS+=("rte_vhost") IGNORED_LIBS+=("rte_vhost")
fi fi
( for lib in $SPDK_LIBS; do confirm_deps $lib & done; wait ) (
for lib in $SPDK_LIBS; do confirm_deps $lib & done
wait
)
$MAKE $MAKEFLAGS clean $MAKE $MAKEFLAGS clean
git checkout "$rootdir/mk/spdk.lib.mk" git checkout "$rootdir/mk/spdk.lib.mk"
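The reshaped subshell above launches one confirm_deps job per library in the background and waits for all of them before moving on. A minimal sketch of the same fan-out-and-wait pattern, using a stand-in worker function:
check_one() { sleep 1; echo "checked $1"; } # placeholder for confirm_deps
(
    for item in a b c; do check_one "$item" & done
    wait # returns once every backgrounded job has finished
)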

View File

@ -12,7 +12,7 @@ fi
function ssh_vm() { function ssh_vm() {
xtrace_disable xtrace_disable
sshpass -p "$password" ssh -o PubkeyAuthentication=no \ sshpass -p "$password" ssh -o PubkeyAuthentication=no \
-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 10022 root@localhost "$@" -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 10022 root@localhost "$@"
xtrace_restore xtrace_restore
} }
@ -110,7 +110,10 @@ ssh_vm 'echo ready'
timing_exit wait_for_vm timing_exit wait_for_vm
timing_enter copy_repo timing_enter copy_repo
(cd "$rootdir"; tar -cf - .) | (ssh_vm 'tar -xf -') (
cd "$rootdir"
tar -cf - .
) | (ssh_vm 'tar -xf -')
timing_exit copy_repo timing_exit copy_repo
devices_initialization devices_initialization
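The tar pipe above streams the whole working tree into the VM without writing an intermediate archive. A rough standalone equivalent, where the host name and directories are placeholders:
# Copy ./src into /dst on a remote machine; nothing is written to local disk.
(
    cd ./src
    tar -cf - .
) | ssh user@host.example 'cd /dst && tar -xf -'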

View File

@ -48,7 +48,7 @@ trap 'killprocess $example_pid; exit 1' SIGINT SIGTERM EXIT
i=0 i=0
while ! grep "Starting I/O" log.txt; do while ! grep "Starting I/O" log.txt; do
[ $i -lt 20 ] || break [ $i -lt 20 ] || break
i=$((i+1)) i=$((i + 1))
sleep 1 sleep 1
done done
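This is the usual bounded-poll idiom: repeat a check once per second and give up after a fixed number of attempts. A standalone sketch against an assumed marker file:
i=0
while ! [ -e /tmp/ready ]; do # /tmp/ready is an illustrative condition
    [ $i -lt 20 ] || break    # stop waiting after roughly 20 seconds
    i=$((i + 1))
    sleep 1
done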

View File

@ -5,7 +5,7 @@ rootdir=$(readlink -f $testdir/../..)
source $rootdir/scripts/common.sh source $rootdir/scripts/common.sh
source $rootdir/test/common/autotest_common.sh source $rootdir/test/common/autotest_common.sh
function nvme_identify { function nvme_identify() {
$rootdir/examples/nvme/identify/identify -i 0 $rootdir/examples/nvme/identify/identify -i 0
for bdf in $(get_nvme_bdfs); do for bdf in $(get_nvme_bdfs); do
$rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:${bdf}" -i 0 $rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:${bdf}" -i 0
@ -13,7 +13,7 @@ function nvme_identify {
timing_exit identify timing_exit identify
} }
function nvme_perf { function nvme_perf() {
# enable no shutdown notification option # enable no shutdown notification option
$rootdir/examples/nvme/perf/perf -q 128 -w read -o 12288 -t 1 -LL -i 0 -N $rootdir/examples/nvme/perf/perf -q 128 -w read -o 12288 -t 1 -LL -i 0 -N
$rootdir/examples/nvme/perf/perf -q 128 -w write -o 12288 -t 1 -LL -i 0 $rootdir/examples/nvme/perf/perf -q 128 -w write -o 12288 -t 1 -LL -i 0
@ -23,7 +23,7 @@ function nvme_perf {
fi fi
} }
function nvme_fio_test { function nvme_fio_test() {
PLUGIN_DIR=$rootdir/examples/nvme/fio_plugin PLUGIN_DIR=$rootdir/examples/nvme/fio_plugin
for bdf in $(get_nvme_bdfs); do for bdf in $(get_nvme_bdfs); do
for blkname in $(get_nvme_name_from_bdf $bdf); do for blkname in $(get_nvme_name_from_bdf $bdf); do
@ -32,7 +32,7 @@ function nvme_fio_test {
done done
} }
function nvme_multi_secondary { function nvme_multi_secondary() {
$rootdir/examples/nvme/perf/perf -i 0 -q 16 -w read -o 4096 -t 3 -c 0x1 & $rootdir/examples/nvme/perf/perf -i 0 -q 16 -w read -o 4096 -t 3 -c 0x1 &
pid0=$! pid0=$!
$rootdir/examples/nvme/perf/perf -i 0 -q 16 -w read -o 4096 -t 3 -c 0x2 & $rootdir/examples/nvme/perf/perf -i 0 -q 16 -w read -o 4096 -t 3 -c 0x2 &

View File

@ -14,10 +14,10 @@ NVME_FIO_RESULTS=$BASE_DIR/result.json
declare -A KERNEL_ENGINES declare -A KERNEL_ENGINES
KERNEL_ENGINES=( KERNEL_ENGINES=(
["kernel-libaio"]="--ioengine=libaio" ["kernel-libaio"]="--ioengine=libaio"
["kernel-classic-polling"]="--ioengine=pvsync2 --hipri=100" ["kernel-classic-polling"]="--ioengine=pvsync2 --hipri=100"
["kernel-hybrid-polling"]="--ioengine=pvsync2 --hipri=100" ["kernel-hybrid-polling"]="--ioengine=pvsync2 --hipri=100"
["kernel-io-uring"]="--ioengine=io_uring" ) ["kernel-io-uring"]="--ioengine=io_uring")
RW=randrw RW=randrw
MIX=100 MIX=100
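KERNEL_ENGINES maps each kernel I/O engine name to the extra fio flags it requires. A small sketch of how such an associative array is typically consumed; the key names here are assumptions for illustration:
declare -A engines=(["libaio"]="--ioengine=libaio" ["io-uring"]="--ioengine=io_uring")
sel="io-uring"              # pick an engine
extra_opts=${engines[$sel]} # -> --ioengine=io_uring
for name in "${!engines[@]}"; do
    echo "$name: ${engines[$name]}" # iterate every defined engine
done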
@ -45,27 +45,27 @@ function is_bdf_not_mounted() {
return $mountpoints return $mountpoints
} }
function get_cores(){ function get_cores() {
local cpu_list="$1" local cpu_list="$1"
for cpu in ${cpu_list//,/ }; do for cpu in ${cpu_list//,/ }; do
echo $cpu echo $cpu
done done
} }
function get_cores_numa_node(){ function get_cores_numa_node() {
local cores=$1 local cores=$1
for core in $cores; do for core in $cores; do
lscpu -p=cpu,node | grep "^$core\b" | awk -F ',' '{print $2}' lscpu -p=cpu,node | grep "^$core\b" | awk -F ',' '{print $2}'
done done
} }
function get_numa_node(){ function get_numa_node() {
local plugin=$1 local plugin=$1
local disks=$2 local disks=$2
if [[ "$plugin" =~ "nvme" ]]; then if [[ "$plugin" =~ "nvme" ]]; then
for bdf in $disks; do for bdf in $disks; do
local driver local driver
driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent |awk -F"=" '{print $2}') driver=$(grep DRIVER /sys/bus/pci/devices/$bdf/uevent | awk -F"=" '{print $2}')
# Use this check to omit blacklisted devices ( not bound to driver with setup.sh script ) # Use this check to omit blacklisted devices ( not bound to driver with setup.sh script )
if [ "$driver" = "vfio-pci" ] || [ "$driver" = "uio_pci_generic" ]; then if [ "$driver" = "vfio-pci" ] || [ "$driver" = "uio_pci_generic" ]; then
cat /sys/bus/pci/devices/$bdf/numa_node cat /sys/bus/pci/devices/$bdf/numa_node
@ -89,7 +89,7 @@ function get_numa_node(){
fi fi
} }
function get_disks(){ function get_disks() {
local plugin=$1 local plugin=$1
if [[ "$plugin" =~ "nvme" ]]; then if [[ "$plugin" =~ "nvme" ]]; then
for bdf in $(get_nvme_bdfs); do for bdf in $(get_nvme_bdfs); do
@ -111,23 +111,22 @@ function get_disks(){
fi fi
} }
function get_disks_on_numa(){ function get_disks_on_numa() {
local devs=($1) local devs=($1)
local numas=($2) local numas=($2)
local numa_no=$3 local numa_no=$3
local disks_on_numa="" local disks_on_numa=""
local i local i
for (( i=0; i<${#devs[@]}; i++ )) for ((i = 0; i < ${#devs[@]}; i++)); do
do
if [ ${numas[$i]} = $numa_no ]; then if [ ${numas[$i]} = $numa_no ]; then
disks_on_numa=$((disks_on_numa+1)) disks_on_numa=$((disks_on_numa + 1))
fi fi
done done
echo $disks_on_numa echo $disks_on_numa
} }
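get_disks_on_numa walks two parallel arrays, the device list and the NUMA node of each device, and counts how many devices sit on the requested node. A compact standalone sketch with made-up values:
devs=(nvme0n1 nvme1n1 nvme2n1) # device i lives on node numas[i]
numas=(0 1 0)
node=0 count=0
for ((i = 0; i < ${#devs[@]}; i++)); do
    [ "${numas[$i]}" = "$node" ] && count=$((count + 1))
done
echo "$count" # prints 2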
function create_fio_config(){ function create_fio_config() {
local disk_no=$1 local disk_no=$1
local plugin=$2 local plugin=$2
local disks=($3) local disks=($3)
@ -139,35 +138,32 @@ function create_fio_config(){
local cores_numa local cores_numa
cores_numa=($(get_cores_numa_node "$5")) cores_numa=($(get_cores_numa_node "$5"))
local disks_per_core=$((disk_no/no_cores)) local disks_per_core=$((disk_no / no_cores))
local disks_per_core_mod=$((disk_no%no_cores)) local disks_per_core_mod=$((disk_no % no_cores))
# For kernel driver, each disk will be aligned with all cpus on the same NUMA node # For kernel driver, each disk will be aligned with all cpus on the same NUMA node
if [[ "$plugin" =~ "kernel" ]]; then if [[ "$plugin" =~ "kernel" ]]; then
for (( i=0; i<disk_no; i++ )) for ((i = 0; i < disk_no; i++)); do
do
sed -i -e "\$a[filename${i}]" $BASE_DIR/config.fio sed -i -e "\$a[filename${i}]" $BASE_DIR/config.fio
filename="/dev/${disks[$i]}" filename="/dev/${disks[$i]}"
sed -i -e "\$afilename=$filename" $BASE_DIR/config.fio sed -i -e "\$afilename=$filename" $BASE_DIR/config.fio
cpu_used="" cpu_used=""
for (( j=0; j<no_cores; j++ )) for ((j = 0; j < no_cores; j++)); do
do core_numa=${cores_numa[$j]}
core_numa=${cores_numa[$j]} if [ "${disks_numa[$i]}" = "$core_numa" ]; then
if [ "${disks_numa[$i]}" = "$core_numa" ]; then cpu_used+="${cores[$j]},"
cpu_used+="${cores[$j]}," fi
fi done
done sed -i -e "\$acpus_allowed=$cpu_used" $BASE_DIR/config.fio
sed -i -e "\$acpus_allowed=$cpu_used" $BASE_DIR/config.fio echo "" >> $BASE_DIR/config.fio
echo "" >> $BASE_DIR/config.fio
done done
else else
for (( i=0; i<no_cores; i++ )) for ((i = 0; i < no_cores; i++)); do
do
core_numa=${cores_numa[$i]} core_numa=${cores_numa[$i]}
total_disks_per_core=$disks_per_core total_disks_per_core=$disks_per_core
if [ "$disks_per_core_mod" -gt "0" ]; then if [ "$disks_per_core_mod" -gt "0" ]; then
total_disks_per_core=$((disks_per_core+1)) total_disks_per_core=$((disks_per_core + 1))
disks_per_core_mod=$((disks_per_core_mod-1)) disks_per_core_mod=$((disks_per_core_mod - 1))
fi fi
if [ "$total_disks_per_core" = "0" ]; then if [ "$total_disks_per_core" = "0" ]; then
@ -177,11 +173,11 @@ function create_fio_config(){
sed -i -e "\$a[filename${i}]" $BASE_DIR/config.fio sed -i -e "\$a[filename${i}]" $BASE_DIR/config.fio
#use cpus_allowed as cpumask works only for cores 1-32 #use cpus_allowed as cpumask works only for cores 1-32
sed -i -e "\$acpus_allowed=${cores[$i]}" $BASE_DIR/config.fio sed -i -e "\$acpus_allowed=${cores[$i]}" $BASE_DIR/config.fio
m=0 #counter of disks per cpu core numa m=0 #counter of disks per cpu core numa
n=0 #counter of all disks n=0 #counter of all disks
while [ "$m" -lt "$total_disks_per_core" ]; do while [ "$m" -lt "$total_disks_per_core" ]; do
if [ ${disks_numa[$n]} = $core_numa ]; then if [ ${disks_numa[$n]} = $core_numa ]; then
m=$((m+1)) m=$((m + 1))
if [[ "$plugin" = "spdk-plugin-nvme" ]]; then if [[ "$plugin" = "spdk-plugin-nvme" ]]; then
filename='trtype=PCIe traddr='${disks[$n]//:/.}' ns=1' filename='trtype=PCIe traddr='${disks[$n]//:/.}' ns=1'
elif [[ "$plugin" = "spdk-plugin-bdev" ]]; then elif [[ "$plugin" = "spdk-plugin-bdev" ]]; then
@ -191,7 +187,7 @@ function create_fio_config(){
#Set numa of n'th disk to "x" to mark it as claimed #Set numa of n'th disk to "x" to mark it as claimed
disks_numa[$n]="x" disks_numa[$n]="x"
fi fi
n=$((n+1)) n=$((n + 1))
# If there are no more disks on the same numa node as the cpu numa node, switch to the other numa node. # If there are no more disks on the same numa node as the cpu numa node, switch to the other numa node.
if [ $n -ge $total_disks ]; then if [ $n -ge $total_disks ]; then
if [ "$core_numa" = "1" ]; then if [ "$core_numa" = "1" ]; then
@ -231,66 +227,66 @@ function preconditioning() {
rm -f $BASE_DIR/config.fio rm -f $BASE_DIR/config.fio
} }
function get_results(){ function get_results() {
local reads_pct=$2 local reads_pct=$2
local writes_pct=$((100-$2)) local writes_pct=$((100 - $2))
case "$1" in case "$1" in
iops) iops)
iops=$(jq -r '.jobs[] | (.read.iops + .write.iops)' $NVME_FIO_RESULTS) iops=$(jq -r '.jobs[] | (.read.iops + .write.iops)' $NVME_FIO_RESULTS)
iops=${iops%.*} iops=${iops%.*}
echo $iops echo $iops
;; ;;
mean_lat_usec) mean_lat_usec)
mean_lat=$(jq -r ".jobs[] | (.read.lat_ns.mean * $reads_pct + .write.lat_ns.mean * $writes_pct)" $NVME_FIO_RESULTS) mean_lat=$(jq -r ".jobs[] | (.read.lat_ns.mean * $reads_pct + .write.lat_ns.mean * $writes_pct)" $NVME_FIO_RESULTS)
mean_lat=${mean_lat%.*} mean_lat=${mean_lat%.*}
echo $(( mean_lat/100000 )) echo $((mean_lat / 100000))
;; ;;
p99_lat_usec) p99_lat_usec)
p99_lat=$(jq -r ".jobs[] | (.read.clat_ns.percentile.\"99.000000\" * $reads_pct + .write.clat_ns.percentile.\"99.000000\" * $writes_pct)" $NVME_FIO_RESULTS) p99_lat=$(jq -r ".jobs[] | (.read.clat_ns.percentile.\"99.000000\" * $reads_pct + .write.clat_ns.percentile.\"99.000000\" * $writes_pct)" $NVME_FIO_RESULTS)
p99_lat=${p99_lat%.*} p99_lat=${p99_lat%.*}
echo $(( p99_lat/100000 )) echo $((p99_lat / 100000))
;; ;;
p99_99_lat_usec) p99_99_lat_usec)
p99_99_lat=$(jq -r ".jobs[] | (.read.clat_ns.percentile.\"99.990000\" * $reads_pct + .write.clat_ns.percentile.\"99.990000\" * $writes_pct)" $NVME_FIO_RESULTS) p99_99_lat=$(jq -r ".jobs[] | (.read.clat_ns.percentile.\"99.990000\" * $reads_pct + .write.clat_ns.percentile.\"99.990000\" * $writes_pct)" $NVME_FIO_RESULTS)
p99_99_lat=${p99_99_lat%.*} p99_99_lat=${p99_99_lat%.*}
echo $(( p99_99_lat/100000 )) echo $((p99_99_lat / 100000))
;; ;;
stdev_usec) stdev_usec)
stdev=$(jq -r ".jobs[] | (.read.clat_ns.stddev * $reads_pct + .write.clat_ns.stddev * $writes_pct)" $NVME_FIO_RESULTS) stdev=$(jq -r ".jobs[] | (.read.clat_ns.stddev * $reads_pct + .write.clat_ns.stddev * $writes_pct)" $NVME_FIO_RESULTS)
stdev=${stdev%.*} stdev=${stdev%.*}
echo $(( stdev/100000 )) echo $((stdev / 100000))
;; ;;
mean_slat_usec) mean_slat_usec)
mean_slat=$(jq -r ".jobs[] | (.read.slat_ns.mean * $reads_pct + .write.slat_ns.mean * $writes_pct)" $NVME_FIO_RESULTS) mean_slat=$(jq -r ".jobs[] | (.read.slat_ns.mean * $reads_pct + .write.slat_ns.mean * $writes_pct)" $NVME_FIO_RESULTS)
mean_slat=${mean_slat%.*} mean_slat=${mean_slat%.*}
echo $(( mean_slat/100000 )) echo $((mean_slat / 100000))
;; ;;
mean_clat_usec) mean_clat_usec)
mean_clat=$(jq -r ".jobs[] | (.read.clat_ns.mean * $reads_pct + .write.clat_ns.mean * $writes_pct)" $NVME_FIO_RESULTS) mean_clat=$(jq -r ".jobs[] | (.read.clat_ns.mean * $reads_pct + .write.clat_ns.mean * $writes_pct)" $NVME_FIO_RESULTS)
mean_clat=${mean_clat%.*} mean_clat=${mean_clat%.*}
echo $(( mean_clat/100000 )) echo $((mean_clat / 100000))
;; ;;
bw_Kibs) bw_Kibs)
bw=$(jq -r ".jobs[] | (.read.bw + .write.bw)" $NVME_FIO_RESULTS) bw=$(jq -r ".jobs[] | (.read.bw + .write.bw)" $NVME_FIO_RESULTS)
bw=${bw%.*} bw=${bw%.*}
echo $(( bw )) echo $((bw))
;; ;;
esac esac
} }
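get_results weights the read and write statistics by the read/write percentages and then divides by 100000: the two percentages add up to 100 and fio reports nanoseconds, so dividing by 100 (percent) and by 1000 (ns to usec) collapses into a single division. A worked sketch with assumed latencies:
reads_pct=70 writes_pct=30
read_lat_ns=120000 write_lat_ns=200000 # assumed fio mean latencies
weighted=$((read_lat_ns * reads_pct + write_lat_ns * writes_pct))
echo $((weighted / 100000))            # 144 usec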
function get_bdevperf_results(){ function get_bdevperf_results() {
case "$1" in case "$1" in
iops) iops)
iops=$(grep Total $NVME_FIO_RESULTS | awk -F 'Total' '{print $2}' | awk '{print $2}') iops=$(grep Total $NVME_FIO_RESULTS | awk -F 'Total' '{print $2}' | awk '{print $2}')
iops=${iops%.*} iops=${iops%.*}
echo $iops echo $iops
;; ;;
bw_Kibs) bw_Kibs)
bw_MBs=$(grep Total $NVME_FIO_RESULTS | awk -F 'Total' '{print $2}' | awk '{print $4}') bw_MBs=$(grep Total $NVME_FIO_RESULTS | awk -F 'Total' '{print $2}' | awk '{print $4}')
bw_MBs=${bw_MBs%.*} bw_MBs=${bw_MBs%.*}
echo $(( bw_MBs * 1024 )) echo $((bw_MBs * 1024))
;; ;;
esac esac
} }
@ -301,7 +297,7 @@ function get_nvmeperf_results() {
local max_lat_usec local max_lat_usec
local min_lat_usec local min_lat_usec
read -r iops bw_MBs mean_lat_usec min_lat_usec max_lat_usec<<< $(tr -s " " < $NVME_FIO_RESULTS | grep -oP "(?<=Total : )(.*+)") read -r iops bw_MBs mean_lat_usec min_lat_usec max_lat_usec <<< $(tr -s " " < $NVME_FIO_RESULTS | grep -oP "(?<=Total : )(.*+)")
# We need to get rid of the decimal places due # We need to get rid of the decimal places due
# to use of arithmetic expressions instead of "bc" for calculations # to use of arithmetic expressions instead of "bc" for calculations
@ -314,27 +310,25 @@ function get_nvmeperf_results() {
echo "$iops $(bc <<< "$bw_MBs * 1024") $mean_lat_usec $min_lat_usec $max_lat_usec" echo "$iops $(bc <<< "$bw_MBs * 1024") $mean_lat_usec $min_lat_usec $max_lat_usec"
} }
function run_spdk_nvme_fio(){ function run_spdk_nvme_fio() {
local plugin=$1 local plugin=$1
echo "** Running fio test, this can take a while, depending on the run-time and ramp-time setting." echo "** Running fio test, this can take a while, depending on the run-time and ramp-time setting."
if [[ "$plugin" = "spdk-plugin-nvme" ]]; then if [[ "$plugin" = "spdk-plugin-nvme" ]]; then
LD_PRELOAD=$PLUGIN_DIR_NVME/fio_plugin $FIO_BIN $BASE_DIR/config.fio --output-format=json\ LD_PRELOAD=$PLUGIN_DIR_NVME/fio_plugin $FIO_BIN $BASE_DIR/config.fio --output-format=json "${@:2}" --ioengine=spdk
"${@:2}" --ioengine=spdk
elif [[ "$plugin" = "spdk-plugin-bdev" ]]; then elif [[ "$plugin" = "spdk-plugin-bdev" ]]; then
LD_PRELOAD=$PLUGIN_DIR_BDEV/fio_plugin $FIO_BIN $BASE_DIR/config.fio --output-format=json\ LD_PRELOAD=$PLUGIN_DIR_BDEV/fio_plugin $FIO_BIN $BASE_DIR/config.fio --output-format=json "${@:2}" --ioengine=spdk_bdev --spdk_json_conf=$BASE_DIR/bdev.conf --spdk_mem=4096
"${@:2}" --ioengine=spdk_bdev --spdk_json_conf=$BASE_DIR/bdev.conf --spdk_mem=4096
fi fi
sleep 1 sleep 1
} }
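run_spdk_nvme_fio preloads the SPDK fio plugin so a stock fio binary can submit I/O through --ioengine=spdk. A stripped-down sketch of the invocation, with the paths left as placeholders:
FIO_BIN=/usr/bin/fio                          # assumed location of fio
PLUGIN=./examples/nvme/fio_plugin/fio_plugin  # assumed plugin build path
LD_PRELOAD=$PLUGIN $FIO_BIN ./config.fio --output-format=json --ioengine=spdk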
function run_nvme_fio(){ function run_nvme_fio() {
echo "** Running fio test, this can take a while, depending on the run-time and ramp-time setting." echo "** Running fio test, this can take a while, depending on the run-time and ramp-time setting."
$FIO_BIN $BASE_DIR/config.fio --output-format=json "$@" $FIO_BIN $BASE_DIR/config.fio --output-format=json "$@"
sleep 1 sleep 1
} }
function run_bdevperf(){ function run_bdevperf() {
echo "** Running bdevperf test, this can take a while, depending on the run-time setting." echo "** Running bdevperf test, this can take a while, depending on the run-time setting."
$BDEVPERF_DIR/bdevperf --json $BASE_DIR/bdev.conf -q $IODEPTH -o $BLK_SIZE -w $RW -M $MIX -t $RUNTIME -m "[$CPUS_ALLOWED]" $BDEVPERF_DIR/bdevperf --json $BASE_DIR/bdev.conf -q $IODEPTH -o $BLK_SIZE -w $RW -M $MIX -t $RUNTIME -m "[$CPUS_ALLOWED]"
sleep 1 sleep 1
@ -346,8 +340,8 @@ function run_nvmeperf() {
local disks local disks
# Limit the number of disks to $1 if needed # Limit the number of disks to $1 if needed
disks=( $(get_disks nvme) ) disks=($(get_disks nvme))
disks=( "${disks[@]:0:$1}" ) disks=("${disks[@]:0:$1}")
r_opt=$(printf -- ' -r "trtype:PCIe traddr:%s"' "${disks[@]}") r_opt=$(printf -- ' -r "trtype:PCIe traddr:%s"' "${disks[@]}")
echo "** Running nvme perf test, this can take a while, depending on the run-time setting." echo "** Running nvme perf test, this can take a while, depending on the run-time setting."
@ -363,7 +357,7 @@ function wait_for_nvme_reload() {
shopt -s extglob shopt -s extglob
for disk in $nvmes; do for disk in $nvmes; do
cmd="ls /sys/block/$disk/queue/*@(iostats|rq_affinity|nomerges|io_poll_delay)*" cmd="ls /sys/block/$disk/queue/*@(iostats|rq_affinity|nomerges|io_poll_delay)*"
until $cmd 2>/dev/null; do until $cmd 2> /dev/null; do
echo "Waiting for full nvme driver reload..." echo "Waiting for full nvme driver reload..."
sleep 0.5 sleep 0.5
done done
@ -374,7 +368,7 @@ function wait_for_nvme_reload() {
function verify_disk_number() { function verify_disk_number() {
# Check if we have appropriate number of disks to carry out the test # Check if we have appropriate number of disks to carry out the test
if [[ "$PLUGIN" =~ "bdev" ]]; then if [[ "$PLUGIN" =~ "bdev" ]]; then
cat <<-JSON >"$BASE_DIR/bdev.conf" cat <<- JSON > "$BASE_DIR/bdev.conf"
{"subsystems":[ {"subsystems":[
$("$ROOT_DIR/scripts/gen_nvme.sh" --json) $("$ROOT_DIR/scripts/gen_nvme.sh" --json)
]} ]}
@ -390,10 +384,12 @@ function verify_disk_number() {
fi fi
} }
function usage() function usage() {
{
set +x set +x
[[ -n $2 ]] && ( echo "$2"; echo ""; ) [[ -n $2 ]] && (
echo "$2"
echo ""
)
echo "Run NVMe PMD/BDEV performance test. Change options for easier debug and setup configuration" echo "Run NVMe PMD/BDEV performance test. Change options for easier debug and setup configuration"
echo "Usage: $(basename $1) [options]" echo "Usage: $(basename $1) [options]"
echo "-h, --help Print help and exit" echo "-h, --help Print help and exit"
@ -436,27 +432,42 @@ function usage()
while getopts 'h-:' optchar; do while getopts 'h-:' optchar; do
case "$optchar" in case "$optchar" in
-) -)
case "$OPTARG" in case "$OPTARG" in
help) usage $0; exit 0 ;; help)
rw=*) RW="${OPTARG#*=}" ;; usage $0
rwmixread=*) MIX="${OPTARG#*=}" ;; exit 0
iodepth=*) IODEPTH="${OPTARG#*=}" ;; ;;
block-size=*) BLK_SIZE="${OPTARG#*=}" ;; rw=*) RW="${OPTARG#*=}" ;;
run-time=*) RUNTIME="${OPTARG#*=}" ;; rwmixread=*) MIX="${OPTARG#*=}" ;;
ramp-time=*) RAMP_TIME="${OPTARG#*=}" ;; iodepth=*) IODEPTH="${OPTARG#*=}" ;;
numjobs=*) NUMJOBS="${OPTARG#*=}" ;; block-size=*) BLK_SIZE="${OPTARG#*=}" ;;
repeat-no=*) REPEAT_NO="${OPTARG#*=}" ;; run-time=*) RUNTIME="${OPTARG#*=}" ;;
fio-bin=*) FIO_BIN="${OPTARG#*=}" ;; ramp-time=*) RAMP_TIME="${OPTARG#*=}" ;;
driver=*) PLUGIN="${OPTARG#*=}" ;; numjobs=*) NUMJOBS="${OPTARG#*=}" ;;
disk-no=*) DISKNO="${OPTARG#*=}"; ONEWORKLOAD=true ;; repeat-no=*) REPEAT_NO="${OPTARG#*=}" ;;
max-disk=*) DISKNO="${OPTARG#*=}" ;; fio-bin=*) FIO_BIN="${OPTARG#*=}" ;;
cpu-allowed=*) CPUS_ALLOWED="${OPTARG#*=}" ;; driver=*) PLUGIN="${OPTARG#*=}" ;;
no-preconditioning) PRECONDITIONING=false ;; disk-no=*)
no-io-scaling) NOIOSCALING=true ;; DISKNO="${OPTARG#*=}"
*) usage $0 echo "Invalid argument '$OPTARG'"; exit 1 ;; ONEWORKLOAD=true
esac ;;
;; max-disk=*) DISKNO="${OPTARG#*=}" ;;
h) usage $0; exit 0 ;; cpu-allowed=*) CPUS_ALLOWED="${OPTARG#*=}" ;;
*) usage $0 "Invalid argument '$optchar'"; exit 1 ;; no-preconditioning) PRECONDITIONING=false ;;
no-io-scaling) NOIOSCALING=true ;;
*)
usage $0 echo "Invalid argument '$OPTARG'"
exit 1
;;
esac
;;
h)
usage $0
exit 0
;;
*)
usage $0 "Invalid argument '$optchar'"
exit 1
;;
esac esac
done done
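The option loop relies on the getopts long-option trick: with the option string 'h-:', getopts treats --name=value as option '-' and puts name=value into OPTARG, which the inner case then splits on '='. A minimal sketch of the pattern:
while getopts 'h-:' optchar; do
    case "$optchar" in
        -) # long option, OPTARG holds "name" or "name=value"
            case "$OPTARG" in
                count=*) count="${OPTARG#*=}" ;;
                verbose) verbose=true ;;
                *) echo "unknown option --$OPTARG" >&2 ;;
            esac
            ;;
        h) echo "usage: $0 [--count=N] [--verbose]" ;;
    esac
done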

View File

@ -96,17 +96,15 @@ echo "run-time,ramp-time,fio-plugin,QD,block-size,num-cpu-cores,workload,workloa
printf "%s,%s,%s,%s,%s,%s,%s,%s\n" $RUNTIME $RAMP_TIME $PLUGIN $IODEPTH $BLK_SIZE $NO_CORES $RW $MIX >> $result_file printf "%s,%s,%s,%s,%s,%s,%s,%s\n" $RUNTIME $RAMP_TIME $PLUGIN $IODEPTH $BLK_SIZE $NO_CORES $RW $MIX >> $result_file
echo "num_of_disks,iops,avg_lat[usec],p99[usec],p99.99[usec],stdev[usec],avg_slat[usec],avg_clat[usec],bw[Kib/s]" >> $result_file echo "num_of_disks,iops,avg_lat[usec],p99[usec],p99.99[usec],stdev[usec],avg_slat[usec],avg_clat[usec],bw[Kib/s]" >> $result_file
#Run each workload $REPEAT_NO times #Run each workload $REPEAT_NO times
for (( j=0; j < REPEAT_NO; j++ )) for ((j = 0; j < REPEAT_NO; j++)); do
do
#Start with $DISKNO disks and remove 2 disks for each run to avoid preconditioning before each run. #Start with $DISKNO disks and remove 2 disks for each run to avoid preconditioning before each run.
for (( k=DISKNO; k >= 1; k-=2 )) for ((k = DISKNO; k >= 1; k -= 2)); do
do
cp $BASE_DIR/config.fio.tmp $BASE_DIR/config.fio cp $BASE_DIR/config.fio.tmp $BASE_DIR/config.fio
echo "" >> $BASE_DIR/config.fio echo "" >> $BASE_DIR/config.fio
#The SPDK fio plugin supports submitting/completing I/Os to multiple SSDs from a single thread. #The SPDK fio plugin supports submitting/completing I/Os to multiple SSDs from a single thread.
#Therefore, the per thread queue depth is set to the desired IODEPTH/device X the number of devices per thread. #Therefore, the per thread queue depth is set to the desired IODEPTH/device X the number of devices per thread.
if [[ "$PLUGIN" =~ "spdk-plugin" ]] && [[ "$NOIOSCALING" = false ]]; then if [[ "$PLUGIN" =~ "spdk-plugin" ]] && [[ "$NOIOSCALING" = false ]]; then
qd=$(( IODEPTH * k )) qd=$((IODEPTH * k))
else else
qd=$IODEPTH qd=$IODEPTH
fi fi
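Since a single SPDK plugin thread drives k devices at once, the per-thread queue depth is scaled so each device still sees the requested IODEPTH. A quick worked example with assumed numbers:
IODEPTH=32 k=4      # requested per-device QD, devices served by one thread
qd=$((IODEPTH * k)) # per-thread QD handed to fio: 128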
@ -142,17 +140,17 @@ do
time_based=1 time_based=1
description=$desc description=$desc
log_avg_msec=250 log_avg_msec=250
EOF EOF
echo "USING CONFIG:" echo "USING CONFIG:"
cat $BASE_DIR/config.fio cat $BASE_DIR/config.fio
if [[ "$PLUGIN" =~ "spdk-plugin" ]]; then if [[ "$PLUGIN" =~ "spdk-plugin" ]]; then
run_spdk_nvme_fio $PLUGIN "--output=$NVME_FIO_RESULTS" \ run_spdk_nvme_fio $PLUGIN "--output=$NVME_FIO_RESULTS" \
"--write_lat_log=$result_dir/perf_lat_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${DATE}_${k}disks_${j}" "--write_lat_log=$result_dir/perf_lat_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${DATE}_${k}disks_${j}"
else else
run_nvme_fio $fio_ioengine_opt "--output=$NVME_FIO_RESULTS" \ run_nvme_fio $fio_ioengine_opt "--output=$NVME_FIO_RESULTS" \
"--write_lat_log=$result_dir/perf_lat_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${DATE}_${k}disks_${j}" "--write_lat_log=$result_dir/perf_lat_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${DATE}_${k}disks_${j}"
fi fi
#Store values for every number of used disks #Store values for every number of used disks
@ -177,8 +175,7 @@ do
done done
done done
#Write results to csv file #Write results to csv file
for (( k=DISKNO; k >= 1; k-=2 )) for ((k = DISKNO; k >= 1; k -= 2)); do
do
iops_disks[$k]=$((${iops_disks[$k]} / REPEAT_NO)) iops_disks[$k]=$((${iops_disks[$k]} / REPEAT_NO))
if [[ "$PLUGIN" =~ "plugin" ]]; then if [[ "$PLUGIN" =~ "plugin" ]]; then
@ -206,8 +203,8 @@ do
bw[$k]=$((${bw[$k]} / REPEAT_NO)) bw[$k]=$((${bw[$k]} / REPEAT_NO))
printf "%s,%s,%s,%s,%s,%s,%s,%s,%s\n" ${k} ${iops_disks[$k]} ${mean_lat_disks_usec[$k]} ${p99_lat_disks_usec[$k]}\ printf "%s,%s,%s,%s,%s,%s,%s,%s,%s\n" ${k} ${iops_disks[$k]} ${mean_lat_disks_usec[$k]} ${p99_lat_disks_usec[$k]} \
${p99_99_lat_disks_usec[$k]} ${stdev_disks_usec[$k]} ${mean_slat_disks_usec[$k]} ${mean_clat_disks_usec[$k]} ${bw[$k]} >> $result_file ${p99_99_lat_disks_usec[$k]} ${stdev_disks_usec[$k]} ${mean_slat_disks_usec[$k]} ${mean_clat_disks_usec[$k]} ${bw[$k]} >> $result_file
#if tested on only one number of disks #if tested on only one number of disks
if $ONEWORKLOAD; then if $ONEWORKLOAD; then

View File

@ -6,8 +6,8 @@ source $rootdir/scripts/common.sh
source $rootdir/test/common/autotest_common.sh source $rootdir/test/common/autotest_common.sh
if [ -z "${DEPENDENCY_DIR}" ]; then if [ -z "${DEPENDENCY_DIR}" ]; then
echo DEPENDENCY_DIR not defined! echo DEPENDENCY_DIR not defined!
exit 1 exit 1
fi fi
spdk_nvme_cli="${DEPENDENCY_DIR}/nvme-cli" spdk_nvme_cli="${DEPENDENCY_DIR}/nvme-cli"

View File

@ -18,20 +18,20 @@ bdf=$(get_first_nvme_bdf)
PCI_WHITELIST="${bdf}" $rootdir/scripts/setup.sh reset PCI_WHITELIST="${bdf}" $rootdir/scripts/setup.sh reset
sleep 1 sleep 1
bdf_sysfs_path=$( readlink -f /sys/class/nvme/nvme* | grep "$bdf/nvme/nvme" ) bdf_sysfs_path=$(readlink -f /sys/class/nvme/nvme* | grep "$bdf/nvme/nvme")
if [ -z "$bdf_sysfs_path" ]; then if [ -z "$bdf_sysfs_path" ]; then
echo "setup.sh failed bind kernel driver to ${bdf}" echo "setup.sh failed bind kernel driver to ${bdf}"
return 1 return 1
fi fi
nvme_name=$( basename $bdf_sysfs_path ) nvme_name=$(basename $bdf_sysfs_path)
set +e set +e
ctrlr="/dev/${nvme_name}" ctrlr="/dev/${nvme_name}"
ns="/dev/${nvme_name}n1" ns="/dev/${nvme_name}n1"
oacs=$( ${NVME_CMD} id-ctrl $ctrlr | grep oacs | cut -d: -f2 ) oacs=$(${NVME_CMD} id-ctrl $ctrlr | grep oacs | cut -d: -f2)
oacs_firmware=$(( oacs & 0x4 )) oacs_firmware=$((oacs & 0x4))
${NVME_CMD} get-ns-id $ns > ${KERNEL_OUT}.1 ${NVME_CMD} get-ns-id $ns > ${KERNEL_OUT}.1
${NVME_CMD} id-ns $ns > ${KERNEL_OUT}.2 ${NVME_CMD} id-ns $ns > ${KERNEL_OUT}.2
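oacs_firmware masks bit 2 of the controller's OACS field, which indicates support for the firmware download/commit commands. A small sketch of the same bit test with an assumed OACS value:
oacs=0x5e               # assumed value parsed from 'id-ctrl'
if ((oacs & 0x4)); then # bit 2 set -> firmware commands supported
    echo "firmware download/commit supported"
fi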

View File

@ -12,19 +12,19 @@ bdf=$(get_first_nvme_bdf)
PCI_WHITELIST="${bdf}" $rootdir/scripts/setup.sh reset PCI_WHITELIST="${bdf}" $rootdir/scripts/setup.sh reset
sleep 1 sleep 1
bdf_sysfs_path=$( readlink -f /sys/class/nvme/nvme* | grep "$bdf/nvme/nvme" ) bdf_sysfs_path=$(readlink -f /sys/class/nvme/nvme* | grep "$bdf/nvme/nvme")
if [ -z "$bdf_sysfs_path" ]; then if [ -z "$bdf_sysfs_path" ]; then
echo "setup.sh failed bind kernel driver to ${bdf}" echo "setup.sh failed bind kernel driver to ${bdf}"
exit 1 exit 1
fi fi
nvme_name=$( basename $bdf_sysfs_path ) nvme_name=$(basename $bdf_sysfs_path)
KERNEL_SMART_JSON=$( ${SMARTCTL_CMD} --json=g -a /dev/${nvme_name} | grep -v "/dev/${nvme_name}" | sort || true ) KERNEL_SMART_JSON=$(${SMARTCTL_CMD} --json=g -a /dev/${nvme_name} | grep -v "/dev/${nvme_name}" | sort || true)
${SMARTCTL_CMD} -i /dev/${nvme_name}n1 ${SMARTCTL_CMD} -i /dev/${nvme_name}n1
# logs are not provided by json output # logs are not provided by json output
KERNEL_SMART_ERRLOG=$( ${SMARTCTL_CMD} -l error /dev/${nvme_name} ) KERNEL_SMART_ERRLOG=$(${SMARTCTL_CMD} -l error /dev/${nvme_name})
$rootdir/scripts/setup.sh $rootdir/scripts/setup.sh
@ -43,19 +43,19 @@ if [ ! -c /dev/spdk/nvme0 ]; then
exit 1 exit 1
fi fi
CUSE_SMART_JSON=$( ${SMARTCTL_CMD} --json=g -a /dev/spdk/nvme0 | grep -v "/dev/spdk/nvme0" | sort || true ) CUSE_SMART_JSON=$(${SMARTCTL_CMD} --json=g -a /dev/spdk/nvme0 | grep -v "/dev/spdk/nvme0" | sort || true)
DIFF_SMART_JSON=$( diff --changed-group-format='%<' --unchanged-group-format='' <(echo "$KERNEL_SMART_JSON") <(echo "$CUSE_SMART_JSON") || true) DIFF_SMART_JSON=$(diff --changed-group-format='%<' --unchanged-group-format='' <(echo "$KERNEL_SMART_JSON") <(echo "$CUSE_SMART_JSON") || true)
# Mask values can change # Mask values can change
ERR_SMART_JSON=$( grep -v "json\.nvme_smart_health_information_log\.\|json\.local_time\.\|json\.temperature\.\|json\.power_on_time\.hours" <<< $DIFF_SMART_JSON || true ) ERR_SMART_JSON=$(grep -v "json\.nvme_smart_health_information_log\.\|json\.local_time\.\|json\.temperature\.\|json\.power_on_time\.hours" <<< $DIFF_SMART_JSON || true)
if [ -n "$ERR_SMART_JSON" ] ; then if [ -n "$ERR_SMART_JSON" ]; then
echo "Wrong values for: $ERR_SMART_JSON" echo "Wrong values for: $ERR_SMART_JSON"
exit 1 exit 1
fi fi
CUSE_SMART_ERRLOG=$( ${SMARTCTL_CMD} -l error /dev/spdk/nvme0 ) CUSE_SMART_ERRLOG=$(${SMARTCTL_CMD} -l error /dev/spdk/nvme0)
if [ "$CUSE_SMART_ERRLOG" != "$KERNEL_SMART_ERRLOG" ]; then if [ "$CUSE_SMART_ERRLOG" != "$KERNEL_SMART_ERRLOG" ]; then
echo "Wrong values in NVMe Error log" echo "Wrong values in NVMe Error log"
exit 1 exit 1

View File

@ -5,8 +5,7 @@ NVMF_TCP_IP_ADDRESS="127.0.0.1"
NVMF_TRANSPORT_OPTS="" NVMF_TRANSPORT_OPTS=""
NVMF_SERIAL=SPDK00000000000001 NVMF_SERIAL=SPDK00000000000001
function build_nvmf_app_args() function build_nvmf_app_args() {
{
if [ $SPDK_RUN_NON_ROOT -eq 1 ]; then if [ $SPDK_RUN_NON_ROOT -eq 1 ]; then
NVMF_APP=(sudo -u "$USER" "${NVMF_APP[@]}") NVMF_APP=(sudo -u "$USER" "${NVMF_APP[@]}")
NVMF_APP+=(-i "$NVMF_APP_SHM_ID" -e 0xFFFF) NVMF_APP+=(-i "$NVMF_APP_SHM_ID" -e 0xFFFF)
@ -15,13 +14,13 @@ function build_nvmf_app_args()
fi fi
} }
: ${NVMF_APP_SHM_ID="0"}; export NVMF_APP_SHM_ID : ${NVMF_APP_SHM_ID="0"}
export NVMF_APP_SHM_ID
build_nvmf_app_args build_nvmf_app_args
have_pci_nics=0 have_pci_nics=0
function load_ib_rdma_modules() function load_ib_rdma_modules() {
{
if [ $(uname) != Linux ]; then if [ $(uname) != Linux ]; then
return 0 return 0
fi fi
@ -37,9 +36,7 @@ function load_ib_rdma_modules()
modprobe rdma_ucm modprobe rdma_ucm
} }
function detect_soft_roce_nics() {
function detect_soft_roce_nics()
{
if hash rxe_cfg; then if hash rxe_cfg; then
rxe_cfg start rxe_cfg start
rdma_nics=$(get_rdma_if_list) rdma_nics=$(get_rdma_if_list)
@ -54,12 +51,10 @@ function detect_soft_roce_nics()
fi fi
} }
# args 1 and 2 represent the grep filters for finding our NICS. # args 1 and 2 represent the grep filters for finding our NICS.
# subsequent args are all drivers that should be loaded if we find these NICs. # subsequent args are all drivers that should be loaded if we find these NICs.
# Those drivers should be supplied in the correct order. # Those drivers should be supplied in the correct order.
function detect_nics_and_probe_drivers() function detect_nics_and_probe_drivers() {
{
NIC_VENDOR="$1" NIC_VENDOR="$1"
NIC_CLASS="$2" NIC_CLASS="$2"
@ -80,9 +75,7 @@ function detect_nics_and_probe_drivers()
fi fi
} }
function detect_pci_nics() {
function detect_pci_nics()
{
if ! hash lspci; then if ! hash lspci; then
return 0 return 0
@ -101,38 +94,34 @@ function detect_pci_nics()
sleep 5 sleep 5
} }
function detect_rdma_nics() function detect_rdma_nics() {
{
detect_pci_nics detect_pci_nics
if [ "$have_pci_nics" -eq "0" ]; then if [ "$have_pci_nics" -eq "0" ]; then
detect_soft_roce_nics detect_soft_roce_nics
fi fi
} }
function allocate_nic_ips() function allocate_nic_ips() {
{ ((count = NVMF_IP_LEAST_ADDR))
(( count=NVMF_IP_LEAST_ADDR ))
for nic_name in $(get_rdma_if_list); do for nic_name in $(get_rdma_if_list); do
ip="$(get_ip_address $nic_name)" ip="$(get_ip_address $nic_name)"
if [ -z $ip ]; then if [ -z $ip ]; then
ip addr add $NVMF_IP_PREFIX.$count/24 dev $nic_name ip addr add $NVMF_IP_PREFIX.$count/24 dev $nic_name
ip link set $nic_name up ip link set $nic_name up
(( count=count+1 )) ((count = count + 1))
fi fi
# dump configuration for debug log # dump configuration for debug log
ip addr show $nic_name ip addr show $nic_name
done done
} }
function get_available_rdma_ips() function get_available_rdma_ips() {
{
for nic_name in $(get_rdma_if_list); do for nic_name in $(get_rdma_if_list); do
get_ip_address $nic_name get_ip_address $nic_name
done done
} }
function get_rdma_if_list() function get_rdma_if_list() {
{
for nic_type in /sys/class/infiniband/*; do for nic_type in /sys/class/infiniband/*; do
[[ -e "$nic_type" ]] || break [[ -e "$nic_type" ]] || break
for nic_name in /sys/class/infiniband/"$(basename ${nic_type})"/device/net/*; do for nic_name in /sys/class/infiniband/"$(basename ${nic_type})"/device/net/*; do
@ -142,14 +131,12 @@ function get_rdma_if_list()
done done
} }
function get_ip_address() function get_ip_address() {
{
interface=$1 interface=$1
ip -o -4 addr show $interface | awk '{print $4}' | cut -d"/" -f1 ip -o -4 addr show $interface | awk '{print $4}' | cut -d"/" -f1
} }
function nvmfcleanup() function nvmfcleanup() {
{
sync sync
set +e set +e
for i in {1..20}; do for i in {1..20}; do
@ -170,8 +157,7 @@ function nvmfcleanup()
modprobe -v -r nvme-fabrics modprobe -v -r nvme-fabrics
} }
function nvmftestinit() function nvmftestinit() {
{
if [ -z $TEST_TRANSPORT ]; then if [ -z $TEST_TRANSPORT ]; then
echo "transport not specified - use --transport= to specify" echo "transport not specified - use --transport= to specify"
return 1 return 1
@ -205,8 +191,7 @@ function nvmftestinit()
modprobe nvme-$TEST_TRANSPORT || true modprobe nvme-$TEST_TRANSPORT || true
} }
function nvmfappstart() function nvmfappstart() {
{
timing_enter start_nvmf_tgt timing_enter start_nvmf_tgt
"${NVMF_APP[@]}" $1 & "${NVMF_APP[@]}" $1 &
nvmfpid=$! nvmfpid=$!
@ -215,8 +200,7 @@ function nvmfappstart()
timing_exit start_nvmf_tgt timing_exit start_nvmf_tgt
} }
function nvmftestfini() function nvmftestfini() {
{
nvmfcleanup || : nvmfcleanup || :
if [ -n "$nvmfpid" ]; then if [ -n "$nvmfpid" ]; then
killprocess $nvmfpid killprocess $nvmfpid
@ -229,15 +213,13 @@ function nvmftestfini()
fi fi
} }
function rdma_device_init() function rdma_device_init() {
{
load_ib_rdma_modules load_ib_rdma_modules
detect_rdma_nics detect_rdma_nics
allocate_nic_ips allocate_nic_ips
} }
function revert_soft_roce() function revert_soft_roce() {
{
if hash rxe_cfg; then if hash rxe_cfg; then
interfaces="$(ip -o link | awk '{print $2}' | cut -d":" -f1)" interfaces="$(ip -o link | awk '{print $2}' | cut -d":" -f1)"
for interface in $interfaces; do for interface in $interfaces; do
@ -247,8 +229,7 @@ function revert_soft_roce()
fi fi
} }
function check_ip_is_soft_roce() function check_ip_is_soft_roce() {
{
IP=$1 IP=$1
if hash rxe_cfg; then if hash rxe_cfg; then
dev=$(ip -4 -o addr show | grep $IP | cut -d" " -f2) dev=$(ip -4 -o addr show | grep $IP | cut -d" " -f2)
@ -262,8 +243,7 @@ function check_ip_is_soft_roce()
fi fi
} }
function nvme_connect() function nvme_connect() {
{
local init_count local init_count
init_count=$(nvme list | wc -l) init_count=$(nvme list | wc -l)
@ -279,8 +259,7 @@ function nvme_connect()
return 1 return 1
} }
function get_nvme_devs() function get_nvme_devs() {
{
local dev rest local dev rest
nvmes=() nvmes=()
@ -292,18 +271,17 @@ function get_nvme_devs()
echo "$dev $rest" echo "$dev $rest"
fi fi
done < <(nvme list) done < <(nvme list)
(( ${#nvmes[@]} )) || return 1 ((${#nvmes[@]})) || return 1
echo "${#nvmes[@]}" >&2 echo "${#nvmes[@]}" >&2
} }
function gen_nvmf_target_json() function gen_nvmf_target_json() {
{
local subsystem config=() local subsystem config=()
for subsystem in "${@:-1}"; do for subsystem in "${@:-1}"; do
config+=( config+=(
"$( "$(
cat <<-EOF cat <<- EOF
{ {
"params": { "params": {
"name": "Nvme$subsystem", "name": "Nvme$subsystem",
@ -319,13 +297,16 @@ function gen_nvmf_target_json()
)" )"
) )
done done
jq . <<-JSON jq . <<- JSON
{ {
"subsystems": [ "subsystems": [
{ {
"subsystem": "bdev", "subsystem": "bdev",
"config": [ "config": [
$(IFS=","; printf '%s\n' "${config[*]}") $(
IFS=","
printf '%s\n' "${config[*]}"
)
] ]
} }
] ]
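gen_nvmf_target_json collects one JSON fragment per subsystem in a bash array and joins them with commas by setting IFS inside a command substitution before expanding ${config[*]}. A minimal sketch of that join:
config=('{"a":1}' '{"b":2}' '{"c":3}')
joined=$(
    IFS="," # ${config[*]} joins elements with the first character of IFS
    printf '%s\n' "${config[*]}"
)
echo "[$joined]" # -> [{"a":1},{"b":2},{"c":3}]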

View File

@ -10,8 +10,7 @@ MALLOC_BLOCK_SIZE=512
rpc_py="$rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
function tgt_init() function tgt_init() {
{
nvmfappstart "-m 0xF" nvmfappstart "-m 0xF"
$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 $rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
@ -29,7 +28,6 @@ if [ $TEST_TRANSPORT == "rdma" ] && check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP;
exit 0 exit 0
fi fi
tgt_init tgt_init
"$rootdir/test/bdev/bdevperf/bdevperf" --json <(gen_nvmf_target_json) -q 128 -o 4096 -w verify -t 1 "$rootdir/test/bdev/bdevperf/bdevperf" --json <(gen_nvmf_target_json) -q 128 -o 4096 -w verify -t 1

View File

@ -12,8 +12,7 @@ MALLOC_BLOCK_SIZE=512
rpc_py="$rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
function disconnect_init() function disconnect_init() {
{
nvmfappstart "-m 0xF0" nvmfappstart "-m 0xF0"
$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0 $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
@ -27,10 +26,10 @@ function disconnect_init()
# Test to make sure we don't segfault or access null pointers when we try to connect to # Test to make sure we don't segfault or access null pointers when we try to connect to
# a discovery controller that doesn't exist yet. # a discovery controller that doesn't exist yet.
function nvmf_target_disconnect_tc1 { function nvmf_target_disconnect_tc1() {
set +e set +e
$rootdir/examples/nvme/reconnect/reconnect -q 32 -o 4096 -w randrw -M 50 -t 10 -c 0xF \ $rootdir/examples/nvme/reconnect/reconnect -q 32 -o 4096 -w randrw -M 50 -t 10 -c 0xF \
-r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
# If the program crashes, the high bit of $? will be set so we will get a value in the hundreds. # If the program crashes, the high bit of $? will be set so we will get a value in the hundreds.
# But if the reconnect code detects errors and exits normally it will return 1. # But if the reconnect code detects errors and exits normally it will return 1.
if [ $? != 1 ]; then if [ $? != 1 ]; then
@ -40,12 +39,12 @@ function nvmf_target_disconnect_tc1 {
set -e set -e
} }
function nvmf_target_disconnect_tc2 { function nvmf_target_disconnect_tc2() {
disconnect_init $NVMF_FIRST_TARGET_IP disconnect_init $NVMF_FIRST_TARGET_IP
# If perf doesn't shut down, this test will time out. # If perf doesn't shut down, this test will time out.
$rootdir/examples/nvme/reconnect/reconnect -q 32 -o 4096 -w randrw -M 50 -t 10 -c 0xF \ $rootdir/examples/nvme/reconnect/reconnect -q 32 -o 4096 -w randrw -M 50 -t 10 -c 0xF \
-r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" & -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" &
reconnectpid=$! reconnectpid=$!
sleep 2 sleep 2
@ -58,9 +57,9 @@ function nvmf_target_disconnect_tc2 {
sync sync
} }
function nvmf_target_disconnect_tc3 { function nvmf_target_disconnect_tc3() {
$rootdir/examples/nvme/reconnect/reconnect -q 32 -o 4096 -w randrw -M 50 -t 10 -c 0xF \ $rootdir/examples/nvme/reconnect/reconnect -q 32 -o 4096 -w randrw -M 50 -t 10 -c 0xF \
-r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT alt_traddr:$NVMF_SECOND_TARGET_IP" & -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT alt_traddr:$NVMF_SECOND_TARGET_IP" &
reconnectpid=$! reconnectpid=$!
sleep 2 sleep 2
@ -86,6 +85,5 @@ else
fi fi
fi fi
trap - SIGINT SIGTERM EXIT trap - SIGINT SIGTERM EXIT
nvmftestfini nvmftestfini

View File

@ -11,7 +11,7 @@ source $rootdir/test/nvmf/common.sh
trap "exit 1" SIGINT SIGTERM EXIT trap "exit 1" SIGINT SIGTERM EXIT
TEST_ARGS=( "$@" ) TEST_ARGS=("$@")
run_test "nvmf_example" test/nvmf/target/nvmf_example.sh "${TEST_ARGS[@]}" run_test "nvmf_example" test/nvmf/target/nvmf_example.sh "${TEST_ARGS[@]}"
run_test "nvmf_filesystem" test/nvmf/target/filesystem.sh "${TEST_ARGS[@]}" run_test "nvmf_filesystem" test/nvmf/target/filesystem.sh "${TEST_ARGS[@]}"

View File

@ -21,13 +21,13 @@ $rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0 $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0 $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
"$rootdir/test/bdev/bdevperf/bdevperf" -m 0x10 -i 1 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w write -t 1 & "$rootdir/test/bdev/bdevperf/bdevperf" -m 0x10 -i 1 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w write -t 1 &
WRITE_PID=$! WRITE_PID=$!
"$rootdir/test/bdev/bdevperf/bdevperf" -m 0x20 -i 2 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w read -t 1 & "$rootdir/test/bdev/bdevperf/bdevperf" -m 0x20 -i 2 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w read -t 1 &
READ_PID=$! READ_PID=$!
"$rootdir/test/bdev/bdevperf/bdevperf" -m 0x40 -i 3 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w flush -t 1 & "$rootdir/test/bdev/bdevperf/bdevperf" -m 0x40 -i 3 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w flush -t 1 &
FLUSH_PID=$! FLUSH_PID=$!
"$rootdir/test/bdev/bdevperf/bdevperf" -m 0x80 -i 4 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w unmap -t 1 & "$rootdir/test/bdev/bdevperf/bdevperf" -m 0x80 -i 4 --json <(gen_nvmf_target_json) -q 128 -o 4096 -w unmap -t 1 &
UNMAP_PID=$! UNMAP_PID=$!

View File

@ -18,7 +18,7 @@ $rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0 $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001 $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0 $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc0
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
$rootdir/test/bdev/bdevio/bdevio --json <(gen_nvmf_target_json) $rootdir/test/bdev/bdevio/bdevio --json <(gen_nvmf_target_json)

View File

@ -25,7 +25,7 @@ for i in $(seq 1 4); do
$rpc_py bdev_null_create Null$i $NULL_BDEV_SIZE $NULL_BLOCK_SIZE $rpc_py bdev_null_create Null$i $NULL_BDEV_SIZE $NULL_BLOCK_SIZE
$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK0000000000000$i $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK0000000000000$i
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Null$i $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Null$i
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$i -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
done done
nvme discover -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT nvme discover -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT

View File

@ -12,7 +12,7 @@ rpc_py="$rootdir/scripts/rpc.py"
nvmftestinit nvmftestinit
function nvmf_filesystem_create { function nvmf_filesystem_create() {
fstype=$1 fstype=$1
nvme_name=$2 nvme_name=$2
@ -27,7 +27,7 @@ function nvmf_filesystem_create {
i=0 i=0
while ! umount /mnt/device; do while ! umount /mnt/device; do
[ $i -lt 15 ] || break [ $i -lt 15 ] || break
i=$((i+1)) i=$((i + 1))
sleep 1 sleep 1
done done
@ -41,7 +41,7 @@ function nvmf_filesystem_create {
lsblk -l -o NAME | grep -q -w "${nvme_name}p1" lsblk -l -o NAME | grep -q -w "${nvme_name}p1"
} }
function nvmf_filesystem_part { function nvmf_filesystem_part() {
incapsule=$1 incapsule=$1
nvmfappstart "-m 0xF" nvmfappstart "-m 0xF"
@ -59,7 +59,7 @@ function nvmf_filesystem_part {
mkdir -p /mnt/device mkdir -p /mnt/device
parted -s /dev/${nvme_name} mklabel msdos mkpart primary '0%' '100%' parted -s /dev/${nvme_name} mklabel msdos mkpart primary '0%' '100%'
partprobe partprobe
sleep 1 sleep 1

View File

@ -59,11 +59,11 @@ wait $fio_pid || fio_status=$?
nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
if [ $fio_status -eq 0 ]; then if [ $fio_status -eq 0 ]; then
echo "nvmf hotplug test: fio successful - expected failure" echo "nvmf hotplug test: fio successful - expected failure"
nvmftestfini nvmftestfini
exit 1 exit 1
else else
echo "nvmf hotplug test: fio failed as expected" echo "nvmf hotplug test: fio failed as expected"
fi fi
$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1 $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1

View File

@ -9,7 +9,7 @@ rpc_py="$rootdir/scripts/rpc.py"
nvmftestinit nvmftestinit
"${NVMF_APP[@]}" -m 0xF >$output_dir/nvmf_fuzz_tgt_output.txt 2>&1 & "${NVMF_APP[@]}" -m 0xF > $output_dir/nvmf_fuzz_tgt_output.txt 2>&1 &
nvmfpid=$! nvmfpid=$!
trap 'process_shm --id $NVMF_APP_SHM_ID; rm -f $testdir/nvmf_fuzz.conf; killprocess $nvmfpid; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT trap 'process_shm --id $NVMF_APP_SHM_ID; rm -f $testdir/nvmf_fuzz.conf; killprocess $nvmfpid; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT
@ -27,9 +27,9 @@ echo "[Nvme]" > $testdir/nvmf_fuzz.conf
echo " TransportID \"trtype:$TEST_TRANSPORT adrfam:IPv4 subnqn:nqn.2016-06.io.spdk:cnode1 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT\" Nvme0" >> $testdir/nvmf_fuzz.conf echo " TransportID \"trtype:$TEST_TRANSPORT adrfam:IPv4 subnqn:nqn.2016-06.io.spdk:cnode1 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT\" Nvme0" >> $testdir/nvmf_fuzz.conf
# Note that we chose a consistent seed to ensure that this test is consistent in nightly builds. # Note that we chose a consistent seed to ensure that this test is consistent in nightly builds.
$rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -t 30 -S 123456 -C $testdir/nvmf_fuzz.conf -N -a 2>$output_dir/nvmf_fuzz_logs1.txt $rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -t 30 -S 123456 -C $testdir/nvmf_fuzz.conf -N -a 2> $output_dir/nvmf_fuzz_logs1.txt
# We don't specify a seed for this test. Instead we run a static list of commands from example.json. # We don't specify a seed for this test. Instead we run a static list of commands from example.json.
$rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -C $testdir/nvmf_fuzz.conf -j $rootdir/test/app/fuzz/nvme_fuzz/example.json -a 2>$output_dir/nvmf_fuzz_logs2.txt $rootdir/test/app/fuzz/nvme_fuzz/nvme_fuzz -m 0xF0 -r "/var/tmp/nvme_fuzz" -C $testdir/nvmf_fuzz.conf -j $rootdir/test/app/fuzz/nvme_fuzz/example.json -a 2> $output_dir/nvmf_fuzz_logs2.txt
rm -f $testdir/nvmf_fuzz.conf rm -f $testdir/nvmf_fuzz.conf
$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1 $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1

View File

@ -13,7 +13,7 @@ nvmftestinit
timing_enter nvme_identify timing_enter nvme_identify
bdf=$(get_first_nvme_bdf) bdf=$(get_first_nvme_bdf)
if [ -z "${bdf}" ] ; then if [ -z "${bdf}" ]; then
echo "No NVMe drive found but test requires it. Failing the test." echo "No NVMe drive found but test requires it. Failing the test."
exit 1 exit 1
fi fi
@ -59,12 +59,12 @@ nvmf_model_number=$($rootdir/examples/nvme/identify/identify -r "\
trsvcid:$NVMF_PORT \ trsvcid:$NVMF_PORT \
subnqn:nqn.2016-06.io.spdk:cnode1" | grep "Model Number:" | awk '{print $3}') subnqn:nqn.2016-06.io.spdk:cnode1" | grep "Model Number:" | awk '{print $3}')
if [ ${nvme_serial_number} != ${nvmf_serial_number} ] ; then if [ ${nvme_serial_number} != ${nvmf_serial_number} ]; then
echo "Serial number doesn't match" echo "Serial number doesn't match"
exit 1 exit 1
fi fi
if [ ${nvme_model_number} != ${nvmf_model_number} ] ; then if [ ${nvme_model_number} != ${nvmf_model_number} ]; then
echo "Model number doesn't match" echo "Model number doesn't match"
exit 1 exit 1
fi fi

View File

@ -55,11 +55,11 @@ wait $fio_pid || fio_status=$?
nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" || true
if [ $fio_status -eq 0 ]; then if [ $fio_status -eq 0 ]; then
echo "nvmf hotplug test: fio successful as expected" echo "nvmf hotplug test: fio successful as expected"
else else
echo "nvmf hotplug test: fio failed, expected success" echo "nvmf hotplug test: fio failed, expected success"
nvmftestfini nvmftestfini
exit 1 exit 1
fi fi
$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1 $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1

View File

@ -12,14 +12,13 @@ target=foobar
# pre-seed the rng to generate predictable values across different test runs # pre-seed the rng to generate predictable values across different test runs
RANDOM=0 RANDOM=0
gen_random_s() {
gen_random_s () {
local length=$1 ll local length=$1 ll
# generate ascii table which nvme supports # generate ascii table which nvme supports
local chars=({32..127}) local chars=({32..127})
local string local string
for (( ll = 0; ll < length; ll++ )); do for ((ll = 0; ll < length; ll++)); do
string+="$(echo -e "\x$(printf '%x' "${chars[RANDOM % ${#chars[@]}]}")")" string+="$(echo -e "\x$(printf '%x' "${chars[RANDOM % ${#chars[@]}]}")")"
done done
# Be nice to rpc.py's arg parser and escape `-` in case it's a first character # Be nice to rpc.py's arg parser and escape `-` in case it's a first character
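gen_random_s builds each character by picking a random code point from the ASCII range above and converting it with printf and echo -e. A standalone sketch of that conversion:
chars=({32..127})                          # same ASCII range as above
code=${chars[RANDOM % ${#chars[@]}]}       # pick one code point at random
char=$(echo -e "\x$(printf '%x' "$code")") # hex escape -> character
echo "picked $code -> '$char'"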
@ -34,7 +33,6 @@ nvmfappstart "-m 0xF"
trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini $1; exit 1' SIGINT SIGTERM EXIT
# Attempt to create subsystem with non-existing target # Attempt to create subsystem with non-existing target
out=$("$rpc" nvmf_create_subsystem -t "$target" "$nqn$RANDOM" 2>&1) && false out=$("$rpc" nvmf_create_subsystem -t "$target" "$nqn$RANDOM" 2>&1) && false
[[ $out == *"Unable to find target"* ]] [[ $out == *"Unable to find target"* ]]

View File

@ -25,8 +25,7 @@ fi
$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 $rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
for i in $(seq 1 $NVMF_SUBSYS) for i in $(seq 1 $NVMF_SUBSYS); do
do
$rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc$i $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc$i
$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK$i $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode$i -a -s SPDK$i
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Malloc$i $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode$i Malloc$i

View File

@ -15,21 +15,21 @@ trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini $1; exit 1' SIGINT SIGTERM
# Target application should start with a single target. # Target application should start with a single target.
if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "1" ]; then if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "1" ]; then
echo "SPDK application did not start with the proper number of targets." && false echo "SPDK application did not start with the proper number of targets." && false
fi fi
$rpc_py nvmf_create_target -n nvmf_tgt_1 -s 32 $rpc_py nvmf_create_target -n nvmf_tgt_1 -s 32
$rpc_py nvmf_create_target -n nvmf_tgt_2 -s 32 $rpc_py nvmf_create_target -n nvmf_tgt_2 -s 32
if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "3" ]; then if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "3" ]; then
echo "nvmf_create_target RPC didn't properly create targets." && false echo "nvmf_create_target RPC didn't properly create targets." && false
fi fi
$rpc_py nvmf_delete_target -n nvmf_tgt_1 $rpc_py nvmf_delete_target -n nvmf_tgt_1
$rpc_py nvmf_delete_target -n nvmf_tgt_2 $rpc_py nvmf_delete_target -n nvmf_tgt_2
if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "1" ]; then if [ "$($rpc_py nvmf_get_targets | jq 'length')" != "1" ]; then
echo "nvmf_delete_target RPC didn't properly destroy targets." && false echo "nvmf_delete_target RPC didn't properly destroy targets." && false
fi fi
trap - SIGINT SIGTERM EXIT trap - SIGINT SIGTERM EXIT


@ -6,8 +6,8 @@ source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh source $rootdir/test/nvmf/common.sh
if [ -z "${DEPENDENCY_DIR}" ]; then if [ -z "${DEPENDENCY_DIR}" ]; then
echo DEPENDENCY_DIR not defined! echo DEPENDENCY_DIR not defined!
exit 1 exit 1
fi fi
spdk_nvme_cli="${DEPENDENCY_DIR}/nvme-cli" spdk_nvme_cli="${DEPENDENCY_DIR}/nvme-cli"
@ -33,7 +33,7 @@ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPOR
nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
waitforserial $NVMF_SERIAL 2 waitforserial $NVMF_SERIAL 2
if ! get_nvme_devs print 2>/dev/null; then if ! get_nvme_devs print 2> /dev/null; then
echo "Could not find any nvme devices to work with, aborting the test" >&2 echo "Could not find any nvme devices to work with, aborting the test" >&2
exit 1 exit 1
fi fi
@ -54,7 +54,7 @@ done
nvme disconnect -n "nqn.2016-06.io.spdk:cnode1" nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"
if [ -d $spdk_nvme_cli ]; then if [ -d $spdk_nvme_cli ]; then
# Test spdk/nvme-cli NVMe-oF commands: discover, connect and disconnect # Test spdk/nvme-cli NVMe-oF commands: discover, connect and disconnect
cd $spdk_nvme_cli cd $spdk_nvme_cli
sed -i 's/shm_id=.*/shm_id=-1/g' spdk.conf sed -i 's/shm_id=.*/shm_id=-1/g' spdk.conf
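Besides the usual indentation changes, this hunk inserts a space after the stderr redirect, turning `2>/dev/null` into `2> /dev/null`; both spellings behave identically, only the layout differs. A runnable one-liner in the new style:

ls /sys/class/nvme 2> /dev/null || echo "no nvme entries under sysfs" >&2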


@ -10,25 +10,23 @@ rpc_py="$rootdir/scripts/rpc.py"
MALLOC_BDEV_SIZE=64 MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512 MALLOC_BLOCK_SIZE=512
function build_nvmf_example_args() function build_nvmf_example_args() {
{ if [ $SPDK_RUN_NON_ROOT -eq 1 ]; then
if [ $SPDK_RUN_NON_ROOT -eq 1 ]; then echo "sudo -u $(logname) ./examples/nvmf/nvmf/nvmf -i $NVMF_APP_SHM_ID"
echo "sudo -u $(logname) ./examples/nvmf/nvmf/nvmf -i $NVMF_APP_SHM_ID" else
else echo "./examples/nvmf/nvmf/nvmf -i $NVMF_APP_SHM_ID"
echo "./examples/nvmf/nvmf/nvmf -i $NVMF_APP_SHM_ID" fi
fi
} }
NVMF_EXAMPLE="$(build_nvmf_example_args)" NVMF_EXAMPLE="$(build_nvmf_example_args)"
function nvmfexamplestart() function nvmfexamplestart() {
{ timing_enter start_nvmf_example
timing_enter start_nvmf_example $NVMF_EXAMPLE $1 &
$NVMF_EXAMPLE $1 & nvmfpid=$!
nvmfpid=$! trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT waitforlisten $nvmfpid
waitforlisten $nvmfpid timing_exit start_nvmf_example
timing_exit start_nvmf_example
} }
timing_enter nvmf_example_test timing_enter nvmf_example_test
@ -44,7 +42,7 @@ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK0000000000000
#add ns to subsystem #add ns to subsystem
for malloc_bdev in $malloc_bdevs; do for malloc_bdev in $malloc_bdevs; do
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$malloc_bdev" $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$malloc_bdev"
done done
#add listener to subsystem #add listener to subsystem
@ -53,7 +51,7 @@ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPOR
perf="$rootdir/examples/nvme/perf/perf" perf="$rootdir/examples/nvme/perf/perf"
$perf -q 64 -o 4096 -w randrw -M 30 -t 10 \ $perf -q 64 -o 4096 -w randrw -M 30 -t 10 \
-r "trtype:${TEST_TRANSPORT} adrfam:IPv4 traddr:${NVMF_FIRST_TARGET_IP} trsvcid:${NVMF_PORT} \ -r "trtype:${TEST_TRANSPORT} adrfam:IPv4 traddr:${NVMF_FIRST_TARGET_IP} trsvcid:${NVMF_PORT} \
subnqn:nqn.2016-06.io.spdk:cnode1" subnqn:nqn.2016-06.io.spdk:cnode1"
trap - SIGINT SIGTERM EXIT trap - SIGINT SIGTERM EXIT
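The functions in this hunk also show the command-builder pattern the example test relies on: a function echoes the invocation (optionally wrapped in `sudo -u`), the caller captures it into a variable, and word splitting on the unquoted variable runs it. A hedged sketch with invented names (`build_cmd`, `RUN_NON_ROOT`, and `true` as a stand-in application), not the real test variables:

build_cmd() {
	if [ "${RUN_NON_ROOT:-0}" -eq 1 ]; then
		echo "sudo -u $(logname) true"
	else
		echo "true"
	fi
}
CMD="$(build_cmd)"
$CMD # unquoted on purpose so the string splits into command plus arguments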


@ -52,7 +52,7 @@ $VHOST_RPC vhost_create_scsi_controller naa.VhostScsi0.3
$VHOST_RPC vhost_scsi_controller_add_target naa.VhostScsi0.3 0 "Nvme0n1" $VHOST_RPC vhost_scsi_controller_add_target naa.VhostScsi0.3 0 "Nvme0n1"
# start qemu based VM. # start qemu based VM.
vm_setup --os="$VM_IMAGE" --disk-type=spdk_vhost_scsi --disks="VhostScsi0" --force=3 --vhost-name=3 vm_setup --os="$VM_IMAGE" --disk-type=spdk_vhost_scsi --disks="VhostScsi0" --force=3 --vhost-name=3
vm_run 3 vm_run 3


@ -7,16 +7,14 @@ source $rootdir/test/nvmf/common.sh
rpc_py="$rootdir/scripts/rpc.py" rpc_py="$rootdir/scripts/rpc.py"
function jcount() function jcount() {
{ local filter=$1
local filter=$1 jq "$filter" | wc -l
jq "$filter" | wc -l
} }
function jsum() function jsum() {
{ local filter=$1
local filter=$1 jq "$filter" | awk '{s+=$1}END{print s}'
jq "$filter" | awk '{s+=$1}END{print s}'
} }
nvmftestinit nvmftestinit
@ -35,11 +33,11 @@ stats=$($rpc_py nvmf_get_stats)
[ "0" -eq $(jsum .poll_groups[].io_qpairs <<< "$stats") ] [ "0" -eq $(jsum .poll_groups[].io_qpairs <<< "$stats") ]
# Transport statistics is currently implemented for RDMA only # Transport statistics is currently implemented for RDMA only
if [ 'rdma' == $TEST_TRANSPORT ]; then if [ 'rdma' == $TEST_TRANSPORT ]; then
# Expect RDMA transport and some devices # Expect RDMA transport and some devices
[ "1" -eq $(jcount .poll_groups[0].transports[].trtype <<< "$stats") ] [ "1" -eq $(jcount .poll_groups[0].transports[].trtype <<< "$stats") ]
transport_type=$(jq -r .poll_groups[0].transports[0].trtype <<< "$stats") transport_type=$(jq -r .poll_groups[0].transports[0].trtype <<< "$stats")
[ "${transport_type,,}" == "${TEST_TRANSPORT,,}" ] [ "${transport_type,,}" == "${TEST_TRANSPORT,,}" ]
[ "0" -lt $(jcount .poll_groups[0].transports[0].devices[].name <<< "$stats") ] [ "0" -lt $(jcount .poll_groups[0].transports[0].devices[].name <<< "$stats") ]
fi fi
# set times for subsystem construct/delete # set times for subsystem construct/delete
@ -82,8 +80,7 @@ nvme disconnect -n nqn.2016-06.io.spdk:cnode1
$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1 $rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode1
# do frequent add delete of namespaces with different nsid. # do frequent add delete of namespaces with different nsid.
for i in $(seq 1 $times) for i in $(seq 1 $times); do
do
$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -s $NVMF_SERIAL $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -s $NVMF_SERIAL
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1 -n 5 $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1 -n 5
@ -100,8 +97,7 @@ do
done done
# do frequent add delete. # do frequent add delete.
for i in $(seq 1 $times) for i in $(seq 1 $times); do
do
$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -s $NVMF_SERIAL $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -s $NVMF_SERIAL
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1 $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
@ -118,9 +114,9 @@ stats=$($rpc_py nvmf_get_stats)
[ "0" -lt $(jsum .poll_groups[].io_qpairs <<< "$stats") ] [ "0" -lt $(jsum .poll_groups[].io_qpairs <<< "$stats") ]
# Transport statistics is currently implemented for RDMA only # Transport statistics is currently implemented for RDMA only
if [ 'rdma' == $TEST_TRANSPORT ]; then if [ 'rdma' == $TEST_TRANSPORT ]; then
# Expect non-zero completions and request latencies accumulated # Expect non-zero completions and request latencies accumulated
[ "0" -lt $(jsum .poll_groups[].transports[].devices[].completions <<< "$stats") ] [ "0" -lt $(jsum .poll_groups[].transports[].devices[].completions <<< "$stats") ]
[ "0" -lt $(jsum .poll_groups[].transports[].devices[].request_latency <<< "$stats") ] [ "0" -lt $(jsum .poll_groups[].transports[].devices[].request_latency <<< "$stats") ]
fi fi
trap - SIGINT SIGTERM EXIT trap - SIGINT SIGTERM EXIT
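The `jcount`/`jsum` helpers at the top of this file are thin wrappers around jq: one counts matching output lines, the other sums numeric output. A standalone usage sketch on canned JSON (the stats object is made up; the helper bodies mirror the ones above):

jcount() {
	local filter=$1
	jq "$filter" | wc -l
}
jsum() {
	local filter=$1
	jq "$filter" | awk '{s+=$1} END {print s}'
}
stats='{"poll_groups":[{"io_qpairs":2},{"io_qpairs":3}]}'
jcount '.poll_groups[].io_qpairs' <<< "$stats" # 2
jsum '.poll_groups[].io_qpairs' <<< "$stats"   # 5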

View File

@ -59,7 +59,7 @@ function waitforio() {
fi fi
local ret=1 local ret=1
local i local i
for (( i = 10; i != 0; i-- )); do for ((i = 10; i != 0; i--)); do
read_io_count=$($rpc_py -s $1 bdev_get_iostat -b $2 | jq -r '.bdevs[0].num_read_ops') read_io_count=$($rpc_py -s $1 bdev_get_iostat -b $2 | jq -r '.bdevs[0].num_read_ops')
# A few I/O will happen during initial examine. So wait until at least 100 I/O # A few I/O will happen during initial examine. So wait until at least 100 I/O
# have completed to know that bdevperf is really generating the I/O. # have completed to know that bdevperf is really generating the I/O.
@ -73,7 +73,7 @@ function waitforio() {
} }
# Test 1: Kill the initiator unexpectedly with no I/O outstanding # Test 1: Kill the initiator unexpectedly with no I/O outstanding
function nvmf_shutdown_tc1 { function nvmf_shutdown_tc1() {
starttarget starttarget
# Run bdev_svc, which connects but does not issue I/O # Run bdev_svc, which connects but does not issue I/O
@ -97,7 +97,7 @@ function nvmf_shutdown_tc1 {
} }
# Test 2: Kill initiator unexpectedly with I/O outstanding # Test 2: Kill initiator unexpectedly with I/O outstanding
function nvmf_shutdown_tc2 { function nvmf_shutdown_tc2() {
starttarget starttarget
# Run bdevperf # Run bdevperf
@ -119,11 +119,11 @@ function nvmf_shutdown_tc2 {
} }
# Test 3: Kill the target unexpectedly with I/O outstanding # Test 3: Kill the target unexpectedly with I/O outstanding
function nvmf_shutdown_tc3 { function nvmf_shutdown_tc3() {
starttarget starttarget
# Run bdevperf # Run bdevperf
$rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "${num_subsystems[@]}") -q 64 -o 65536 -w verify -t 10 & $rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "${num_subsystems[@]}") -q 64 -o 65536 -w verify -t 10 &
perfpid=$! perfpid=$!
waitforlisten $perfpid /var/tmp/bdevperf.sock waitforlisten $perfpid /var/tmp/bdevperf.sock
$rpc_py -s /var/tmp/bdevperf.sock framework_wait_init $rpc_py -s /var/tmp/bdevperf.sock framework_wait_init


@ -1,4 +1,3 @@
source $rootdir/scripts/common.sh source $rootdir/scripts/common.sh
source $rootdir/test/common/autotest_common.sh source $rootdir/test/common/autotest_common.sh
@ -11,8 +10,7 @@ function nvme_cfg() {
echo "$ocf_nvme_cfg" echo "$ocf_nvme_cfg"
} }
function clear_nvme() function clear_nvme() {
{
mapfile -t bdf < <(iter_all_pci_class_code 01 08 02) mapfile -t bdf < <(iter_all_pci_class_code 01 08 02)
# Clear metadata on NVMe device # Clear metadata on NVMe device


@ -5,11 +5,11 @@ rootdir=$(readlink -f $curdir/../../..)
source $rootdir/test/ocf/common.sh source $rootdir/test/ocf/common.sh
function fio_verify(){ function fio_verify() {
fio_bdev $curdir/test.fio --aux-path=/tmp/ --ioengine=spdk_bdev "$@" fio_bdev $curdir/test.fio --aux-path=/tmp/ --ioengine=spdk_bdev "$@"
} }
function cleanup(){ function cleanup() {
rm -f $curdir/modes.conf rm -f $curdir/modes.conf
} }


@ -6,13 +6,12 @@ source $rootdir/test/common/autotest_common.sh
rpc_py=$rootdir/scripts/rpc.py rpc_py=$rootdir/scripts/rpc.py
function bdev_check_claimed() function bdev_check_claimed() {
{ if [ "$($rpc_py get_bdevs -b "$@" | jq '.[0].claimed')" = "true" ]; then
if [ "$($rpc_py get_bdevs -b "$@" | jq '.[0].claimed')" = "true" ]; then return 0
return 0; else
else return 1
return 1; fi
fi
} }
$rootdir/app/iscsi_tgt/iscsi_tgt & $rootdir/app/iscsi_tgt/iscsi_tgt &
@ -34,13 +33,13 @@ $rpc_py bdev_ocf_get_bdevs NonExisting | jq -e \
'.[0] | .name == "PartCache"' '.[0] | .name == "PartCache"'
if ! bdev_check_claimed Malloc0; then if ! bdev_check_claimed Malloc0; then
>&2 echo "Base device expected to be claimed now" echo >&2 "Base device expected to be claimed now"
exit 1 exit 1
fi fi
$rpc_py bdev_ocf_delete PartCache $rpc_py bdev_ocf_delete PartCache
if bdev_check_claimed Malloc0; then if bdev_check_claimed Malloc0; then
>&2 echo "Base device is not expected to be claimed now" echo >&2 "Base device is not expected to be claimed now"
exit 1 exit 1
fi fi
@ -50,34 +49,34 @@ $rpc_py bdev_ocf_get_bdevs FullCache | jq -e \
'.[0] | .started and .cache.attached and .core.attached' '.[0] | .started and .cache.attached and .core.attached'
if ! (bdev_check_claimed Malloc0 && bdev_check_claimed Malloc1); then if ! (bdev_check_claimed Malloc0 && bdev_check_claimed Malloc1); then
>&2 echo "Base devices expected to be claimed now" echo >&2 "Base devices expected to be claimed now"
exit 1 exit 1
fi fi
$rpc_py bdev_ocf_delete FullCache $rpc_py bdev_ocf_delete FullCache
if bdev_check_claimed Malloc0 && bdev_check_claimed Malloc1; then if bdev_check_claimed Malloc0 && bdev_check_claimed Malloc1; then
>&2 echo "Base devices are not expected to be claimed now" echo >&2 "Base devices are not expected to be claimed now"
exit 1 exit 1
fi fi
$rpc_py bdev_ocf_create HotCache wt Malloc0 Malloc1 $rpc_py bdev_ocf_create HotCache wt Malloc0 Malloc1
if ! (bdev_check_claimed Malloc0 && bdev_check_claimed Malloc1); then if ! (bdev_check_claimed Malloc0 && bdev_check_claimed Malloc1); then
>&2 echo "Base devices expected to be claimed now" echo >&2 "Base devices expected to be claimed now"
exit 1 exit 1
fi fi
$rpc_py bdev_malloc_delete Malloc0 $rpc_py bdev_malloc_delete Malloc0
if bdev_check_claimed Malloc1; then if bdev_check_claimed Malloc1; then
>&2 echo "Base device is not expected to be claimed now" echo >&2 "Base device is not expected to be claimed now"
exit 1 exit 1
fi fi
status=$($rpc_py get_bdevs) status=$($rpc_py get_bdevs)
gone=$(echo $status | jq 'map(select(.name == "HotCache")) == []') gone=$(echo $status | jq 'map(select(.name == "HotCache")) == []')
if [[ $gone == false ]]; then if [[ $gone == false ]]; then
>&2 echo "OCF bdev is expected to unregister" echo >&2 "OCF bdev is expected to unregister"
exit 1 exit 1
fi fi
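Every error path in this file moves the redirection after the command name: `>&2 echo "..."` becomes `echo >&2 "..."`. The two are equivalent, since the operator may appear anywhere in a simple command; the reformat just settles on one placement. A minimal illustration:

# both lines write to stderr; only the operator position differs
>&2 echo "old placement"
echo >&2 "new placement"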


@ -7,15 +7,13 @@ source $rootdir/test/common/autotest_common.sh
rpc_py=$rootdir/scripts/rpc.py rpc_py=$rootdir/scripts/rpc.py
spdk_pid='?' spdk_pid='?'
function start_spdk() function start_spdk() {
{
$rootdir/app/iscsi_tgt/iscsi_tgt & $rootdir/app/iscsi_tgt/iscsi_tgt &
spdk_pid=$! spdk_pid=$!
trap 'killprocess $spdk_pid; exit 1' SIGINT SIGTERM EXIT trap 'killprocess $spdk_pid; exit 1' SIGINT SIGTERM EXIT
waitforlisten $spdk_pid waitforlisten $spdk_pid
} }
function stop_spdk() function stop_spdk() {
{
killprocess $spdk_pid killprocess $spdk_pid
trap - SIGINT SIGTERM EXIT trap - SIGINT SIGTERM EXIT
} }
@ -24,8 +22,8 @@ start_spdk
# Hotplug case # Hotplug case
$rpc_py bdev_malloc_create 1 512 -b Core0 $rpc_py bdev_malloc_create 1 512 -b Core0
$rpc_py bdev_malloc_create 1 512 -b Core1 $rpc_py bdev_malloc_create 1 512 -b Core1
$rpc_py bdev_ocf_create C1 wt Cache Core0 $rpc_py bdev_ocf_create C1 wt Cache Core0
$rpc_py bdev_ocf_create C2 wt Cache Core1 $rpc_py bdev_ocf_create C2 wt Cache Core1
@ -43,7 +41,7 @@ waitforbdev C2
# Detaching cores # Detaching cores
$rpc_py bdev_ocf_delete C2 $rpc_py bdev_ocf_delete C2
$rpc_py bdev_ocf_get_bdevs C1 | jq -e \ $rpc_py bdev_ocf_get_bdevs C1 | jq -e \
'.[0] | .started' '.[0] | .started'
@ -62,7 +60,7 @@ start_spdk
$rpc_py bdev_malloc_create 101 512 -b Cache $rpc_py bdev_malloc_create 101 512 -b Cache
$rpc_py bdev_malloc_create 101 512 -b Malloc $rpc_py bdev_malloc_create 101 512 -b Malloc
$rpc_py bdev_malloc_create 1 512 -b Core $rpc_py bdev_malloc_create 1 512 -b Core
$rpc_py bdev_ocf_create C1 wt Cache Malloc $rpc_py bdev_ocf_create C1 wt Cache Malloc
$rpc_py bdev_ocf_create C2 wt Cache Core $rpc_py bdev_ocf_create C2 wt Cache Core


@ -22,8 +22,8 @@ waitforlisten $spdk_pid
# Create ocf on persistent storage # Create ocf on persistent storage
$rpc_py bdev_ocf_create ocfWT wt Nvme0n1p0 Nvme0n1p1 $rpc_py bdev_ocf_create ocfWT wt Nvme0n1p0 Nvme0n1p1
$rpc_py bdev_ocf_create ocfPT pt Nvme0n1p2 Nvme0n1p3 $rpc_py bdev_ocf_create ocfPT pt Nvme0n1p2 Nvme0n1p3
$rpc_py bdev_ocf_create ocfWB0 wb Nvme0n1p4 Nvme0n1p5 $rpc_py bdev_ocf_create ocfWB0 wb Nvme0n1p4 Nvme0n1p5
$rpc_py bdev_ocf_create ocfWB1 wb Nvme0n1p4 Nvme0n1p6 $rpc_py bdev_ocf_create ocfWB1 wb Nvme0n1p4 Nvme0n1p6


@ -23,7 +23,7 @@ waitforlisten $spdk_pid
# Create ocf on persistent storage # Create ocf on persistent storage
$rpc_py bdev_ocf_create ocfWT wt aio0 aio1 $rpc_py bdev_ocf_create ocfWT wt aio0 aio1
# Check that ocfWT was created properly # Check that ocfWT was created properly


@ -3,9 +3,11 @@
testdir=$(readlink -f $(dirname $0)) testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..) rootdir=$(readlink -f $testdir/../..)
function usage() function usage() {
{ [[ -n $2 ]] && (
[[ -n $2 ]] && ( echo "$2"; echo ""; ) echo "$2"
echo ""
)
echo "Devstack installation script" echo "Devstack installation script"
echo "Usage: $(basename $1) [OPTIONS]" echo "Usage: $(basename $1) [OPTIONS]"
echo "--branch=BRANCH Define which version of openstack" echo "--branch=BRANCH Define which version of openstack"
@ -15,18 +17,17 @@ function usage()
exit 0 exit 0
} }
branch="master" branch="master"
while getopts 'h-:' optchar; do while getopts 'h-:' optchar; do
case "$optchar" in case "$optchar" in
-) -)
case "$OPTARG" in case "$OPTARG" in
help) usage $0 ;; help) usage $0 ;;
branch=*) branch="${OPTARG#*=}" ;; branch=*) branch="${OPTARG#*=}" ;;
esac esac
;; ;;
h) usage $0 ;; h) usage $0 ;;
*) usage $0 "Invalid argument '$OPTARG'" *) usage $0 "Invalid argument '$OPTARG'" ;;
esac esac
done done
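In the hunk above the single-line subshell `( echo "$2"; echo ""; )` is expanded onto separate lines, and the fall-through `case` branch gains its `;;` on the same line as the command. A trimmed, hypothetical option loop in the new layout (the messages are placeholders, not the script's real usage text):

branch="master"
while getopts 'h-:' optchar; do
	case "$optchar" in
		-)
			case "$OPTARG" in
				help) echo "usage: $(basename "$0") [OPTIONS]" ;;
				branch=*) branch="${OPTARG#*=}" ;;
			esac
			;;
		h) echo "usage: $(basename "$0") [OPTIONS]" ;;
		*) echo "Invalid argument '$OPTARG'" >&2 ;;
	esac
done
echo "branch=$branch"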


@ -9,15 +9,15 @@ TEST_TRANSPORT='rdma'
nvmftestinit nvmftestinit
function finish_test { function finish_test() {
{ {
"$rpc_py" bdev_lvol_delete_lvstore -l lvs0 "$rpc_py" bdev_lvol_delete_lvstore -l lvs0
kill -9 $rpc_proxy_pid kill -9 $rpc_proxy_pid
rm "$testdir/conf.json" rm "$testdir/conf.json"
} || : } || :
} }
cat <<-JSON >"$testdir/conf.json" cat <<- JSON > "$testdir/conf.json"
{"subsystems":[ {"subsystems":[
$("$rootdir/scripts/gen_nvme.sh" --json) $("$rootdir/scripts/gen_nvme.sh" --json)
]} ]}
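The heredoc line in this hunk gains spaces around its operators: `cat <<-JSON >"$testdir/conf.json"` becomes `cat <<- JSON > "$testdir/conf.json"`. The `<<-` form still strips leading tabs from the body, so indented content reaches the file unindented. A self-contained sketch writing to a temporary file instead of the test's conf.json (the body line below is tab-indented so `<<-` strips it):

tmpfile=$(mktemp)
cat <<- JSON > "$tmpfile"
	{"subsystems": []}
JSON
cat "$tmpfile"
rm "$tmpfile"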


@ -1,8 +1,7 @@
# Prints error message and return error code, closes vhost app and remove # Prints error message and return error code, closes vhost app and remove
# pmem pool file # pmem pool file
# input: error message, error code # input: error message, error code
function error() function error() {
{
local error_code=${2:-1} local error_code=${2:-1}
echo "===========" echo "==========="
echo -e "ERROR: $1" echo -e "ERROR: $1"
@ -16,8 +15,7 @@ function error()
# check if there is pool file & remove it # check if there is pool file & remove it
# input: path to pool file # input: path to pool file
# default: $default_pool_file # default: $default_pool_file
function pmem_clean_pool_file() function pmem_clean_pool_file() {
{
local pool_file=${1:-$default_pool_file} local pool_file=${1:-$default_pool_file}
if [ -f $pool_file ]; then if [ -f $pool_file ]; then
@ -29,8 +27,7 @@ function pmem_clean_pool_file()
# create new pmem file # create new pmem file
# input: path to pool file, size in MB, block_size # input: path to pool file, size in MB, block_size
# default: $default_pool_file 32 512 # default: $default_pool_file 32 512
function pmem_create_pool_file() function pmem_create_pool_file() {
{
local pool_file=${1:-$default_pool_file} local pool_file=${1:-$default_pool_file}
local size=${2:-32} local size=${2:-32}
local block_size=${3:-512} local block_size=${3:-512}
@ -46,8 +43,7 @@ function pmem_create_pool_file()
fi fi
} }
function pmem_unmount_ramspace function pmem_unmount_ramspace() {
{
if [ -d "$testdir/ramspace" ]; then if [ -d "$testdir/ramspace" ]; then
if mount | grep -q "$testdir/ramspace"; then if mount | grep -q "$testdir/ramspace"; then
umount $testdir/ramspace umount $testdir/ramspace
@ -57,16 +53,14 @@ function pmem_unmount_ramspace
fi fi
} }
function pmem_print_tc_name function pmem_print_tc_name() {
{
echo "" echo ""
echo "===============================================================" echo "==============================================================="
echo "Now running: $1" echo "Now running: $1"
echo "===============================================================" echo "==============================================================="
} }
function vhost_start() function vhost_start() {
{
local vhost_pid local vhost_pid
$rootdir/app/vhost/vhost & $rootdir/app/vhost/vhost &
@ -76,8 +70,7 @@ function vhost_start()
waitforlisten $vhost_pid waitforlisten $vhost_pid
} }
function vhost_kill() function vhost_kill() {
{
local vhost_pid_file="$testdir/vhost.pid" local vhost_pid_file="$testdir/vhost.pid"
local vhost_pid local vhost_pid
vhost_pid="$(cat $vhost_pid_file)" vhost_pid="$(cat $vhost_pid_file)"
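The pattern throughout this last file is the same brace move seen earlier: `function name` followed by `{` on its own line collapses into `function name() {`. One more hypothetical example in the target style, unrelated to the pmem helpers themselves:

function cleanup_tmpdir() {
	local dir=${1:-/tmp/pmem_sketch} # invented default path
	if [ -d "$dir" ]; then
		rm -rf "$dir"
	fi
}
cleanup_tmpdir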

Some files were not shown because too many files have changed in this diff.