test/lvol: Rewrite clear_method_none and clear_method_unmap to Bash

These are the final test cases from this series of ports, so clean up
the python script and its library as well.

Change-Id: I9923fc93fbeb8c2d54dd2dad5acae41eaf5cfff5
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/934
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
This commit is contained in:
Michal Berger 2020-02-17 13:57:10 +01:00 committed by Tomasz Zawadzki
parent 75b782be4f
commit dcae88350e
6 changed files with 117 additions and 758 deletions

View File

@ -239,8 +239,6 @@ if [ $SPDK_RUN_FUNCTIONAL_TEST -eq 1 ]; then
fi
if [ $SPDK_TEST_LVOL -eq 1 ]; then
#TODO: rewrite lvol tests in bash.
run_test "lvol" ./test/lvol/lvol.sh --test-cases=all
run_test "lvol2" ./test/lvol/lvol2.sh
run_test "blob_io_wait" ./test/blobstore/blob_io_wait/blob_io_wait.sh
fi

View File

@ -4,6 +4,7 @@ testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/lvol/common.sh
source "$rootdir/test/bdev/nbd_common.sh"
# create empty lvol store and verify its parameters
function test_construct_lvs() {
@ -165,6 +166,120 @@ function test_construct_lvs_clear_methods() {
check_leftover_devices
}
# Test for clear_method equals to none
function test_construct_lvol_fio_clear_method_none() {
# Verify clear_method=none: data written to an lvol must still be present
# on the backing malloc bdev after the lvol and its lvol store are
# deleted (i.e. nothing gets cleared on delete).
local nbd_name=/dev/nbd0
local clear_method=none
local lvstore_name=lvs_test lvstore_uuid
local lvol_name=lvol_test lvol_uuid
local malloc_dev
# 256 MiB malloc bdev backs the lvol store; block size comes from common.sh.
malloc_dev=$(rpc_cmd bdev_malloc_create 256 "$MALLOC_BS")
lvstore_uuid=$(rpc_cmd bdev_lvol_create_lvstore "$malloc_dev" "$lvstore_name")
# Populate jq_out[] with the lvstore parameters (cluster_size, total_data_clusters, ...).
get_lvs_jq bdev_lvol_get_lvstores -u "$lvstore_uuid"
# Create a one-cluster lvol with clear_method=none (size argument is in MiB).
lvol_uuid=$(rpc_cmd bdev_lvol_create \
-c "$clear_method" \
-u "$lvstore_uuid" \
"$lvol_name" \
$(( jq_out["cluster_size"] / 1024**2 )))
# Fill the entire lvol with a known pattern via nbd.
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" "$nbd_name"
run_fio_test "$nbd_name" 0 "${jq_out["cluster_size"]}" write 0xdd
nbd_stop_disks "$DEFAULT_RPC_ADDR" "$nbd_name"
# Delete lvol + lvstore so the raw malloc bdev contents can be inspected.
rpc_cmd bdev_lvol_delete "$lvol_uuid"
rpc_cmd bdev_lvol_delete_lvstore -u "$lvstore_uuid"
nbd_start_disks "$DEFAULT_RPC_ADDR" "$malloc_dev" "$nbd_name"
local metadata_pages
local last_metadata_lba
local offset_metadata_end
local last_cluster_of_metadata
local offset
local size_metadata_end
# NOTE(review): this formula mirrors the blobstore's on-disk metadata
# layout (super block + per-cluster md pages + 3 bit-array regions rounded
# up to 4 KiB pages) — confirm against the blobstore format documentation
# if the layout ever changes.
metadata_pages=$(calc "1 + ${jq_out["total_data_clusters"]} + ceil(5 + ceil(${jq_out["total_data_clusters"]} / 8) / 4096) * 3")
last_metadata_lba=$(( metadata_pages * 4096 / MALLOC_BS ))
offset_metadata_end=$(( last_metadata_lba * MALLOC_BS ))
last_cluster_of_metadata=$(calc "ceil($metadata_pages / ${jq_out["cluster_size"]} / 4096)")
# The metadata occupies at least one cluster.
last_cluster_of_metadata=$(( last_cluster_of_metadata == 0 ? 1 : last_cluster_of_metadata ))
offset=$(( last_cluster_of_metadata * jq_out["cluster_size"] ))
size_metadata_end=$(( offset - offset_metadata_end ))
# Check if data on area between end of metadata and first cluster of lvol bdev remained unchanged.
run_fio_test "$nbd_name" "$offset_metadata_end" "$size_metadata_end" "read" 0x00
# Check if data on first lvol bdevs remains unchanged.
run_fio_test "$nbd_name" "$offset" "${jq_out["cluster_size"]}" "read" 0xdd
nbd_stop_disks "$DEFAULT_RPC_ADDR" "$nbd_name"
rpc_cmd bdev_malloc_delete "$malloc_dev"
check_leftover_devices
}
# Test for clear_method equals to unmap
function test_construct_lvol_fio_clear_method_unmap() {
# Verify clear_method=unmap: deleting the lvol must unmap its clusters,
# and a malloc bdev zeroes unmapped data — so previously written data in
# the lvol's region must read back as zeroes afterwards.
local nbd_name=/dev/nbd0
local clear_method=unmap
local lvstore_name=lvs_test lvstore_uuid
local lvol_name=lvol_test lvol_uuid
local malloc_dev
malloc_dev=$(rpc_cmd bdev_malloc_create 256 "$MALLOC_BS")
# Pre-fill the whole malloc bdev so we can later tell unmapped (zeroed)
# regions apart from untouched ones.
nbd_start_disks "$DEFAULT_RPC_ADDR" "$malloc_dev" "$nbd_name"
run_fio_test "$nbd_name" 0 $(( 256 * 1024**2 )) write 0xdd
nbd_stop_disks "$DEFAULT_RPC_ADDR" "$nbd_name"
# The lvstore itself uses clear-method none so only the lvol's unmap matters.
lvstore_uuid=$(rpc_cmd bdev_lvol_create_lvstore --clear-method none "$malloc_dev" "$lvstore_name")
# Populate jq_out[] with the lvstore parameters.
get_lvs_jq bdev_lvol_get_lvstores -u "$lvstore_uuid"
# Create a one-cluster lvol with clear_method=unmap (size argument is in MiB).
lvol_uuid=$(rpc_cmd bdev_lvol_create \
-c "$clear_method" \
-u "$lvstore_uuid" \
"$lvol_name" \
$(( jq_out["cluster_size"] / 1024**2 )))
# The pre-filled pattern must still be visible through the fresh lvol.
nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid" "$nbd_name"
run_fio_test "$nbd_name" 0 "${jq_out["cluster_size"]}" read 0xdd
nbd_stop_disks "$DEFAULT_RPC_ADDR" "$nbd_name"
# Delete lvol + lvstore, then inspect the raw malloc bdev.
rpc_cmd bdev_lvol_delete "$lvol_uuid"
rpc_cmd bdev_lvol_delete_lvstore -u "$lvstore_uuid"
nbd_start_disks "$DEFAULT_RPC_ADDR" "$malloc_dev" "$nbd_name"
local metadata_pages
local last_metadata_lba
local offset_metadata_end
local last_cluster_of_metadata
local offset
local size_metadata_end
# NOTE(review): same blobstore metadata-layout math as in the
# clear_method_none test — confirm against the blobstore format docs.
metadata_pages=$(calc "1 + ${jq_out["total_data_clusters"]} + ceil(5 + ceil(${jq_out["total_data_clusters"]} / 8) / 4096) * 3")
last_metadata_lba=$(( metadata_pages * 4096 / MALLOC_BS ))
offset_metadata_end=$(( last_metadata_lba * MALLOC_BS ))
last_cluster_of_metadata=$(calc "ceil($metadata_pages / ${jq_out["cluster_size"]} / 4096)")
# The metadata occupies at least one cluster.
last_cluster_of_metadata=$(( last_cluster_of_metadata == 0 ? 1 : last_cluster_of_metadata ))
offset=$(( last_cluster_of_metadata * jq_out["cluster_size"] ))
size_metadata_end=$(( offset - offset_metadata_end ))
# Check if data on area between end of metadata and first cluster of lvol bdev remained unchanged.
run_fio_test "$nbd_name" "$offset_metadata_end" "$size_metadata_end" "read" 0xdd
# Check if data on lvol bdev was zeroed. Malloc bdev should zero any data that is unmapped.
run_fio_test "$nbd_name" "$offset" "${jq_out["cluster_size"]}" "read" 0x00
nbd_stop_disks "$DEFAULT_RPC_ADDR" "$nbd_name"
rpc_cmd bdev_malloc_delete "$malloc_dev"
check_leftover_devices
}
# create lvs + lvol on top, verify lvol's parameters
function test_construct_lvol() {
# create an lvol store
@ -436,6 +551,8 @@ run_test "test_construct_two_lvs_on_the_same_bdev" test_construct_two_lvs_on_the
run_test "test_construct_lvs_conflict_alias" test_construct_lvs_conflict_alias
run_test "test_construct_lvs_different_cluster_size" test_construct_lvs_different_cluster_size
run_test "test_construct_lvs_clear_methods" test_construct_lvs_clear_methods
run_test "test_construct_lvol_fio_clear_method_none" test_construct_lvol_fio_clear_method_none
run_test "test_construct_lvol_fio_clear_method_unmap" test_construct_lvol_fio_clear_method_unmap
run_test "test_construct_lvol" test_construct_lvol
run_test "test_construct_multi_lvols" test_construct_multi_lvols
run_test "test_construct_lvols_conflict_alias" test_construct_lvols_conflict_alias

View File

@ -1,82 +0,0 @@
#!/usr/bin/env bash
# Legacy driver for the python-based lvol tests: parses options, starts a
# vhost app, runs lvol_test.py against it, then tears everything down.
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..)
source $rootdir/test/common/autotest_common.sh
total_size=256
block_size=512
test_cases=all
x=""
rpc_py="$rootdir/scripts/rpc.py "
# Print usage/help; $1 = invoked script name, $2 = optional error message.
function usage() {
[[ -n $2 ]] && ( echo "$2"; echo ""; )
echo "Shortcut script for doing automated lvol tests"
echo "Usage: $(basename $1) [OPTIONS]"
echo
echo "-h, --help print help and exit"
echo " --total-size Size of malloc bdev in MB (int > 0)"
echo " --block-size Block size for this bdev"
echo "-x set -x for script debug"
echo " --test-cases= List test cases which will be run:
350: 'nested_destroy_logical_volume_negative',
400: 'nested_construct_logical_volume_positive',
850: 'clear_method_none',
851: 'clear_method_unmap',
or
all: This parameter runs all tests
Ex: \"1,2,19,20\", default: all"
echo
echo
}
# Option parsing: short options via getopts, long options via the '-' trick.
while getopts 'xh-:' optchar; do
case "$optchar" in
-)
case "$OPTARG" in
help) usage $0 && exit 0;;
total-size=*) total_size="${OPTARG#*=}" ;;
block-size=*) block_size="${OPTARG#*=}" ;;
test-cases=*) test_cases="${OPTARG#*=}" ;;
*) usage $0 "Invalid argument '$OPTARG'" && exit 1 ;;
esac
;;
h) usage $0 && exit 0 ;;
x) set -x
x="-x" ;;
*) usage $0 "Invalid argument '$OPTARG'" && exit 1 ;;
esac
done
shift $(( OPTIND - 1 ))
### Function starts vhost app
function vhost_start()
{
modprobe nbd
$rootdir/app/vhost/vhost &
vhost_pid=$!
echo $vhost_pid > $testdir/vhost.pid
waitforlisten $vhost_pid
}
### Function stops vhost app
function vhost_kill()
{
### Kill with SIGKILL param
if pkill -F $testdir/vhost.pid; then
sleep 1
fi
rm $testdir/vhost.pid || true
}
# Clean up vhost and scratch files on any abnormal exit.
trap 'vhost_kill; rm -f $testdir/aio_bdev_0 $testdir/aio_bdev_1; exit 1' SIGINT SIGTERM EXIT
truncate -s 400M $testdir/aio_bdev_0 $testdir/aio_bdev_1
vhost_start
# NOTE(review): test_cases is a plain scalar, so "${test_cases[@]}" expands
# to its single value; the array syntax is misleading but harmless here.
$testdir/lvol_test.py $rpc_py $total_size $block_size $testdir $rootdir/app/vhost "${test_cases[@]}"
# NOTE(review): vhost_kill takes no parameters; the "0" argument is ignored.
vhost_kill 0
rm -rf $testdir/aio_bdev_0 $testdir/aio_bdev_1
trap - SIGINT SIGTERM EXIT

View File

@ -1,45 +0,0 @@
#!/usr/bin/env python3
import sys
from test_cases import *
if __name__ == "__main__":
# Entry point; lvol.sh invokes this as:
#   lvol_test.py <rpc.py> <total_size_mb> <block_size> <test_dir> <app_path> <tc_list>
# Exit status is non-zero when any selected test case fails.
rpc_py = None
total_size = None
block_size = None
num_test = None
fail_count = 0
tc_failed = []
tc_list = []
# Require exactly 6 arguments and no more selected cases than exist.
if len(sys.argv) == 7 and len(sys.argv[6].split(',')) <= test_counter():
rpc_py = sys.argv[1]
total_size = int(sys.argv[2])
block_size = int(sys.argv[3])
base_dir_path = sys.argv[4]
app_path = sys.argv[5]
tc_list = sys.argv[6].split(',')
else:
# NOTE(review): execution falls through with rpc_py=None (and
# base_dir_path undefined) after this message; the TestCases() call
# below then raises and is caught by the except clause.
print("Invalid argument")
try:
tc = TestCases(rpc_py, total_size, block_size, base_dir_path, app_path)
if "all" in tc_list:
# Collect every test_caseNNN method name, sorted numerically by NNN.
tc_list = sorted([i.split("test_case")[1] for i in dir(TestCases) if "test_case" in i], key=int)
for num_test in tc_list:
fail_count = 0
# exec() can rebind fail_count here because this runs at module
# scope, where globals() and locals() are the same mapping.
exec("fail_count += tc.test_case{num_test}"
"()".format(num_test=num_test))
if fail_count:
tc_failed.append(num_test)
if not tc_failed:
print("RESULT: All test cases - PASS")
elif tc_failed:
print("RESULT: Some test cases FAIL")
print(tc_failed)
sys.exit(1)
except BaseException:
print("Test: {num_test} - FAIL".format(num_test=num_test))
sys.exit(1)

View File

@ -1,248 +0,0 @@
import json
import sys
from uuid import UUID
from subprocess import check_output, CalledProcessError
class Spdk_Rpc(object):
# Thin dynamic proxy around scripts/rpc.py: any attribute access returns
# a callable that shells out to "python rpc.py <name> <args...>" and
# returns a (stdout, returncode) tuple.
def __init__(self, rpc_py):
self.rpc_py = rpc_py
def __getattr__(self, name):
def call(*args):
# Args are joined with spaces, unquoted — NOTE(review): arguments
# containing shell metacharacters would break or be unsafe here.
cmd = "{} {} {}".format(sys.executable, self.rpc_py, name)
for arg in args:
cmd += " {}".format(arg)
try:
output = check_output(cmd, shell=True)
# Success: (decoded stdout with trailing newline stripped, 0).
return output.decode('ascii').rstrip('\n'), 0
except CalledProcessError as e:
print("ERROR: RPC Command {cmd} "
"execution failed:". format(cmd=cmd))
print("Failed command output:")
print(e.output)
# Failure: (decoded stdout, non-zero return code).
return e.output.decode('ascii'), e.returncode
return call
class Commands_Rpc(object):
# High-level wrappers around SPDK RPC calls used by the lvol tests.
# Most methods return an integer RPC return code (0 == success) or
# parsed JSON output; check_* methods return 0/1 (and 2 in one case).
def __init__(self, rpc_py):
self.rpc = Spdk_Rpc(rpc_py)
# Verify a bdev with the given UUID (and optional alias) and size in MiB
# is reported by bdev_get_bdevs; returns 0 on match, 1 otherwise.
def check_bdev_get_bdevs_methods(self, uuid_bdev, bdev_size_mb, bdev_alias=""):
print("INFO: Check RPC COMMAND bdev_get_bdevs")
output = self.rpc.bdev_get_bdevs()[0]
json_value = json.loads(output)
for i in range(len(json_value)):
uuid_json = json_value[i]['name']
aliases = json_value[i]['aliases']
if uuid_bdev in [uuid_json]:
print("Info: UUID:{uuid} is found in RPC Command: "
"gets_bdevs response".format(uuid=uuid_bdev))
# Check if human-friendly alias is as expected
if bdev_alias and aliases:
if bdev_alias not in aliases:
print("ERROR: Expected bdev alias not found")
print("Expected: {name}".format(name=bdev_alias))
print("Actual: {aliases}".format(aliases=aliases))
return 1
# num_block and block_size have values in bytes
num_blocks = json_value[i]['num_blocks']
block_size = json_value[i]['block_size']
if num_blocks * block_size == bdev_size_mb * 1024 * 1024:
print("Info: Response bdev_get_bdevs command is "
"correct. Params: uuid_bdevs: {uuid}, bdev_size "
"{size}".format(uuid=uuid_bdev,
size=bdev_size_mb))
return 0
print("INFO: UUID:{uuid} or bdev_size:{bdev_size_mb} not found in "
"RPC COMMAND bdev_get_bdevs: "
"{json_value}".format(uuid=uuid_bdev, bdev_size_mb=bdev_size_mb,
json_value=json_value))
return 1
# Verify an lvstore with the given base bdev/UUID (and optional cluster
# size and name) exists; returns 0 on match, 1 on mismatch, 2 when no
# lvstores exist at all.
def check_bdev_lvol_get_lvstores(self, base_name, uuid, cluster_size=None, lvs_name=""):
print("INFO: RPC COMMAND bdev_lvol_get_lvstores")
json_value = self.bdev_lvol_get_lvstores()
if json_value:
for i in range(len(json_value)):
json_uuid = json_value[i]['uuid']
json_cluster = json_value[i]['cluster_size']
json_base_name = json_value[i]['base_bdev']
json_name = json_value[i]['name']
if base_name in json_base_name \
and uuid in json_uuid:
print("INFO: base_name:{base_name} is found in RPC "
"Command: bdev_lvol_get_lvstores "
"response".format(base_name=base_name))
print("INFO: UUID:{uuid} is found in RPC Command: "
"bdev_lvol_get_lvstores response".format(uuid=uuid))
if cluster_size:
if str(cluster_size) in str(json_cluster):
print("Info: Cluster size :{cluster_size} is found in RPC "
"Command: bdev_lvol_get_lvstores "
"response".format(cluster_size=cluster_size))
else:
print("ERROR: Wrong cluster size in lvol store")
# NOTE(review): the two prints below pass arguments to
# .format() but the strings have no placeholders, so the
# values are never shown.
print("Expected:".format(cluster_size))
print("Actual:".format(json_cluster))
return 1
# Also check name if param is provided:
if lvs_name:
if lvs_name not in json_name:
print("ERROR: Lvol store human-friendly name does not match")
print("Expected: {lvs_name}".format(lvs_name=lvs_name))
print("Actual: {name}".format(name=json_name))
return 1
return 0
print("FAILED: UUID: lvol store {uuid} on base_bdev: "
"{base_name} not found in bdev_lvol_get_lvstores()".format(uuid=uuid,
base_name=base_name))
return 1
else:
print("INFO: Lvol store not exist")
return 2
return 0
# Create a malloc bdev of total_size MiB; returns its name.
def bdev_malloc_create(self, total_size, block_size):
print("INFO: RPC COMMAND bdev_malloc_create")
output = self.rpc.bdev_malloc_create(total_size, block_size)[0]
return output.rstrip('\n')
# Create an lvstore; cluster_size takes precedence over clear_method when
# both are given. Returns the new lvstore UUID.
def bdev_lvol_create_lvstore(self, base_name, lvs_name, cluster_size=None, clear_method=None):
print("INFO: RPC COMMAND bdev_lvol_create_lvstore")
if cluster_size:
output = self.rpc.bdev_lvol_create_lvstore(base_name,
lvs_name,
"-c {cluster_sz}".format(cluster_sz=cluster_size))[0]
elif clear_method:
output = self.rpc.bdev_lvol_create_lvstore(base_name,
lvs_name,
"--clear-method {clear_m}".format(clear_m=clear_method))[0]
else:
output = self.rpc.bdev_lvol_create_lvstore(base_name, lvs_name)[0]
return output.rstrip('\n')
# Create an lvol bdev addressed either by lvstore UUID (-u) or name (-l);
# returns the new bdev's identifier.
def bdev_lvol_create(self, uuid, lbd_name, size, thin=False, clear_method=None):
print("INFO: RPC COMMAND bdev_lvol_create")
try:
uuid_obj = UUID(uuid)
name_opt = "-u"
except ValueError:
name_opt = "-l"
thin_provisioned = ""
if thin:
thin_provisioned = "-t"
c_m = ""
if clear_method:
c_m = "--clear-method {clear_m}".format(clear_m=clear_method)
output = self.rpc.bdev_lvol_create(name_opt, uuid, lbd_name, size, thin_provisioned, c_m)[0]
return output.rstrip('\n')
# Delete an lvstore addressed by UUID (-u) or name (-l); returns rc.
def bdev_lvol_delete_lvstore(self, uuid):
print("INFO: RPC COMMAND bdev_lvol_delete_lvstore")
try:
uuid_obj = UUID(uuid)
name_opt = "-u"
except ValueError:
name_opt = "-l"
output, rc = self.rpc.bdev_lvol_delete_lvstore(name_opt, uuid)
return rc
# Delete a malloc bdev; returns rc.
def bdev_malloc_delete(self, base_name):
print("INFO: RPC COMMAND bdev_malloc_delete")
output, rc = self.rpc.bdev_malloc_delete(base_name)
return rc
# Delete an lvol bdev; returns rc.
def bdev_lvol_delete(self, bdev_name):
print("INFO: RPC COMMAND bdev_lvol_delete")
output, rc = self.rpc.bdev_lvol_delete(bdev_name)
return rc
# Resize an lvol bdev; returns rc.
def bdev_lvol_resize(self, uuid, new_size):
print("INFO: RPC COMMAND bdev_lvol_resize")
output, rc = self.rpc.bdev_lvol_resize(uuid, new_size)
return rc
# Mark an lvol bdev read-only; returns rc.
def bdev_lvol_set_read_only(self, uuid):
print("INFO: RPC COMMAND bdev_lvol_set_read_only")
output, rc = self.rpc.bdev_lvol_set_read_only(uuid)
return rc
# Expose a bdev as an nbd device; returns rc.
def nbd_start_disk(self, bdev_name, nbd_name):
print("INFO: RPC COMMAND nbd_start_disk")
output, rc = self.rpc.nbd_start_disk(bdev_name, nbd_name)
return rc
# Detach an nbd device; returns rc.
def nbd_stop_disk(self, nbd_name):
print("INFO: RPC COMMAND nbd_stop_disk")
output, rc = self.rpc.nbd_stop_disk(nbd_name)
return rc
# Return parsed lvstore list (optionally filtered by name via -l).
def bdev_lvol_get_lvstores(self, name=None):
print("INFO: RPC COMMAND bdev_lvol_get_lvstores")
if name:
output = json.loads(self.rpc.bdev_lvol_get_lvstores("-l", name)[0])
else:
output = json.loads(self.rpc.bdev_lvol_get_lvstores()[0])
return output
# Return only bdevs whose product_name marks them as logical volumes.
def get_lvol_bdevs(self):
print("INFO: RPC COMMAND bdev_get_bdevs; lvol bdevs only")
output = []
rpc_output = json.loads(self.rpc.bdev_get_bdevs()[0])
for bdev in rpc_output:
if bdev["product_name"] == "Logical Volume":
output.append(bdev)
return output
# Return the first bdev matching name, or None when not found.
def get_lvol_bdev_with_name(self, name):
print("INFO: RPC COMMAND bdev_get_bdevs; lvol bdevs only")
rpc_output = json.loads(self.rpc.bdev_get_bdevs("-b", name)[0])
if len(rpc_output) > 0:
return rpc_output[0]
return None
# Rename an lvstore; returns rc.
def bdev_lvol_rename_lvstore(self, old_name, new_name):
print("INFO: Renaming lvol store from {old} to {new}".format(old=old_name, new=new_name))
output, rc = self.rpc.bdev_lvol_rename_lvstore(old_name, new_name)
return rc
# Rename an lvol bdev; returns rc.
def bdev_lvol_rename(self, old_name, new_name):
print("INFO: Renaming lvol bdev from {old} to {new}".format(old=old_name, new=new_name))
output, rc = self.rpc.bdev_lvol_rename(old_name, new_name)
return rc
# Snapshot an lvol bdev; returns rc.
def bdev_lvol_snapshot(self, bdev_name, snapshot_name):
print("INFO: RPC COMMAND bdev_lvol_snapshot")
output, rc = self.rpc.bdev_lvol_snapshot(bdev_name, snapshot_name)
return rc
# Clone a snapshot; returns rc.
def bdev_lvol_clone(self, snapshot_name, clone_name):
print("INFO: RPC COMMAND bdev_lvol_clone")
output, rc = self.rpc.bdev_lvol_clone(snapshot_name, clone_name)
return rc
# Inflate a thin-provisioned clone; returns rc.
def bdev_lvol_inflate(self, clone_name):
print("INFO: RPC COMMAND bdev_lvol_inflate")
output, rc = self.rpc.bdev_lvol_inflate(clone_name)
return rc
# Decouple a clone from its parent; returns rc.
def bdev_lvol_decouple_parent(self, clone_name):
print("INFO: RPC COMMAND bdev_lvol_decouple_parent")
output, rc = self.rpc.bdev_lvol_decouple_parent(clone_name)
return rc
# Create an aio bdev backed by a file; returns rc.
def bdev_aio_create(self, aio_path, aio_name, aio_bs=""):
print("INFO: RPC COMMAND bdev_aio_create")
output, rc = self.rpc.bdev_aio_create(aio_path, aio_name, aio_bs)
return rc
# Delete an aio bdev; returns rc.
def bdev_aio_delete(self, aio_name):
print("INFO: RPC COMMAND bdev_aio_delete")
output, rc = self.rpc.bdev_aio_delete(aio_name)
return rc

View File

@ -1,381 +0,0 @@
import io
import math
import time
import sys
import random
import signal
import subprocess
import pprint
import socket
import threading
import os
from errno import ESRCH
from os import kill, path, unlink, path, listdir, remove
from rpc_commands_lib import Commands_Rpc
from time import sleep
from uuid import uuid4
MEGABYTE = 1024 * 1024
# PID of the most recently launched fio process (-1 = none yet); set by run_fio().
current_fio_pid = -1
# ## Objective
# The purpose of these tests is to verify the possibility of using lvol configuration in SPDK.
#
# ## Methodology
# Configuration in test is to be done using example stub application.
# All management is done using RPC calls, including logical volumes management.
# All tests are performed using malloc backends.
# One exception to malloc backends are tests for logical volume
# tasting - these require persistent memory like NVMe backend.
#
# Tests will be executed as scenarios - sets of smaller test steps
# in which return codes from RPC calls are validated.
# Some configuration calls may also be validated by use of
# "get_*" RPC calls, which provide additional information for verifying
# results.
#
# Tests with thin provisioned lvol bdevs, snapshots and clones are using nbd devices.
# Before writing/reading to lvol bdev, bdev is installed with rpc nbd_start_disk.
# After finishing writing/reading, rpc nbd_stop_disk is used.
def is_process_alive(pid):
    """Probe *pid* with signal 0 (no signal is delivered).

    Returns 0 when the process exists, 1 otherwise — note the
    shell-style inverted convention (0 means "alive").
    """
    try:
        os.kill(pid, 0)
        return 0
    except Exception:
        # Any failure (no such process, permission, bad pid) counts as dead.
        return 1
def get_fio_cmd(nbd_disk, offset, size, rw, pattern, extra_params=""):
    """Build the fio command line used by the lvol tests.

    When *pattern* is non-empty, verification options are appended so fio
    checks the data against the given pattern.
    """
    verify_opts = ""
    if pattern:
        verify_opts = ("--do_verify=1 --verify=pattern --verify_pattern={}"
                       " --verify_state_save=0").format(pattern)
    base = ("fio --name=fio_test --filename={} --offset={} --size={}"
            " --rw={} --direct=1 {} {}")
    return base.format(nbd_disk, offset, size, rw, extra_params, verify_opts)
def run_fio(fio_cmd, expected_ret_value):
# Execute the fio command line in a shell and compare its exit status to
# expected_ret_value; returns 0 on match, 1 otherwise. Side effect:
# publishes the child's PID in the module-global current_fio_pid.
global current_fio_pid
try:
proc = subprocess.Popen([fio_cmd], shell=True)
# Expose the child's PID so other threads can signal/inspect it.
current_fio_pid = proc.pid
proc.wait()
rv = proc.returncode
except Exception as e:
print("ERROR: Fio test ended with unexpected exception.")
rv = 1
if expected_ret_value == rv:
return 0
if rv == 0:
print("ERROR: Fio test ended with unexpected success")
else:
print("ERROR: Fio test ended with unexpected failure")
return 1
class FioThread(threading.Thread):
# Runs one fio job in a background thread; the result lands in self.rv
# (0 == exit status matched expected_ret_value), via run_fio().
def __init__(self, nbd_disk, offset, size, rw, pattern, expected_ret_value,
extra_params=""):
super(FioThread, self).__init__()
# Command line is pre-built here so run() only has to execute it.
self.fio_cmd = get_fio_cmd(nbd_disk, offset, size, rw, pattern,
extra_params=extra_params)
self.rv = 1
self.expected_ret_value = expected_ret_value
def run(self):
print("INFO: Starting fio")
self.rv = run_fio(self.fio_cmd, self.expected_ret_value)
print("INFO: Fio test finished")
def test_counter():
    """Return the number of test_case* methods defined on TestCases."""
    return sum(1 for attr in dir(TestCases) if "test_case" in attr)
def case_message(func):
    """Decorator for test_caseNNN methods: prints START/END banners with the
    human-readable test name and passes through the wrapped function's fail
    count (0 == PASS).
    """
    def inner(*args, **kwargs):
        test_name = {
            # logical volume clear_method test
            850: 'clear_method_none',
            851: 'clear_method_unmap',
        }
        # Fix: str.strip('test_case') removes *characters* from that set,
        # not the literal prefix, so a test number containing such chars
        # would be mangled. Slice the fixed-length prefix off instead.
        num = int(func.__name__[len("test_case"):])
        print("************************************")
        print("START TEST CASE {name}".format(name=test_name[num]))
        print("************************************")
        fail_count = func(*args, **kwargs)
        print("************************************")
        if not fail_count:
            print("END TEST CASE {name} PASS".format(name=test_name[num]))
        else:
            print("END TEST CASE {name} FAIL".format(name=test_name[num]))
        print("************************************")
        return fail_count
    return inner
class TestCases(object):
# Container for the lvol test cases; each test_caseNNN method returns a
# fail count (0 == pass). Driven by lvol_test.py via @case_message.
def __init__(self, rpc_py, total_size, block_size, base_dir_path, app_path):
self.c = Commands_Rpc(rpc_py)
self.total_size = total_size
self.block_size = block_size
self.cluster_size = None
self.path = base_dir_path
self.app_path = app_path
self.lvs_name = "lvs_test"
self.lbd_name = "lbd_test"
self.vhost_config_path = path.join(path.dirname(sys.argv[0]), 'vhost.conf')
# Random UUID for negative lvstore tests.
def _gen_lvs_uuid(self):
return str(uuid4())
# Random, practically-unique lvol bdev name.
def _gen_lvb_uuid(self):
return "_".join([str(uuid4()), str(random.randrange(9999999999))])
# Byte-compare two block devices with cmp(1); returns 0 when the cmp exit
# status matches expected_ret_value, 1 otherwise.
def compare_two_disks(self, disk1, disk2, expected_ret_value):
cmp_cmd = "cmp %s %s" % (disk1, disk2)
try:
process = subprocess.check_output(cmp_cmd, stderr=subprocess.STDOUT, shell=True)
rv = 0
except subprocess.CalledProcessError as ex:
rv = 1
except Exception as e:
print("ERROR: Cmp ended with unexpected exception.")
rv = 1
if expected_ret_value == rv:
return 0
elif rv == 0:
print("ERROR: Cmp ended with unexpected success")
else:
print("ERROR: Cmp ended with unexpected failure")
return 1
# Run one fio job synchronously; returns 0 when fio's exit status matches.
def run_fio_test(self, nbd_disk, offset, size, rw, pattern, expected_ret_value=0):
fio_cmd = get_fio_cmd(nbd_disk, offset, size, rw, pattern)
return run_fio(fio_cmd, expected_ret_value)
# SIGTERM the vhost app recorded in pid_path, waiting up to 30s for it to
# exit; returns 0 on clean shutdown, 1 otherwise.
def _stop_vhost(self, pid_path):
with io.open(pid_path, 'r') as vhost_pid:
pid = int(vhost_pid.readline())
if pid:
try:
kill(pid, signal.SIGTERM)
for count in range(30):
sleep(1)
# Signal 0 probes for existence; raises ESRCH once gone.
kill(pid, 0)
except OSError as err:
if err.errno == ESRCH:
pass
else:
return 1
else:
return 1
else:
return 1
return 0
# Launch the vhost app in the background, wait for its PID file and RPC
# socket; returns 0 on success, 1 on timeout.
def _start_vhost(self, vhost_path, pid_path):
subprocess.call("{app} -f "
"{pid} &".format(app=vhost_path,
pid=pid_path), shell=True)
for timeo in range(10):
if timeo == 9:
print("ERROR: Timeout on waiting for app start")
return 1
if not path.exists(pid_path):
print("Info: Waiting for PID file...")
sleep(1)
continue
else:
break
# Wait for RPC to open
sock = socket.socket(socket.AF_UNIX)
for timeo in range(30):
if timeo == 29:
print("ERROR: Timeout on waiting for RPC start")
return 1
try:
sock.connect("/var/tmp/spdk.sock")
break
except socket.error as e:
print("Info: Waiting for RPC Unix socket...")
sleep(1)
continue
else:
sock.close()
break
with io.open(pid_path, 'r') as vhost_pid:
pid = int(vhost_pid.readline())
if not pid:
return 1
return 0
# Free lvstore capacity in MiB.
def get_lvs_size(self, lvs_name="lvs_test"):
lvs = self.c.bdev_lvol_get_lvstores(lvs_name)[0]
return int(int(lvs['free_clusters'] * lvs['cluster_size']) / MEGABYTE)
def get_lvs_divided_size(self, split_num, lvs_name="lvs_test"):
# Actual size of lvol bdevs on creation is rounded up to multiple of cluster size.
# In order to avoid over provisioning, this function returns
# lvol store size in MB divided by split_num - rounded down to multiple of cluster size."
lvs = self.c.bdev_lvol_get_lvstores(lvs_name)[0]
return int(int(lvs['free_clusters'] / split_num) * lvs['cluster_size'] / MEGABYTE)
# Lvstore cluster size in MiB.
def get_lvs_cluster_size(self, lvs_name="lvs_test"):
lvs = self.c.bdev_lvol_get_lvstores(lvs_name)[0]
return int(int(lvs['cluster_size']) / MEGABYTE)
@case_message
def test_case850(self):
"""
Clear_method
Test for clear_method equal to none
"""
# Create malloc bdev
base_name = self.c.bdev_malloc_create(self.total_size,
self.block_size)
# Construct lvol store on created malloc bddev
lvs_uuid = self.c.bdev_lvol_create_lvstore(base_name,
self.lvs_name)
# Check correct uuid values in response bdev_lvol_get_lvstores command
fail_count = self.c.check_bdev_lvol_get_lvstores(base_name, lvs_uuid,
self.cluster_size)
lvs = self.c.bdev_lvol_get_lvstores(self.lvs_name)[0]
# Construct lvol bdev on lvol store
lbd_size = int(lvs['cluster_size'] / MEGABYTE)
bdev_uuid = self.c.bdev_lvol_create(lvs_uuid,
self.lbd_name,
lbd_size,
clear_method='none')
lvol_bdev = self.c.get_lvol_bdev_with_name(bdev_uuid)
nbd_name = "/dev/nbd0"
fail_count += self.c.nbd_start_disk(bdev_uuid, nbd_name)
# Write pattern to lvol bdev starting from offset 0.
fail_count += self.run_fio_test(nbd_name, 0, lvs['cluster_size'],
"write", "0xdd")
fail_count += self.c.nbd_stop_disk(nbd_name)
# Delete lvol bdev
fail_count += self.c.bdev_lvol_delete(bdev_uuid)
# Delete lvol store. We need to do this so that we can attach the underlying malloc
# bdev to nbd to examine its contents.
fail_count += self.c.bdev_lvol_delete_lvstore(lvs_uuid)
fail_count += self.c.nbd_start_disk(base_name, nbd_name)
# NOTE(review): mirrors the blobstore metadata layout; see the bash port
# of this test for the same formula.
metadata_pages = 1 + lvs['total_data_clusters'] + (math.ceil(5 + math.ceil(lvs['total_data_clusters'] / 8) / 4096)) * 3
last_metadata_lba = int(metadata_pages * 4096 / self.block_size)
offset_metadata_end = int(last_metadata_lba * self.block_size)
last_cluster_of_metadata = math.ceil(metadata_pages / lvs['cluster_size'] / 4096)
offset = last_cluster_of_metadata * lvs['cluster_size']
size_metadata_end = offset - offset_metadata_end
# Check if data on area between end of metadata
# and first cluster of lvol bdev remained unchanged
fail_count += self.run_fio_test("/dev/nbd0", offset_metadata_end,
size_metadata_end, "read", "0x00")
# Check if data on first lvol bdevs remains unchanged.
fail_count += self.run_fio_test("/dev/nbd0", offset, lvs['cluster_size'], "read", "0xdd")
fail_count += self.c.nbd_stop_disk(nbd_name)
self.c.bdev_malloc_delete(base_name)
# Expected result:
# - calls successful, return code = 0
# - get_bdevs: no change
# - no other operation fails
return fail_count
@case_message
def test_case851(self):
"""
Clear_method
Test lvol bdev with clear_method equal to unmap
"""
# Create malloc bdev
base_name = self.c.bdev_malloc_create(self.total_size,
self.block_size)
nbd_name = "/dev/nbd0"
fail_count = self.c.nbd_start_disk(base_name, nbd_name)
# Write data to malloc bdev starting from offset 0.
fail_count += self.run_fio_test(nbd_name, 0, self.total_size * MEGABYTE,
"write", "0xdd")
fail_count += self.c.nbd_stop_disk(nbd_name)
# Construct lvol store on created malloc bddev
lvs_uuid = self.c.bdev_lvol_create_lvstore(base_name,
self.lvs_name,
clear_method='none')
# Check correct uuid values in response bdev_lvol_get_lvstores command
fail_count = self.c.check_bdev_lvol_get_lvstores(base_name, lvs_uuid,
self.cluster_size)
lvs = self.c.bdev_lvol_get_lvstores(self.lvs_name)[0]
# Construct lvol bdev on lvol store
lbd_size = int(lvs['cluster_size'] / MEGABYTE)
bdev_uuid = self.c.bdev_lvol_create(lvs_uuid,
self.lbd_name,
lbd_size,
clear_method='unmap')
# Check that data on lvol bdev remains unchanged
fail_count += self.c.nbd_start_disk(bdev_uuid, nbd_name)
fail_count += self.run_fio_test(nbd_name, 0, lvs['cluster_size'],
"read", "0xdd")
fail_count += self.c.nbd_stop_disk(nbd_name)
# Delete lvol bdev
fail_count += self.c.bdev_lvol_delete(bdev_uuid)
# Delete lvol store
fail_count += self.c.bdev_lvol_delete_lvstore(lvs_uuid)
fail_count += self.c.nbd_start_disk(base_name, nbd_name)
# NOTE(review): same blobstore metadata layout math as test_case850.
metadata_pages = 1 + lvs['total_data_clusters'] + \
(math.ceil(5 + math.ceil(lvs['total_data_clusters'] / 8) / 4096)) * 3
last_metadata_lba = int(metadata_pages * 4096 / self.block_size)
offset_metadata_end = int(last_metadata_lba * self.block_size)
last_cluster_of_metadata = math.ceil(metadata_pages / lvs['cluster_size'] / 4096)
offset = last_cluster_of_metadata * lvs['cluster_size']
size_metadata_end = offset - offset_metadata_end
# Check if data on area between end of metadata
# and first cluster of lvol bdev remained unchanged
fail_count += self.run_fio_test("/dev/nbd0", offset_metadata_end,
size_metadata_end, "read", "0xdd")
# Check if data on lvol bdev was zeroed.
# Malloc bdev should zero any data that is unmapped.
fail_count += self.run_fio_test("/dev/nbd0", offset, lvs['cluster_size'], "read", "0x00")
self.c.bdev_malloc_delete(base_name)
# Expected result:
# - calls successful, return code = 0
# - get_bdevs: no change
# - no other operation fails
return fail_count