spelling: test

Part of #2256

* achieve
* additionally
* against
* aliases
* already
* another
* arguments
* between
* capabilities
* comparison
* compatibility
* configuration
* continuing
* controlq
* cpumask
* default
* depends
* dereferenced
* discussed
* dissect
* driver
* environment
* everything
* excluded
* existing
* expectation
* failed
* fails
* following
* functions
* hugepages
* identifiers
* implicitly
* in_capsule
* increment
* initialization
* initiator
* integrity
* iteration
* latencies
* libraries
* management
* namespace
* negotiated
* negotiation
* nonexistent
* number
* occur
* occurred
* occurring
* offsetting
* operations
* outstanding
* overwhelmed
* parameter
* parameters
* partition
* preempts
* provisioned
* responded
* segment
* skipped
* struct
* subsystem
* success
* successfully
* sufficiently
* this
* threshold
* transfer
* transferred
* unchanged
* unexpected
* unregistered
* useless
* utility
* value
* variable
* workload

Change-Id: I21ca7dab4ef575b5767e50aaeabc34314ab13396
Signed-off-by: Josh Soref <jsoref@gmail.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10409
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Josh Soref 2021-11-24 20:40:59 -05:00 committed by Tomasz Zawadzki
parent 1ff3715d38
commit c9c7c281f8
74 changed files with 136 additions and 136 deletions

View File

@@ -10,7 +10,7 @@ randomized PDU commands through a simulated iSCSI initiator.
 3. iSCSI initiator send a logout request PDU to iSCSI Target in the end.
 Especially, iSCSI initiator need to build different bhs according to different bhs opcode.
-Then iSCSI initiator will receive all kinds of responsed opcodes from iSCSI Target.
+Then iSCSI initiator will receive all kinds of response opcodes from iSCSI Target.
 The application will terminate when run time expires (see the -t flag).
 ## Output

View File

@@ -771,7 +771,7 @@ craft_virtio_scsi_tmf_req(struct fuzz_vhost_dev_ctx *dev_ctx, struct fuzz_vhost_
 io_ctx->req.scsi_tmf_req.lun[1] = 0;
 }
-/* Valid controlqueue commands have to be of type 0, 1, or 2. Any others just return immediately from the target. */
+/* Valid controlq commands have to be of type 0, 1, or 2. Any others just return immediately from the target. */
 /* Try to only test the opcodes that will exercise extra paths in the target side. But allow for at least one invalid value. */
 io_ctx->req.scsi_tmf_req.type = rand() % 4;
 }

View File

@@ -238,7 +238,7 @@ function run_qos_test() {
 lower_limit=$((qos_limit * 9 / 10))
 upper_limit=$((qos_limit * 11 / 10))
-# QoS realization is related with bytes transfered. It currently has some variation.
+# QoS realization is related with bytes transferred. It currently has some variation.
 if [ $qos_result -lt $lower_limit ] || [ $qos_result -gt $upper_limit ]; then
 echo "Failed to limit the io read rate of NULL bdev by qos"
 $rpc_py bdev_malloc_delete $QOS_DEV_1

View File

@@ -1344,7 +1344,7 @@ function nvme_namespace_revert() {
 # This assumes every NVMe controller contains single namespace,
 # encompassing Total NVM Capacity and formatted as 512 block size.
 # 512 block size is needed for test/vhost/vhost_boot.sh to
-# succesfully run.
+# successfully run.
 unvmcap=$(nvme id-ctrl ${nvme_ctrlr} | grep unvmcap | cut -d: -f2)
 if [[ "$unvmcap" -eq 0 ]]; then

View File

@@ -8,7 +8,7 @@ for spinning up a VM capable of running the SPDK test suite.
 There is no need for external hardware to run these tests. The linux kernel comes with the drivers necessary
 to emulate an RDMA enabled NIC. NVMe controllers can also be virtualized in emulators such as QEMU.
-## VM Envronment Requirements (Host)
+## VM Environment Requirements (Host)
 - 8 GiB of RAM (for DPDK)
 - Enable intel_kvm on the host machine from the bios.
@@ -53,7 +53,7 @@ To create the VM image manually use following steps:
 - Make sure that only root has read access to the private key.
 3. Install the OS in the VM image (performed on guest):
 - Use the latest Fedora Cloud (Currently Fedora 32).
-- When partitioning the disk, make one partion that consumes the whole disk mounted at /. Do not encrypt the disk or enable LVM.
+- When partitioning the disk, make one partition that consumes the whole disk mounted at /. Do not encrypt the disk or enable LVM.
 - Choose the OpenSSH server packages during install.
 4. Post installation configuration (performed on guest):
 - Run the following commands to enable all necessary dependencies:

View File

@@ -99,7 +99,7 @@ function install_refspdk() {
 }
 function install_qat() {
-# Disect the kernel version into maj, min, release and local version
+# Dissect the kernel version into maj, min, release and local version
 local kernel_maj kernel_min kernel_rel kernel_loc
 local kernel_ver

View File

@@ -44,7 +44,7 @@ basic_rw() {
 }
 basic_offset() {
-# Check if offseting works - using default io size of 4k
+# Check if offsetting works - using default io size of 4k
 local count seek skip data data_check
 gen_bytes 4096 > "$test_file0"

View File

@@ -141,7 +141,7 @@ test(void)
 verify_buffer(buf2, len2);
 /* Allocate a 4MB buffer. This should trigger a new hugepage allocation
- * just for thie 4MB buffer.
+ * just for this 4MB buffer.
 */
 len3 = 4 * 1024 * 1024;
 printf("malloc %ju\n", len3);

View File

@@ -139,7 +139,7 @@ pci_hook_test(void)
 ut_dev.pci.id.vendor_id = 0x4;
 ut_dev.pci.id.device_id = 0x8;
-/* Use add parse for initilization */
+/* Use add parse for initialization */
 spdk_pci_addr_parse(&ut_dev.pci.addr, "10000:00:01.0");
 CU_ASSERT(ut_dev.pci.addr.domain == 0x10000);
 CU_ASSERT(ut_dev.pci.addr.bus == 0x0);

View File

@@ -38,7 +38,7 @@ include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk
 APP = app_repeat
 C_SRCS := app_repeat.c
-# Some of the modules and libaries are not repeatable yet, only organize
+# Some of the modules and libraries are not repeatable yet, only organize
 # the repeatable ones.
 SPDK_LIB_LIST = event_bdev event_accel event_vmd event_sock
 SPDK_LIB_LIST += event_nbd

View File

@@ -63,7 +63,7 @@ alone_static:
 $(CC) $(COMMON_CFLAGS) -o hello_bdev ./hello_bdev.c -pthread -Wl,--whole-archive,-Bstatic \
 $(SPDK_DPDK_LIB) -Wl,--no-whole-archive,-Bdynamic $(SYS_LIB)
-# Shows how to compile and external bdev and application sgainst the SPDK archives.
+# Shows how to compile and external bdev and application against the SPDK archives.
 bdev_static:
 $(CC) $(COMMON_CFLAGS) -L../passthru -o hello_bdev ./hello_bdev.c -pthread -Wl,--whole-archive,-Bstatic -lpassthru_external $(SPDK_DPDK_LIB) \
 -Wl,--no-whole-archive,-Bdynamic $(SYS_LIB)

View File

@@ -14,7 +14,7 @@ function reactor_set_intr_mode() {
 thd0_ids=($(reactor_get_thread_ids $r0_mask))
 thd2_ids=($(reactor_get_thread_ids $r2_mask))
-# Nubmer of thd0_ids shouldn't be zero
+# Number of thd0_ids shouldn't be zero
 if [[ ${#thd0_ids[*]} -eq 0 ]]; then
 echo "spdk_thread is expected in reactor 0."
 return 1

View File

@@ -18,7 +18,7 @@ responses to specific keys that explicitly allow repeated key
 declarations (e.g., TargetAddress)
 The spec didn't make it clear what other keys could be re-declare
-Disscussed this with UNH and get the conclusion that TargetName/
+Discussed this with UNH and get the conclusion that TargetName/
 TargetAddress/MaxRecvDataSegmentLength could be re-declare.
 '''
 '''

View File

@@ -12,7 +12,7 @@ iscsitestinit
 rpc_py="$rootdir/scripts/rpc.py"
 fio_py="$rootdir/scripts/fio-wrapper"
-# Namespaces are NOT used here on purpose. Rxe_cfg utilility used for NVMf tests do not support namespaces.
+# Namespaces are NOT used here on purpose. Rxe_cfg utility used for NVMf tests do not support namespaces.
 TARGET_IP=127.0.0.1
 INITIATOR_IP=127.0.0.1
 NETMASK=$INITIATOR_IP/32

View File

@@ -19,7 +19,7 @@ CPUMASK=0x02
 NUM_JOBS=1
 ISCSI_TGT_CM=0x02
-# Performance test for iscsi_tgt, run on devices with proper hardware support (target and inititator)
+# Performance test for iscsi_tgt, run on devices with proper hardware support (target and initiator)
 function usage() {
 [[ -n $2 ]] && (
 echo "$2"

View File

@@ -114,7 +114,7 @@ def verify_iscsi_connection_rpc_methods(rpc_py):
 output = rpc.iscsi_get_connections()
 jsonvalues = json.loads(output)
 verify(jsonvalues[0]['target_node_name'] == rpc_param['target_name'], 1,
-"target node name vaule is {}, expected {}".format(jsonvalues[0]['target_node_name'], rpc_param['target_name']))
+"target node name value is {}, expected {}".format(jsonvalues[0]['target_node_name'], rpc_param['target_name']))
 verify(jsonvalues[0]['initiator_addr'] == rpc_param['initiator_ip'], 1,
 "initiator address values is {}, expected {}".format(jsonvalues[0]['initiator_addr'], rpc_param['initiator_ip']))
 verify(jsonvalues[0]['target_addr'] == rpc_param['target_ip'], 1,
@@ -157,7 +157,7 @@ def verify_scsi_devices_rpc_methods(rpc_py):
 output = rpc.scsi_get_devices()
 jsonvalues = json.loads(output)
 verify(jsonvalues[0]['device_name'] == nodebase + ":" + rpc_param['target_name'], 1,
-"device name vaule is {}, expected {}".format(jsonvalues[0]['device_name'], rpc_param['target_name']))
+"device name value is {}, expected {}".format(jsonvalues[0]['device_name'], rpc_param['target_name']))
 verify(jsonvalues[0]['id'] == 0, 1,
 "device id value is {}, expected 0".format(jsonvalues[0]['id']))

View File

@@ -208,7 +208,7 @@ function create_bdev_subsystem_config() {
 fi
 # For LVOLs use split to check for proper order of initialization.
-# If LVOLs cofniguration will be reordered (eg moved before splits or AIO/NVMe)
+# If LVOLs configuration will be reordered (eg moved before splits or AIO/NVMe)
 # it should fail loading JSON config from file.
 tgt_rpc bdev_lvol_create_lvstore -c 1048576 ${lvol_store_base_bdev}p0 lvs_test
 tgt_rpc bdev_lvol_create -l lvs_test lvol0 32
@@ -227,12 +227,12 @@ function create_bdev_subsystem_config() {
 if [[ $SPDK_TEST_CRYPTO -eq 1 ]]; then
 tgt_rpc bdev_malloc_create 8 1024 --name MallocForCryptoBdev
 if [[ $(lspci -d:37c8 | wc -l) -eq 0 ]]; then
-local crypto_dirver=crypto_aesni_mb
+local crypto_driver=crypto_aesni_mb
 else
-local crypto_dirver=crypto_qat
+local crypto_driver=crypto_qat
 fi
-tgt_rpc bdev_crypto_create MallocForCryptoBdev CryptoMallocBdev $crypto_dirver 0123456789123456
+tgt_rpc bdev_crypto_create MallocForCryptoBdev CryptoMallocBdev $crypto_driver 0123456789123456
 expected_notifications+=(
 bdev_register:MallocForCryptoBdev
 bdev_register:CryptoMallocBdev

View File

@@ -210,7 +210,7 @@ function test_construct_lvol_fio_clear_method_none() {
 offset=$((last_cluster_of_metadata * jq_out["cluster_size"]))
 size_metadata_end=$((offset - offset_metadata_end))
-# Check if data on area between end of metadata and first cluster of lvol bdev remained unchaged.
+# Check if data on area between end of metadata and first cluster of lvol bdev remained unchanged.
 run_fio_test "$nbd_name" "$offset_metadata_end" "$size_metadata_end" "read" 0x00
 # Check if data on first lvol bdevs remains unchanged.
 run_fio_test "$nbd_name" "$offset" "${jq_out["cluster_size"]}" "read" 0xdd
@@ -269,7 +269,7 @@ function test_construct_lvol_fio_clear_method_unmap() {
 offset=$((last_cluster_of_metadata * jq_out["cluster_size"]))
 size_metadata_end=$((offset - offset_metadata_end))
-# Check if data on area between end of metadata and first cluster of lvol bdev remained unchaged.
+# Check if data on area between end of metadata and first cluster of lvol bdev remained unchanged.
 run_fio_test "$nbd_name" "$offset_metadata_end" "$size_metadata_end" "read" 0xdd
 # Check if data on lvol bdev was zeroed. Malloc bdev should zero any data that is unmapped.
 run_fio_test "$nbd_name" "$offset" "${jq_out["cluster_size"]}" "read" 0x00

View File

@@ -159,7 +159,7 @@ function test_rename_lvs_negative() {
 done
 # Clean up
-for bdev in "${bdev_aliases_1[@]}" "${bdev_alisaes_2[@]}"; do
+for bdev in "${bdev_aliases_1[@]}" "${bdev_aliases_2[@]}"; do
 rpc_cmd bdev_lvol_delete "$bdev"
 done
 rpc_cmd bdev_lvol_delete_lvstore -u "$lvs_uuid1"

View File

@@ -17,7 +17,7 @@ function test_snapshot_compare_with_lvol_bdev() {
 lvol_uuid1=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test1 "$lvol_size_mb" -t)
 lvol_uuid2=$(rpc_cmd bdev_lvol_create -u "$lvs_uuid" lvol_test2 "$lvol_size_mb")
-# Fill thin provisoned lvol bdev with 50% of its space
+# Fill thin provisioned lvol bdev with 50% of its space
 nbd_start_disks "$DEFAULT_RPC_ADDR" "$lvol_uuid1" /dev/nbd0
 count=$((lvol_size / LVS_DEFAULT_CLUSTER_SIZE / 2))
 dd if=/dev/urandom of=/dev/nbd0 oflag=direct bs="$LVS_DEFAULT_CLUSTER_SIZE" count=$count

View File

@@ -147,7 +147,7 @@ function test_delete_lvol_store_persistent_positive() {
 rpc_cmd bdev_aio_create "$aio0" "$bdev_aio_name" "$bdev_block_size"
 # Wait 1 second to allow time for lvolstore tasting
 sleep 1
-# bdev_lvol_get_lvstores should not report any existsing lvol stores in configuration
+# bdev_lvol_get_lvstores should not report any existing lvol stores in configuration
 # after deleting and adding NVMe bdev, thus check if destroyed lvol store does not exist
 # on aio bdev anymore.
 rpc_cmd bdev_lvol_get_lvstores -u "$lvstore_uuid" && false

View File

@@ -203,7 +203,7 @@ function test_thin_overprovisioning() {
 run_fio_test /dev/nbd1 0 $fill_size "write" "0xcc"
 # Fill rest of second bdev
-# Check that error message occured while filling second bdev with data
+# Check that error message occurred while filling second bdev with data
 offset=$fill_size
 fill_size_rest=$((lvol_size - fill_size))
 run_fio_test /dev/nbd1 "$offset" "$fill_size_rest" "write" "0xcc" && false

View File

@@ -445,7 +445,7 @@ spdk_aer_changed_ns_test(void)
 g_aer_done = 0;
-printf("Starting namespce attribute notice tests for all controllers...\n");
+printf("Starting namespace attribute notice tests for all controllers...\n");
 foreach_dev(dev) {
 get_feature_test(dev);

View File

@@ -1014,7 +1014,7 @@ identify_ns(void)
 spdk_nvme_detach(ctrlr);
 }
-/* Mandatory Log Page Identidfiers
+/* Mandatory Log Page Identifiers
 * 01h Error Information
 * 02h SMART / Health Information
 * 03h Firmware Slot Information

View File

@@ -16,13 +16,13 @@ run_test "nvme_cuse_rpc" $testdir/nvme_cuse_rpc.sh
 run_test "nvme_cli_cuse" $testdir/spdk_nvme_cli_cuse.sh
 run_test "nvme_smartctl_cuse" $testdir/spdk_smartctl_cuse.sh
-# Only run Namespace managment test case when such device is present
+# Only run Namespace management test case when such device is present
 bdfs=$(get_nvme_bdfs)
 $rootdir/scripts/setup.sh reset
 sleep 1
-# Find bdf that supports Namespace managment
+# Find bdf that supports Namespace management
 for bdf in $bdfs; do
 nvme_name=$(get_nvme_ctrlr_from_bdf ${bdf})
 if [[ -z "$nvme_name" ]]; then

View File

@@ -16,7 +16,7 @@ bdfs=$(get_nvme_bdfs)
 $rootdir/scripts/setup.sh reset
-# Find bdf that supports Namespace Managment
+# Find bdf that supports Namespace Management
 for bdf in $bdfs; do
 nvme_name=$(get_nvme_ctrlr_from_bdf ${bdf})
 if [[ -z "$nvme_name" ]]; then
@@ -33,7 +33,7 @@ for bdf in $bdfs; do
 done
 if [[ "${nvme_name}" == "" ]] || [[ "$oacs_ns_manage" -eq 0 ]]; then
-echo "No NVMe device supporting Namespace managment found"
+echo "No NVMe device supporting Namespace management found"
 $rootdir/scripts/setup.sh
 exit 1
 fi
@@ -57,7 +57,7 @@ function clean_up() {
 # This assumes every NVMe controller contains single namespace,
 # encompassing Total NVM Capacity and formatted as 512 block size.
 # 512 block size is needed for test/vhost/vhost_boot.sh to
-# succesfully run.
+# successfully run.
 tnvmcap=$($NVME_CMD id-ctrl ${nvme_dev} | grep tnvmcap | cut -d: -f2)
 blksize=512

View File

@@ -59,7 +59,7 @@ if [ "$CUSE_SMART_ERRLOG" != "$KERNEL_SMART_ERRLOG" ]; then
 exit 1
 fi
-# Data integity was checked before, now make sure other commads didn't fail
+# Data integrity was checked before, now make sure other commads didn't fail
 ${SMARTCTL_CMD} -i /dev/spdk/nvme0n1
 ${SMARTCTL_CMD} -c /dev/spdk/nvme0
 ${SMARTCTL_CMD} -A /dev/spdk/nvme0

View File

@@ -725,7 +725,7 @@ int main(int argc, char **argv)
 cleanup();
 if (rc != 0) {
-fprintf(stderr, "%s: errors occured\n", argv[0]);
+fprintf(stderr, "%s: errors occurred\n", argv[0]);
 }
 return rc;

View File

@@ -5,7 +5,7 @@
 The link time optimization (lto) gcc flag allows the linker to run a post-link optimization pass on the code. During
 that pass the linker inlines thin wrappers such as those around DPDK calls which results in a shallow call stack and
 significantly improves performance. Therefore, we recommend compiling SPDK with the lto flag prior to running this
-benchmark script to archieve optimal performance.
+benchmark script to achieve optimal performance.
 Link time optimization can be enabled in SPDK by doing the following:
 ~{.sh}
@@ -95,7 +95,7 @@ Specifies how many times run each workload. End results are averages of these wo
 #### --no-preconditioning
 By default disks are preconditioned before test using fio with parameters: size=100%, loops=2, bs=1M, w=write,
-iodepth=32, ioengine=spdk. It can be skiped when this option is set.
+iodepth=32, ioengine=spdk. It can be skipped when this option is set.
 #### "--no-io-scaling"
@@ -104,6 +104,6 @@ For SPDK fio plugin iodepth is multiplied by number of devices. When this option
 ## Results
 Results are stored in "results" folder. After each workload, to this folder are copied files with:
-fio configuration file, json files with fio results and logs with latiencies with sampling interval 250 ms.
-Number of copied files depends from number of repeats of each workload. Additionall csv file is created with averaged
+fio configuration file, json files with fio results and logs with latencies with sampling interval 250 ms.
+Number of copied files depends from number of repeats of each workload. Additionally csv file is created with averaged
 results of all workloads.

View File

@@ -209,7 +209,7 @@ function create_fio_config() {
 cores_numa=($(get_cores_numa_node "${cores[*]}"))
 # Following part of this function still leverages global variables a lot.
-# It's a mix of local variables passed as aruments to function with global variables. This is messy.
+# It's a mix of local variables passed as arguments to function with global variables. This is messy.
 # TODO: Modify this to be consistent with how variables are used here. Aim for using only
 # local variables to get rid of globals as much as possible.
 desc="\"Test io_plugin=$PLUGIN Blocksize=${BLK_SIZE} Workload=$RW MIX=${MIX} qd=${IODEPTH}\""

View File

@@ -150,7 +150,7 @@ while getopts 'h-:' optchar; do
 disk-config=*)
 DISKCFG="${OPTARG#*=}"
 if [[ ! -f "$DISKCFG" ]]; then
-echo "Disk confiuration file $DISKCFG does not exist!"
+echo "Disk configuration file $DISKCFG does not exist!"
 exit 1
 fi
 ;;
@@ -302,7 +302,7 @@ p99_99_lat_disks_usec=0
 stdev_disks_usec=0
 mean_slat_disks_usec=0
 mean_clat_disks_usec=0
-#Run each workolad $REPEAT_NO times
+#Run each workload $REPEAT_NO times
 for ((j = 0; j < REPEAT_NO; j++)); do
 if [ $PLUGIN = "spdk-perf-bdev" ]; then
 run_bdevperf > $TMP_RESULT_FILE

View File

@@ -713,7 +713,7 @@ cleanup:
 free_tasks();
 if (rc != 0) {
-fprintf(stderr, "%s: errors occured\n", argv[0]);
+fprintf(stderr, "%s: errors occurred\n", argv[0]);
 }
 return rc;

View File

@@ -15,7 +15,7 @@ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK0000000000000
 $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
 $rpc_py bdev_null_create NULL1 1000 512
 # Subsystem destruction process waits for all controllers to be destroyed which in turn wait
-# for all qpairs to be deleted. A qpair can only be deleted when all outstaind requests are completed
+# for all qpairs to be deleted. A qpair can only be deleted when all outstanding requests are completed
 # bdev_delay is used in this test to make a situation when qpair has outstanding requests when disconnect
 # starts. It allows to trigger async qpair/controller/subsystem destruction path
 $rpc_py bdev_delay_create -b NULL1 -d Delay0 -r 1000000 -t 1000000 -w 1000000 -n 1000000

View File

@@ -42,11 +42,11 @@ function nvmf_filesystem_create() {
 }
 function nvmf_filesystem_part() {
-incapsule=$1
+in_capsule=$1
 nvmfappstart -m 0xF
-$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -c $incapsule
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192 -c $in_capsule
 $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc1
 $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL
 $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
@@ -63,14 +63,14 @@ function nvmf_filesystem_part() {
 partprobe
 sleep 1
-if [ $incapsule -eq 0 ]; then
+if [ $in_capsule -eq 0 ]; then
 run_test "filesystem_ext4" nvmf_filesystem_create "ext4" ${nvme_name}
 run_test "filesystem_btrfs" nvmf_filesystem_create "btrfs" ${nvme_name}
 run_test "filesystem_xfs" nvmf_filesystem_create "xfs" ${nvme_name}
 else
-run_test "filesystem_incapsule_ext4" nvmf_filesystem_create "ext4" ${nvme_name}
-run_test "filesystem_incapsule_btrfs" nvmf_filesystem_create "btrfs" ${nvme_name}
-run_test "filesystem_incapsule_xfs" nvmf_filesystem_create "xfs" ${nvme_name}
+run_test "filesystem_in_capsule_ext4" nvmf_filesystem_create "ext4" ${nvme_name}
+run_test "filesystem_in_capsule_btrfs" nvmf_filesystem_create "btrfs" ${nvme_name}
+run_test "filesystem_in_capsule_xfs" nvmf_filesystem_create "xfs" ${nvme_name}
 fi
 parted -s /dev/${nvme_name} rm 1
@@ -86,7 +86,7 @@ function nvmf_filesystem_part() {
 nvmfpid=
 }
-run_test "nvmf_filesystem_no_incapsule" nvmf_filesystem_part 0
-run_test "nvmf_filesystem_incapsule" nvmf_filesystem_part 4096
+run_test "nvmf_filesystem_no_in_capsule" nvmf_filesystem_part 0
+run_test "nvmf_filesystem_in_capsule" nvmf_filesystem_part 4096
 nvmftestfini

View File

@@ -55,7 +55,7 @@ out=$("$rpc" nvmf_create_subsystem -s "$(gen_random_s 21)" "$nqn$RANDOM" 2>&1) &
 out=$("$rpc" nvmf_create_subsystem -d "$(gen_random_s 41)" "$nqn$RANDOM" 2>&1) && false
 [[ $out == *"Invalid MN"* ]]
-# Attempt to delete non-existing subsytem listener
+# Attempt to delete non-existing subsystem listener
 $rpc nvmf_create_transport --trtype "$TEST_TRANSPORT"
 $rpc nvmf_create_subsystem $nqn -s SPDK001 -a
 if [[ $TEST_TRANSPORT == "TCP" ]]; then

View File

@@ -15,7 +15,7 @@ function check_ana_state() {
 local ctrl_id=$2
 local ana_state=$3
 # Very rarely a connection is lost and Linux NVMe host tries reconnecting
-# after 10 seconds delay. For this case, set a sufficienntly long timeout.
+# after 10 seconds delay. For this case, set a sufficiently long timeout.
 # Linux NVMe host usually recognizes the new ANA state within 2 seconds.
 local timeout=20

View File

@@ -29,7 +29,7 @@ done
 # by running 6 different FIO jobs, each with 13 subjobs, we end up with 78 fio threads trying to write to
 # our target at once. This completely overwhelms the target SRQ, but allows us to verify that rnr_retry is
 # working even at very high queue depths because the rdma qpair doesn't fail.
-# It is normal to see the initiator timeout and reconnect waiting for completions from an overwhelmmed target,
+# It is normal to see the initiator timeout and reconnect waiting for completions from an overwhelmed target,
 # but the connection should come up and FIO should complete without errors.
 $rootdir/scripts/fio-wrapper -p nvmf -i 1048576 -d 128 -t read -r 10 -n 13

View File

@@ -377,7 +377,7 @@ function bdev_pmem_delete_pool_tc1() {
 pmem_clean_pool_file
 if $rpc_py bdev_pmem_delete_pool $default_pool_file; then
-error "bdev_pmem_delete_pool deleted inexistant pool file!"
+error "bdev_pmem_delete_pool deleted nonexistent pool file!"
 fi
 return 0

View File

@@ -478,7 +478,7 @@ get_cpu_time() {
 cpu_time=${cpu_time_map["$cpu_time"]:-3}
 interval=$((interval <= 0 ? 1 : interval))
-# We skip first sample to have min 2 for stat comparision
+# We skip first sample to have min 2 for stat comparison
 interval=$((interval + 1)) interval_count=0
 while ((interval_count++, --interval >= 0)); do
 for cpu in "${cpus[@]}"; do

View File

@@ -75,7 +75,7 @@ verify_dpdk_governor() {
 local samples=0 all_set=0 dir=-1 old_main_core_setspeed=0
-exec_under_dynamic_scheduler "${SPDK_APP[@]}" -m "$spdk_cpusmask" --main-core "$spdk_main_core"
+exec_under_dynamic_scheduler "${SPDK_APP[@]}" -m "$spdk_cpumask" --main-core "$spdk_main_core"
 while ((all_set == 0 && samples++ <= 50)); do
 update_main_core_cpufreq

View File

@@ -30,10 +30,10 @@ thread_stats() {
 idle() {
 local reactor_framework
 local reactors thread
-local cpusmask thread_cpumask
+local cpumask thread_cpumask
 local threads
-exec_under_dynamic_scheduler "${SPDK_APP[@]}" -m "$spdk_cpusmask" --main-core "$spdk_main_core"
+exec_under_dynamic_scheduler "${SPDK_APP[@]}" -m "$spdk_cpumask" --main-core "$spdk_main_core"
 # The expectation here is that when SPDK app is idle the following is true:
 # - all threads are assigned to main lcore
@@ -41,7 +41,7 @@ idle() {
 xtrace_disable
 while ((samples++ < 5)); do
-cpusmask=0
+cpumask=0
 reactor_framework=$(rpc_cmd framework_get_reactors | jq -r '.reactors[]')
 threads=($(
 jq -r "select(.lcore == $spdk_main_core) | .lw_threads[].name" <<< "$reactor_framework"
@@ -49,10 +49,10 @@ idle() {
 for thread in "${threads[@]}"; do
 thread_cpumask=0x$(jq -r "select(.lcore == $spdk_main_core) | .lw_threads[] | select(.name == \"$thread\") | .cpumask" <<< "$reactor_framework")
-((cpusmask |= thread_cpumask))
+((cpumask |= thread_cpumask))
 done
-printf 'SPDK cpumask: %x Threads cpumask: %x\n' "$spdk_cpusmask" "$cpusmask"
+printf 'SPDK cpumask: %x Threads cpumask: %x\n' "$spdk_cpumask" "$cpumask"
 thread_stats
 done

View File

@@ -72,6 +72,6 @@ interrupt() {
 done
 }
-exec_under_dynamic_scheduler "$scheduler" -m "$spdk_cpusmask" --main-core "$spdk_main_core"
+exec_under_dynamic_scheduler "$scheduler" -m "$spdk_cpumask" --main-core "$spdk_main_core"
 interrupt

View File

@@ -21,7 +21,7 @@ allowed_list "$NUM_CPUS" 0
 # Assign proper resources to the cpuset/spdk
 spdk_cpus=("${allowed[@]}")
 spdk_cpus_csv=$(fold_array_onto_string "${spdk_cpus[@]}")
-spdk_cpusmask=$(mask_cpus "${spdk_cpus[@]}")
+spdk_cpumask=$(mask_cpus "${spdk_cpus[@]}")
 spdk_main_core=${spdk_cpus[0]}
 spdk_cpus_mems=0
@@ -32,14 +32,14 @@ filter_allowed_list
 all_cpus=("${allowed[@]}")
 all_cpus_csv=$(fold_array_onto_string "${all_cpus[@]}")
-all_cpusmask=$(mask_cpus "${all_cpus[@]}")
+all_cpumask=$(mask_cpus "${all_cpus[@]}")
 export \
-"spdk_cpusmask=$spdk_cpusmask" \
+"spdk_cpumask=$spdk_cpumask" \
 "spdk_cpus_csv=$spdk_cpus_csv" \
 "spdk_cpus_no=${#spdk_cpus[@]}" \
 "spdk_main_core=$spdk_main_core" \
-"all_cpusmask=$all_cpusmask" \
+"all_cpumask=$all_cpumask" \
 "all_cpus_csv=$all_cpus_csv"
 xtrace_restore

View File

@@ -142,7 +142,7 @@ core_load() {
 # Re-exec the scheduler app to make sure rr balancer won't affect threads without
 # configured cpumask from the previous test suites.
-exec_under_dynamic_scheduler "$scheduler" -m "$spdk_cpusmask" --main-core "$spdk_main_core"
+exec_under_dynamic_scheduler "$scheduler" -m "$spdk_cpumask" --main-core "$spdk_main_core"
 # Create thread0 with 90% activity no cpumask, expecting it to remain on main cpu
 thread0=$(create_thread -n "thread0" -a 90)
@@ -201,7 +201,7 @@ core_load() {
 thread5=$(create_thread -n "thread5" -a 25)
 # Three iterations are needed, as all threads look active on first iteration since they are on the main core.
-# Second iteraion will have them spread out over cores and only third will collapse to the expected scenario.
+# Second iteration will have them spread out over cores and only third will collapse to the expected scenario.
 sleep $((3 * sched_period))
 update_thread_cpus_map
@@ -233,7 +233,7 @@ core_load() {
 ((on_main_core == 5 && on_next_core == 1))
 }
-exec_under_dynamic_scheduler "$scheduler" -m "$spdk_cpusmask" --main-core "$spdk_main_core"
+exec_under_dynamic_scheduler "$scheduler" -m "$spdk_cpumask" --main-core "$spdk_main_core"
 run_test "busy" busy
 run_test "balanced" balanced

View File

@@ -7,12 +7,12 @@ shopt -s extglob nullglob
 declare -a nodes_sys=()
-declare -i default_huges=0
+declare -i default_hugepages=0
 declare -i no_nodes=0
 declare -i nr_hugepages=0
-default_huges=$(get_meminfo Hugepagesize)
-default_huge_nr=/sys/kernel/mm/hugepages/hugepages-${default_huges}kB/nr_hugepages
+default_hugepages=$(get_meminfo Hugepagesize)
+default_huge_nr=/sys/kernel/mm/hugepages/hugepages-${default_hugepages}kB/nr_hugepages
 global_huge_nr=/proc/sys/vm/nr_hugepages
 # Make sure environment doesn't affect the tests
@@ -25,7 +25,7 @@ get_nodes() {
 local node
 for node in /sys/devices/system/node/node+([0-9]); do
-nodes_sys[${node##*node}]=$(< "$node/hugepages/hugepages-${default_huges}kB/nr_hugepages")
+nodes_sys[${node##*node}]=$(< "$node/hugepages/hugepages-${default_hugepages}kB/nr_hugepages")
 done
 no_nodes=${#nodes_sys[@]}
 ((no_nodes > 0))
@@ -50,9 +50,9 @@ get_test_nr_hugepages() {
 local node_ids=("$@")
 fi
-((size >= default_huges))
+((size >= default_hugepages))
-nr_hugepages=$(((size + default_huges - 1) / default_huges))
+nr_hugepages=$(((size + default_hugepages - 1) / default_hugepages))
 get_test_nr_hugepages_per_node "${node_ids[@]}"
 }
@@ -98,7 +98,7 @@ verify_nr_hugepages() {
 # There's no obvious way of determining which NUMA node is going to end
 # up with an odd number of hugepages in case such number was actually
 # allocated by the kernel. Considering that, let's simply check if our
-# expaction is met by sorting and comparing it with nr of hugepages that
+# expectation is met by sorting and comparing it with nr of hugepages that
 # was actually allocated on each node.
 for node in "${!nodes_test[@]}"; do
@@ -176,7 +176,7 @@ hp_status() {
 while read -r node size free _ total; do
 size=${size/kB/} node=${node#node}
-((size == default_huges)) || continue
+((size == default_hugepages)) || continue
 ((free == nodes_test[node]))
 ((total == nodes_test[node]))
 done < <(setup output status |& grep "node[0-9]")

View File

@@ -208,7 +208,7 @@ test_spdk_accel_get_capabilities(void)
 {
 uint64_t cap, expected_cap;
-/* Setup a few capabilites and make sure they are reported as expected. */
+/* Setup a few capabilities and make sure they are reported as expected. */
 g_accel_ch->engine = &g_accel_engine;
 expected_cap = ACCEL_COPY | ACCEL_DUALCAST | ACCEL_CRC32C;
 g_accel_ch->engine->capabilities = expected_cap;

View File

@@ -1650,7 +1650,7 @@ bdev_io_boundary_split_test(void)
 CU_ASSERT(g_io_done == true);
 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
-/* Test if a multi vector command terminated with failure before continueing
+/* Test if a multi vector command terminated with failure before continuing
 * splitting process when one of child I/O failed.
 * The multi vector command is as same as the above that needs to be split by strip
 * and then needs to be split further due to the capacity of child iovs.
@@ -1732,7 +1732,7 @@ bdev_io_boundary_split_test(void)
 ut_expected_io_set_iov(expected_io, 0,
 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);
-/* position 31 picked the the rest of the trasnfer to get us to 0x4200 */
+/* position 31 picked the the rest of the transfer to get us to 0x4200 */
 ut_expected_io_set_iov(expected_io, 1,
 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
@@ -2455,7 +2455,7 @@ bdev_io_mix_split_test(void)
 */
 /* The first 2 IOs are in an IO boundary.
-* After splitting segmemt size the segment num exceeds.
+* After splitting segment size the segment num exceeds.
 * So it splits to 2 child IOs.
 */
 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6);

View File

@@ -93,7 +93,7 @@ static uint64_t g_unexpected_bdev_blocklen;
 static bool g_append_with_md;
 static int g_unexpected_iovcnt;
 static void *g_md_buf;
-static void *g_unexpetced_md_buf;
+static void *g_unexpected_md_buf;
 static void *g_buf;
 static void *g_unexpected_buf;
@@ -114,7 +114,7 @@ test_setup(void)
 g_append_with_md = false;
 g_unexpected_iovcnt = 1000;
 g_md_buf = (void *)0xEFDCFEDE;
-g_unexpetced_md_buf = (void *)0xFECDEFDC;
+g_unexpected_md_buf = (void *)0xFECDEFDC;
 g_buf = (void *)0xFEEDBEEF;
 g_unexpected_buf = (void *)0xDEADBEEF;
@@ -139,7 +139,7 @@ start_operation(void)
 g_zone_op->bdev.iovs[0].iov_base = g_unexpected_buf;
 g_zone_op->bdev.iovs[0].iov_len = g_unexpected_num_blocks * g_unexpected_bdev_blocklen;
 g_zone_op->bdev.iovcnt = g_unexpected_iovcnt;
-g_zone_op->bdev.md_buf = g_unexpetced_md_buf;
+g_zone_op->bdev.md_buf = g_unexpected_md_buf;
 g_zone_op->bdev.num_blocks = g_unexpected_num_blocks;
 g_zone_op->bdev.offset_blocks = g_unexpected_zone_id;
 g_zone_op->bdev.start_lba = g_unexpected_start_lba;

View File

@@ -1028,7 +1028,7 @@ test_assign_device_qp(void)
 /* QAT testing is more complex as the code under test load balances by
 * assigning each subsequent device/qp to every QAT_VF_SPREAD modulo
 * g_qat_total_qp. For the current latest QAT we'll have 48 virtual functions
-* each with 2 qp so the "spread" betwen assignments is 32.
+* each with 2 qp so the "spread" between assignments is 32.
 */
 g_qat_total_qp = 96;
 for (i = 0; i < g_qat_total_qp; i++) {

View File

@@ -1088,7 +1088,7 @@ enomem(void)
 CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
 /*
-* Complete enough I/O to hit the nomem_theshold. This should trigger retrying nomem_io,
+* Complete enough I/O to hit the nomem_threshold. This should trigger retrying nomem_io,
 * and we should see I/O get resubmitted to the test bdev module.
 */
 stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);

View File

@@ -3812,7 +3812,7 @@ test_find_io_path(void)
 nbdev_ch.current_io_path = NULL;
-/* Test if io_path whose qpair is resetting is excluced. */
+/* Test if io_path whose qpair is resetting is excluded. */
 ctrlr_ch1.qpair = NULL;
 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);

View File

@@ -6609,7 +6609,7 @@ blob_io_unit(void)
 }
 static void
-blob_io_unit_compatiblity(void)
+blob_io_unit_compatibility(void)
 {
 struct spdk_bs_opts bsopts;
 struct spdk_blob_store *bs;
@@ -6864,7 +6864,7 @@ blob_persist_test(void)
 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
-/* Add xattr to a blob and sync it. While sync is occuring, remove the xattr and sync again.
+/* Add xattr to a blob and sync it. While sync is occurring, remove the xattr and sync again.
 * Interrupt the first sync after increasing number of poller iterations, until it succeeds.
 * Expectation is that after second sync completes no xattr is saved in metadata. */
 poller_iterations = 1;
@@ -7161,7 +7161,7 @@ int main(int argc, char **argv)
 CU_ADD_TEST(suite_bs, blob_operation_split_rw);
 CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
 CU_ADD_TEST(suite, blob_io_unit);
-CU_ADD_TEST(suite, blob_io_unit_compatiblity);
+CU_ADD_TEST(suite, blob_io_unit_compatibility);
 CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
 CU_ADD_TEST(suite_bs, blob_persist_test);
 CU_ADD_TEST(suite_bs, blob_decouple_snapshot);

View File

@@ -87,9 +87,9 @@ dev_reset_power_failure_counters(void)
 /**
 * Set power failure event. Power failure will occur after given number
-* of IO operations. It may occure after number of particular operations
+* of IO operations. It may occur after number of particular operations
 * (read, write, unmap, write zero or flush) or after given number of
-* any IO operations (general_treshold). Value 0 means that the treshold
+* any IO operations (general_threshold). Value 0 means that the threshold
 * is disabled. Any other value is the number of operation starting from
 * which power failure event will happen.
 */

View File

@@ -300,7 +300,7 @@ test_next_xfer_addr(void)
 struct ftl_addr addr, result, expect;
 setup_band();
-/* Verify simple one block incremention */
+/* Verify simple one block increment */
 addr = addr_from_punit(0);
 addr.offset += TEST_BAND_IDX * ftl_get_num_blocks_in_band(g_dev);
 expect = addr;

View File

@@ -60,10 +60,10 @@ set_up_subsystem(struct spdk_subsystem *subsystem, const char *name)
 static void
 set_up_depends(struct spdk_subsystem_depend *depend, const char *subsystem_name,
-const char *dpends_on_name)
+const char *depends_on_name)
 {
 depend->name = subsystem_name;
-depend->depends_on = dpends_on_name;
+depend->depends_on = depends_on_name;
 }
 static void

View File

@@ -255,14 +255,14 @@ op_login_session_normal_test(void)
 CU_ASSERT(rsph->status_class == ISCSI_CLASS_INITIATOR_ERROR);
 CU_ASSERT(rsph->status_detail == ISCSI_LOGIN_CONN_ADD_FAIL);
-/* expect suceess: drop the session */
+/* expect success: drop the session */
 rsph->tsih = 0; /* to create the session */
 g_iscsi.AllowDuplicateIsid = false;
 rc = iscsi_op_login_session_normal(&conn, &rsp_pdu, UT_INITIATOR_NAME1,
 &param, 0);
 CU_ASSERT(rc == 0);
-/* expect suceess: create the session */
+/* expect success: create the session */
 rsph->tsih = 0; /* to create the session */
 g_iscsi.AllowDuplicateIsid = true;
 rc = iscsi_op_login_session_normal(&conn, &rsp_pdu, UT_INITIATOR_NAME1,

View File

@@ -58,8 +58,8 @@ DEFINE_STUB(iscsi_send_tgts, int,
 0);
 static void
-burst_length_param_negotation(int FirstBurstLength, int MaxBurstLength,
-int initialR2T)
+burst_length_param_negotiation(int FirstBurstLength, int MaxBurstLength,
+int initialR2T)
 {
 struct spdk_iscsi_sess sess;
 struct spdk_iscsi_conn conn;
@@ -149,12 +149,12 @@ burst_length_param_negotation(int FirstBurstLength, int MaxBurstLength,
 static void
 param_negotiation_test(void)
 {
-burst_length_param_negotation(8192, 16384, 0);
-burst_length_param_negotation(8192, 16384, 1);
-burst_length_param_negotation(8192, 1024, 1);
-burst_length_param_negotation(8192, 1024, 0);
-burst_length_param_negotation(512, 1024, 1);
-burst_length_param_negotation(512, 1024, 0);
+burst_length_param_negotiation(8192, 16384, 0);
+burst_length_param_negotiation(8192, 16384, 1);
+burst_length_param_negotiation(8192, 1024, 1);
+burst_length_param_negotiation(8192, 1024, 0);
+burst_length_param_negotiation(512, 1024, 1);
+burst_length_param_negotiation(512, 1024, 0);
 }
 static void

View File

@@ -801,7 +801,7 @@ char ut_json_text[] =
 "{"
 " \"string\": \"Some string data\","
 " \"object\": { "
-" \"another_string\": \"Yet anoter string data\","
+" \"another_string\": \"Yet another string data\","
 " \"array name with space\": [1, [], {} ]"
 " },"
 " \"array\": [ \"Text\", 2, {} ]"

View File

@@ -371,7 +371,7 @@ test_parse_request_streaming(void)
 CU_ASSERT(rc == 0);
 CU_ASSERT(g_request == NULL);
-/* In case of faile, don't fload console with ussless CU assert fails. */
+/* In case of failed, don't fload console with useless CU assert fails. */
 FREE_REQUEST();
 }
@@ -403,7 +403,7 @@ int main(int argc, char **argv)
 num_failures = CU_get_number_of_failures();
 CU_cleanup_registry();
-/* This is for ASAN. Don't know why but if pointer is left in global varaible
+/* This is for ASAN. Don't know why but if pointer is left in global variable
 * it won't be detected as leak. */
 g_request = NULL;
 return num_failures;

View File

@@ -46,7 +46,7 @@ DEFINE_STUB_V(nvme_ctrlr_proc_put_ref, (struct spdk_nvme_ctrlr *ctrlr));
 DEFINE_STUB_V(nvme_ctrlr_fail, (struct spdk_nvme_ctrlr *ctrlr, bool hotremove));
 DEFINE_STUB(spdk_nvme_transport_available_by_name, bool,
 (const char *transport_name), true);
-/* return anything non-NULL, this won't be deferenced anywhere in this test */
+/* return anything non-NULL, this won't be dereferenced anywhere in this test */
 DEFINE_STUB(nvme_ctrlr_get_current_process, struct spdk_nvme_ctrlr_process *,
 (struct spdk_nvme_ctrlr *ctrlr), (struct spdk_nvme_ctrlr_process *)(uintptr_t)0x1);
 DEFINE_STUB(nvme_ctrlr_process_init, int,
@@ -689,7 +689,7 @@ test_nvme_allocate_request_null(void)
 req = nvme_allocate_request_null(&qpair, cb_fn, cb_arg);
 /*
-* Compare the req with the parmaters that we passed in
+* Compare the req with the parameters that we passed in
 * as well as what the function is supposed to update.
 */
 SPDK_CU_ASSERT_FATAL(req != NULL);

View File

@@ -435,7 +435,7 @@ test_nvme_ns_has_supported_iocs_specific_data(void)
 /* case 2: ns.csi == SPDK_NVME_CSI_ZNS. Expect: true */
 ns.csi = SPDK_NVME_CSI_ZNS;
 CU_ASSERT(nvme_ns_has_supported_iocs_specific_data(&ns) == true);
-/* case 3: defult ns.csi == SPDK_NVME_CSI_KV. Expect: false */
+/* case 3: default ns.csi == SPDK_NVME_CSI_KV. Expect: false */
 ns.csi = SPDK_NVME_CSI_KV;
 CU_ASSERT(nvme_ns_has_supported_iocs_specific_data(&ns) == false);
 }

View File

@@ -963,7 +963,7 @@ test_nvme_tcp_pdu_ch_handle(void)
 CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
 CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 2);
-/* case 3: The TCP/IP tqpair connection is not negotitated. Expect: fail */
+/* case 3: The TCP/IP tqpair connection is not negotiated. Expect: fail */
 tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
 tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
 tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);

View File

@@ -346,7 +346,7 @@ run_create_conn_test(struct spdk_nvmf_host *host,
 to_be32(&cc_rqst.assoc_id.desc_len,
 sizeof(struct spdk_nvmf_fc_lsdesc_assoc_id) -
 (2 * sizeof(uint32_t)));
-cc_rqst.assoc_id.association_id = assoc_id; /* alreday be64 */
+cc_rqst.assoc_id.association_id = assoc_id; /* already be64 */
 ls_rqst.rqstbuf.virt = &cc_rqst;
 ls_rqst.rspbuf.virt = respbuf;
@@ -390,7 +390,7 @@ run_disconn_test(struct spdk_nvmf_fc_nport *tgt_port,
 to_be32(&dc_rqst.assoc_id.desc_len,
 sizeof(struct spdk_nvmf_fc_lsdesc_assoc_id) -
 (2 * sizeof(uint32_t)));
-dc_rqst.assoc_id.association_id = assoc_id; /* alreday be64 */
+dc_rqst.assoc_id.association_id = assoc_id; /* already be64 */
 ls_rqst.rqstbuf.virt = &dc_rqst;
 ls_rqst.rspbuf.virt = respbuf;

View File

@@ -929,7 +929,7 @@ test_reservation_acquire_preempt_1(void)
 SPDK_CU_ASSERT_FATAL(g_ns.gen == gen);
 /* TEST CASE: g_ctrlr1_A holds the reservation, g_ctrlr_B preempt g_ctrl1_A,
-* g_ctrl1_A registrant is unregistred.
+* g_ctrl1_A registrant is unregistered.
 */
 gen = g_ns.gen;
 ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
@@ -1311,7 +1311,7 @@ test_reservation_preempt_notification(void)
 SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
 /* Test Case : g_ctrlr_B holds the reservation, g_ctrlr_C preempt g_ctrlr_B,
-* g_ctrlr_B registrant is unregistred, and reservation is preempted.
+* g_ctrlr_B registrant is unregistered, and reservation is preempted.
 * Registration Preempted notification sends to g_ctrlr_B.
 * Reservation Preempted notification sends to g_ctrlr1_A/g_ctrlr2_A.
 */

View File

@@ -429,7 +429,7 @@ test_nvmf_tcp_create(void)
 opts.max_io_size = UT_MAX_IO_SIZE;
 opts.io_unit_size = 16;
 opts.max_aq_depth = UT_MAX_AQ_DEPTH;
-/* expect failse */
+/* expect fails */
 transport = nvmf_tcp_create(&opts);
 CU_ASSERT_PTR_NULL(transport);
@@ -669,7 +669,7 @@ test_nvmf_tcp_h2c_data_hdr_handle(void)
 static void
-test_nvmf_tcp_incapsule_data_handle(void)
+test_nvmf_tcp_in_capsule_data_handle(void)
 {
 struct spdk_nvmf_tcp_transport ttransport = {};
 struct spdk_nvmf_tcp_qpair tqpair = {};
@@ -1107,7 +1107,7 @@ int main(int argc, char **argv)
 CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
 CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
 CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
-CU_ADD_TEST(suite, test_nvmf_tcp_incapsule_data_handle);
+CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
 CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
 CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
 CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);

View File

@@ -108,7 +108,7 @@ get_pm_file_size(void)
 expected_pm_size = sizeof(struct spdk_reduce_vol_superblock);
 /* 100 chunks in logical map * 8 bytes per chunk */
 expected_pm_size += 100 * sizeof(uint64_t);
-/* 100 chunks * (chunk stuct size + 4 backing io units per chunk * 8 bytes per backing io unit) */
+/* 100 chunks * (chunk struct size + 4 backing io units per chunk * 8 bytes per backing io unit) */
 expected_pm_size += 100 * (sizeof(struct spdk_reduce_chunk_map) + 4 * sizeof(uint64_t));
 /* reduce allocates some extra chunks too for in-flight writes when logical map
 * is full. REDUCE_EXTRA_CHUNKS is a private #ifdef in reduce.c Here we need the num chunks

View File

@@ -344,7 +344,7 @@ test_reservation_preempt_non_all_regs(void)
 SPDK_CU_ASSERT_FATAL(g_lun.reservation.crkey == 0xa);
 SPDK_CU_ASSERT_FATAL(g_lun.pr_generation == gen);
-/* Test Case: Host B premmpts Host A, Check condition is expected
+/* Test Case: Host B preempts Host A, Check condition is expected
 * for zeroed service action reservation key */
 task.initiator_port = &g_i_port_b;
 task.status = 0;
@@ -354,7 +354,7 @@ test_reservation_preempt_non_all_regs(void)
 SPDK_CU_ASSERT_FATAL(rc < 0);
 SPDK_CU_ASSERT_FATAL(task.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
-/* Test Case: Host B preempts Host A, Host A is unregisted */
+/* Test Case: Host B preempts Host A, Host A is unregistered */
 task.status = 0;
 gen = g_lun.pr_generation;
 rc = scsi_pr_out_preempt(&task, SPDK_SCSI_PR_OUT_PREEMPT,

View File

@@ -206,7 +206,7 @@ flush_server(void)
 req2->cb_arg = &cb_arg2;
 /* we should not call _sock_flush directly, since it will finally
-* call liburing related funtions */
+* call liburing related functions */
 /* Simple test - a request with a 2 element iovec
 * that is fully completed. */

View File

@@ -1441,7 +1441,7 @@ cache_closest_timed_poller(void)
 CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller2);
 /* If we unregister a timed poller by spdk_poller_unregister()
-* when it is waiting, it is marked as being unregistereed and
+* when it is waiting, it is marked as being unregistered and
 * is actually unregistered when it is expired.
 *
 * Hence if we unregister the closest timed poller when it is waiting,

View File

@@ -1739,7 +1739,7 @@ set_md_interleave_iovs_alignment_test(void)
 rc = spdk_dif_set_md_interleave_iovs(dif_iovs, 5, iovs, 3, 0, 2048, &mapped_len, &ctx);
 CU_ASSERT(rc == -ERANGE);
-/* The folllowing are the normal cases. */
+/* The following are the normal cases. */
 _iov_set_buf(&iovs[2], (uint8_t *)0xC0FFEE, 32);
 /* data length is less than a data block size. */

View File

@@ -276,7 +276,7 @@ create_controller_test(void)
 spdk_cpuset_parse(&g_vhost_core_mask, "0xf");
-/* Create device with cpumask implcitly matching whole application */
+/* Create device with cpumask implicitly matching whole application */
 ret = alloc_vdev(&vdev, "vdev_name_0", NULL);
 SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
 SPDK_CU_ASSERT_FATAL(!strcmp(spdk_cpuset_fmt(spdk_thread_get_cpumask(vdev->thread)), "f"));

View File

@@ -253,7 +253,7 @@ function vhost_rpc() {
 function assert_number() {
 [[ "$1" =~ [0-9]+ ]] && return 0
-error "Invalid or missing paramter: need number but got '$1'"
+error "Invalid or missing parameter: need number but got '$1'"
 return 1
 }
@@ -281,7 +281,7 @@ function vm_sshpass() {
 function vm_num_is_valid() {
 [[ "$1" =~ ^[0-9]+$ ]] && return 0
-error "Invalid or missing paramter: vm number '$1'"
+error "Invalid or missing parameter: vm number '$1'"
 return 1
 }
@@ -1205,7 +1205,7 @@ function parse_fio_results() {
 lat_divisor=1000
 fi
-# Horrific bash float point arithmetic oprations below.
+# Horrific bash float point arithmetic operations below.
 # Viewer discretion is advised.
 iops=$(jq -r '[.read["iops"],.write["iops"]] | add' <<< $client_stats)
 bw=$(jq -r '[.read["bw"],.write["bw"]] | add' <<< $client_stats)

View File

@@ -38,7 +38,7 @@ function usage() {
 echo " All VMs will run the same fio job when FIO executes."
 echo " (no unique jobs for specific VMs)"
 echo " --dry-run Don't perform any tests, run only and wait for enter to terminate"
-echo " --no-shutdown Don't shutdown at the end but leave envirionment working"
+echo " --no-shutdown Don't shutdown at the end but leave environment working"
 echo " --vm=NUM[,OS][,DISKS] VM configuration. This parameter might be used more than once:"
 echo " NUM - VM number (mandatory)"
 echo " OS - VM os disk path (optional)"
@@ -234,7 +234,7 @@ for vm_num in $used_vms; do
 done
 if $dry_run; then
-read -r -p "Enter to kill evething" xx
+read -r -p "Enter to kill everything" xx
 sleep 3
 at_app_exit
 exit 0

View File

@@ -114,7 +114,7 @@ function cleanup_parted_config() {
 }
 function cleanup_kernel_vhost() {
-notice "Cleaning kernel vhost configration"
+notice "Cleaning kernel vhost configuration"
 targetcli clearconfig confirm=True
 cleanup_parted_config
 }