test/vhost: Use sshpass instead of generating keys

Using sshpass with the guests' root password is much easier than maintaining a dedicated SSH key (SPDK_VHOST_SSH_KEY_FILE) and a generated ssh_config for the test VMs. While here, rename vm_ssh to vm_exec, since the helper runs an arbitrary command on a VM and is no longer tied to a particular ssh setup.
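In rough terms, the helpers (vm_exec, vm_scp, ssh_remote) switch from key-based login to password login; a sketch of the before/after invocation, assuming the VM's forwarded SSH port comes from vm_ssh_socket and sshpass is installed on the test host:

    # before: required a pre-generated key file and a generated ssh_config
    ssh -i $SPDK_VHOST_SSH_KEY_FILE -F $VM_BASE_DIR/ssh_config \
        -p $(vm_ssh_socket $vm_num) 127.0.0.1 "$@"

    # after: plain password authentication, nothing to generate or distribute
    sshpass -p root ssh \
        -o UserKnownHostsFile=/dev/null \
        -o StrictHostKeyChecking=no \
        -o User=root \
        -p $(vm_ssh_socket $vm_num) 127.0.0.1 "$@"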

Change-Id: I4ae5f2f5b9393f65d07f39f03fa30628a40b01cf
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/459304
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Ben Walker, 2019-06-04 15:00:09 -07:00 (committed by Darek Stojaczyk)
parent 77c1f90e98
commit 97df5bcc63
14 changed files with 66 additions and 90 deletions

View File

@@ -62,7 +62,7 @@ vm_wait_for_boot 300 3
# Run the fio workload remotely
vm_scp 3 $testdir/nvmf_vhost_fio.job 127.0.0.1:/root/nvmf_vhost_fio.job
vm_ssh 3 "fio /root/nvmf_vhost_fio.job"
vm_exec 3 "fio /root/nvmf_vhost_fio.job"
vm_shutdown_all
trap - SIGINT SIGTERM EXIT

View File

@@ -10,14 +10,6 @@ if [ ! -d $QEMU_PREFIX ]; then
fi
echo "Using qemu folder $QEMU_PREFIX"
# SSH key file
: ${SPDK_VHOST_SSH_KEY_FILE="$(readlink -e $HOME/.ssh/spdk_vhost_id_rsa)"}
if [[ ! -r "$SPDK_VHOST_SSH_KEY_FILE" ]]; then
error "Could not find SSH key file $SPDK_VHOST_SSH_KEY_FILE"
exit 1
fi
echo "Using SSH key file $SPDK_VHOST_SSH_KEY_FILE"
VM_BASE_DIR="$TEST_DIR/vms"
mkdir -p $TEST_DIR
@@ -312,42 +304,22 @@ function vm_fio_socket()
cat $vm_dir/fio_socket
}
function vm_create_ssh_config()
{
local ssh_config="$VM_BASE_DIR/ssh_config"
if [[ ! -f $ssh_config ]]; then
(
echo "Host *"
echo " ControlPersist=10m"
echo " ConnectTimeout=1"
echo " Compression=no"
echo " ControlMaster=auto"
echo " UserKnownHostsFile=/dev/null"
echo " StrictHostKeyChecking=no"
echo " User root"
echo " ControlPath=/tmp/%r@%h:%p.ssh"
echo ""
) > $ssh_config
# Control path created at /tmp because of live migration test case 3.
# In case of using sshfs share for the test - control path cannot be
# on share because remote server will fail on ssh commands.
fi
}
# Execute ssh command on given VM
# Execute command on given VM
# param $1 virtual machine number
#
function vm_ssh()
function vm_exec()
{
vm_num_is_valid $1 || return 1
vm_create_ssh_config
local ssh_config="$VM_BASE_DIR/ssh_config"
local ssh_cmd="ssh -i $SPDK_VHOST_SSH_KEY_FILE -F $ssh_config \
-p $(vm_ssh_socket $1) $VM_SSH_OPTIONS 127.0.0.1"
local vm_num="$1"
shift
$ssh_cmd "$@"
sshpass -p root ssh \
-o UserKnownHostsFile=/dev/null \
-o StrictHostKeyChecking=no \
-o User=root \
-p $(vm_ssh_socket $vm_num) $VM_SSH_OPTIONS 127.0.0.1 \
"$@"
}
# Execute scp command on given VM
@@ -356,14 +328,16 @@ function vm_ssh()
function vm_scp()
{
vm_num_is_valid $1 || return 1
vm_create_ssh_config
local ssh_config="$VM_BASE_DIR/ssh_config"
local scp_cmd="scp -i $SPDK_VHOST_SSH_KEY_FILE -F $ssh_config \
-P $(vm_ssh_socket $1) "
local vm_num="$1"
shift
$scp_cmd "$@"
sshpass -p root scp \
-o UserKnownHostsFile=/dev/null \
-o StrictHostKeyChecking=no \
-o User=root \
-P $(vm_ssh_socket $vm_num) $VM_SSH_OPTIONS \
"$@"
}
@@ -406,9 +380,9 @@ function vm_os_booted()
return 1
fi
if ! VM_SSH_OPTIONS="-o ControlMaster=no" vm_ssh $1 "true" 2>/dev/null; then
if ! VM_SSH_OPTIONS="-o ControlMaster=no" vm_exec $1 "true" 2>/dev/null; then
# Shutdown existing master. Ignore errors as it might not exist.
VM_SSH_OPTIONS="-O exit" vm_ssh $1 "true" 2>/dev/null
VM_SSH_OPTIONS="-O exit" vm_exec $1 "true" 2>/dev/null
return 1
fi
@@ -437,7 +411,7 @@ function vm_shutdown()
# "fail" due to shutdown
notice "Shutting down virtual machine $vm_dir"
set +e
vm_ssh $1 "nohup sh -c 'shutdown -h -P now'" || true
vm_exec $1 "nohup sh -c 'shutdown -h -P now'" || true
notice "VM$1 is shutting down - wait a while to complete"
set -e
}
@@ -970,9 +944,9 @@ function vm_wait_for_boot()
notice "VM$vm_num ready"
#Change Timeout for stopping services to prevent lengthy powerdowns
#Check that remote system is not Cygwin in case of Windows VMs
local vm_os=$(vm_ssh $vm_num "uname -o")
local vm_os=$(vm_exec $vm_num "uname -o")
if [[ "$vm_os" != "Cygwin" ]]; then
vm_ssh $vm_num "echo 'DefaultTimeoutStopSec=10' >> /etc/systemd/system.conf; systemctl daemon-reexec"
vm_exec $vm_num "echo 'DefaultTimeoutStopSec=10' >> /etc/systemd/system.conf; systemctl daemon-reexec"
fi
done
@@ -1002,10 +976,10 @@ function vm_start_fio_server()
for vm_num in $@; do
notice "Starting fio server on VM$vm_num"
if [[ $fio_bin != "" ]]; then
cat $fio_bin | vm_ssh $vm_num 'cat > /root/fio; chmod +x /root/fio'
vm_ssh $vm_num /root/fio $readonly --eta=never --server --daemonize=/root/fio.pid
cat $fio_bin | vm_exec $vm_num 'cat > /root/fio; chmod +x /root/fio'
vm_exec $vm_num /root/fio $readonly --eta=never --server --daemonize=/root/fio.pid
else
vm_ssh $vm_num fio $readonly --eta=never --server --daemonize=/root/fio.pid
vm_exec $vm_num fio $readonly --eta=never --server --daemonize=/root/fio.pid
fi
done
}
@@ -1022,7 +996,7 @@ function vm_check_scsi_location()
fi; \
done'
SCSI_DISK="$(echo "$script" | vm_ssh $1 bash -s)"
SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"
if [[ -z "$SCSI_DISK" ]]; then
error "no test disk found!"
@@ -1037,14 +1011,14 @@ function vm_reset_scsi_devices()
{
for disk in "${@:2}"; do
notice "VM$1 Performing device reset on disk $disk"
vm_ssh $1 sg_reset /dev/$disk -vNd
vm_exec $1 sg_reset /dev/$disk -vNd
done
}
function vm_check_blk_location()
{
local script='shopt -s nullglob; cd /sys/block; echo vd*'
SCSI_DISK="$(echo "$script" | vm_ssh $1 bash -s)"
SCSI_DISK="$(echo "$script" | vm_exec $1 bash -s)"
if [[ -z "$SCSI_DISK" ]]; then
error "no blk test disk found!"
@@ -1097,17 +1071,17 @@ function run_fio()
local vm_num=${vm%%:*}
local vmdisks=${vm#*:}
sed "s@filename=@filename=$vmdisks@" $job_file | vm_ssh $vm_num "cat > /root/$job_fname"
sed "s@filename=@filename=$vmdisks@" $job_file | vm_exec $vm_num "cat > /root/$job_fname"
fio_disks+="127.0.0.1:$(vm_fio_socket $vm_num):$vmdisks,"
vm_ssh $vm_num cat /root/$job_fname
vm_exec $vm_num cat /root/$job_fname
if ! $run_server_mode; then
if [[ ! -z "$fio_bin" ]]; then
cat $fio_bin | vm_ssh $vm_num 'cat > /root/fio; chmod +x /root/fio'
cat $fio_bin | vm_exec $vm_num 'cat > /root/fio; chmod +x /root/fio'
fi
notice "Running local fio on VM $vm_num"
vm_ssh $vm_num "nohup /root/fio /root/$job_fname 1>/root/$job_fname.out 2>/root/$job_fname.out </dev/null & echo \$! > /root/fio.pid"
vm_exec $vm_num "nohup /root/fio /root/$job_fname 1>/root/$job_fname.out 2>/root/$job_fname.out </dev/null & echo \$! > /root/fio.pid"
fi
done

View File

@@ -193,7 +193,7 @@ for vm_num in $used_vms; do
host_name="VM-$vm_num"
notice "Setting up hostname: $host_name"
vm_ssh $vm_num "hostname $host_name"
vm_exec $vm_num "hostname $host_name"
vm_start_fio_server $fio_bin $readonly $vm_num
if [[ "$test_type" == "spdk_vhost_scsi" ]]; then

View File

@@ -119,7 +119,7 @@ function vms_prepare() {
host_name="VM-${vm_num}-${!qemu_mask_param}"
notice "Setting up hostname: $host_name"
vm_ssh $vm_num "hostname $host_name"
vm_exec $vm_num "hostname $host_name"
vm_start_fio_server --fio-bin=$fio_bin $readonly $vm_num
done
}
@@ -127,7 +127,7 @@ function vms_prepare() {
function vms_reboot_all() {
notice "Rebooting all vms "
for vm_num in $1; do
vm_ssh $vm_num "reboot" || true
vm_exec $vm_num "reboot" || true
while vm_os_booted $vm_num; do
sleep 0.5
done

View File

@@ -88,7 +88,7 @@ vm_wait_for_boot 300 0
# Run tests on VM
vm_scp 0 $testdir/integrity_vm.sh root@127.0.0.1:/root/integrity_vm.sh
vm_ssh 0 "~/integrity_vm.sh $ctrl_type \"$vm_fs\""
vm_exec 0 "~/integrity_vm.sh $ctrl_type \"$vm_fs\""
notice "Shutting down virtual machine..."
vm_shutdown_all

View File

@@ -230,7 +230,7 @@ for vm_num in $used_vms; do
qemu_mask_param="VM_${vm_num}_qemu_mask"
host_name="VM-$vm_num-${!qemu_mask_param}"
vm_ssh $vm_num "hostname $host_name"
vm_exec $vm_num "hostname $host_name"
vm_start_fio_server $fio_bin $vm_num
if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then

View File

@@ -81,7 +81,7 @@ function migration_tc1()
# Check if fio is still running before migration
if ! is_fio_running $incoming_vm; then
vm_ssh $incoming_vm "cat /root/$(basename ${job_file}).out"
vm_exec $incoming_vm "cat /root/$(basename ${job_file}).out"
error "FIO is not running before migration: process crashed or finished too early"
fi
@@ -90,7 +90,7 @@ function migration_tc1()
# Check if fio is still running after migration
if ! is_fio_running $target_vm; then
vm_ssh $target_vm "cat /root/$(basename ${job_file}).out"
vm_exec $target_vm "cat /root/$(basename ${job_file}).out"
error "FIO is not running after migration: process crashed or finished too early"
fi
@@ -105,7 +105,7 @@ function migration_tc1()
done
notice "Fio result is:"
vm_ssh $target_vm "cat /root/$(basename ${job_file}).out"
vm_exec $target_vm "cat /root/$(basename ${job_file}).out"
notice "Migration DONE"

View File

@@ -176,7 +176,7 @@ function migration_tc2()
# Check if fio is still running before migration
if ! is_fio_running $incoming_vm; then
vm_ssh $incoming_vm "cat /root/$(basename ${job_file}).out"
vm_exec $incoming_vm "cat /root/$(basename ${job_file}).out"
error "FIO is not running before migration: process crashed or finished too early"
fi
@@ -185,7 +185,7 @@ function migration_tc2()
# Check if fio is still running after migration
if ! is_fio_running $target_vm; then
vm_ssh $target_vm "cat /root/$(basename ${job_file}).out"
vm_exec $target_vm "cat /root/$(basename ${job_file}).out"
error "FIO is not running after migration: process crashed or finished too early"
fi
@@ -200,7 +200,7 @@ function migration_tc2()
done
notice "Fio result is:"
vm_ssh $target_vm "cat /root/$(basename ${job_file}).out"
vm_exec $target_vm "cat /root/$(basename ${job_file}).out"
migration_tc2_cleanup_vhost_config
notice "Migration TC2 SUCCESS"

View File

@@ -27,10 +27,12 @@ fi
function ssh_remote()
{
local ssh_cmd="ssh -i $SPDK_VHOST_SSH_KEY_FILE \
local ssh_cmd="sshpass -p root ssh \
-o UserKnownHostsFile=/dev/null \
-o StrictHostKeyChecking=no -o ControlMaster=auto \
root@$1"
-o StrictHostKeyChecking=no \
-o ControlMaster=auto \
-o User=root \
$1"
shift
$ssh_cmd "$@"

View File

@@ -58,7 +58,7 @@ if ! vm_os_booted $target_vm; then
fi
if ! is_fio_running $target_vm; then
vm_ssh $target_vm "cat /root/migration-tc3.job.out"
vm_exec $target_vm "cat /root/migration-tc3.job.out"
error "FIO is not running on remote server after migration!"
fi
@@ -73,7 +73,7 @@ while is_fio_running $target_vm; do
done
notice "FIO result after migration:"
vm_ssh $target_vm "cat /root/migration-tc3.job.out"
vm_exec $target_vm "cat /root/migration-tc3.job.out"
host_2_cleanup_vhost
echo "DONE" > $share_dir/DONE

View File

@@ -126,7 +126,7 @@ function is_fio_running()
local shell_restore_x="$( [[ "$-" =~ x ]] && echo 'set -x' )"
set +x
if vm_ssh $1 'kill -0 $(cat /root/fio.pid)'; then
if vm_exec $1 'kill -0 $(cat /root/fio.pid)'; then
local ret=0
else
local ret=1

View File

@@ -325,7 +325,7 @@ fio_disks=""
for vm_num in $used_vms; do
vm_dir=$VM_BASE_DIR/$vm_num
host_name="VM-$vm_num"
vm_ssh $vm_num "hostname $host_name"
vm_exec $vm_num "hostname $host_name"
vm_start_fio_server $fio_bin $vm_num
if [[ "$ctrl_type" == "spdk_vhost_scsi" ]]; then
@@ -338,9 +338,9 @@ for vm_num in $used_vms; do
if [[ -n "$vm_throttle" ]]; then
block=$(printf '%s' $SCSI_DISK)
major_minor=$(vm_ssh "$vm_num" "cat /sys/block/$block/dev")
vm_ssh "$vm_num" "echo \"$major_minor $vm_throttle\" > /sys/fs/cgroup/blkio/blkio.throttle.read_iops_device"
vm_ssh "$vm_num" "echo \"$major_minor $vm_throttle\" > /sys/fs/cgroup/blkio/blkio.throttle.write_iops_device"
major_minor=$(vm_exec "$vm_num" "cat /sys/block/$block/dev")
vm_exec "$vm_num" "echo \"$major_minor $vm_throttle\" > /sys/fs/cgroup/blkio/blkio.throttle.read_iops_device"
vm_exec "$vm_num" "echo \"$major_minor $vm_throttle\" > /sys/fs/cgroup/blkio/blkio.throttle.write_iops_device"
fi
fio_disks+=" --vm=${vm_num}$(printf ':/dev/%s' $SCSI_DISK)"
@@ -359,7 +359,7 @@ for i in $(seq 1 $fio_iterations); do
mkdir -p $TEST_DIR/fio_results/sar_stats
pids=""
for vm_num in $used_vms; do
vm_ssh "$vm_num" "mkdir -p /root/sar; sar -P ALL $vm_sar_interval $vm_sar_count >> /root/sar/sar_stats_VM${vm_num}_run${i}.txt" &
vm_exec "$vm_num" "mkdir -p /root/sar; sar -P ALL $vm_sar_interval $vm_sar_count >> /root/sar/sar_stats_VM${vm_num}_run${i}.txt" &
pids+=" $!"
done
for j in $pids; do

View File

@@ -91,7 +91,7 @@ function blk_ro_tc1()
vm_run $vm_no
vm_wait_for_boot 300 $vm_no
notice "Preparing partition and file on guest VM"
vm_ssh $vm_no "bash -s" < $testdir/disabled_readonly_vm.sh
vm_exec $vm_no "bash -s" < $testdir/disabled_readonly_vm.sh
sleep 1
vm_shutdown_all
@@ -103,7 +103,7 @@ function blk_ro_tc1()
vm_run $vm_no
vm_wait_for_boot 300 $vm_no
notice "Testing readonly feature on guest VM"
vm_ssh $vm_no "bash -s" < $testdir/enabled_readonly_vm.sh
vm_exec $vm_no "bash -s" < $testdir/enabled_readonly_vm.sh
sleep 3
vm_shutdown_all
@@ -115,7 +115,7 @@ function blk_ro_tc1()
vm_run $vm_no
vm_wait_for_boot 300 $vm_no
notice "Removing partition and file from test disk on guest VM"
vm_ssh $vm_no "bash -s" < $testdir/delete_partition_vm.sh
vm_exec $vm_no "bash -s" < $testdir/delete_partition_vm.sh
sleep 1
vm_shutdown_all

View File

@@ -92,14 +92,14 @@ vm_wait_for_boot 300 $vm_no
timing_exit setup_vm
timing_enter run_vm_cmd
vm_ssh $vm_no "parted -s /dev/sda mkpart primary 10GB 100%; partprobe; sleep 0.1;"
vm_ssh $vm_no "mkfs.ext4 -F /dev/sda2; mkdir -p /mnt/sda2test; mount /dev/sda2 /mnt/sda2test;"
vm_ssh $vm_no "fio --name=integrity --bsrange=4k-512k --iodepth=128 --numjobs=1 --direct=1 \
vm_exec $vm_no "parted -s /dev/sda mkpart primary 10GB 100%; partprobe; sleep 0.1;"
vm_exec $vm_no "mkfs.ext4 -F /dev/sda2; mkdir -p /mnt/sda2test; mount /dev/sda2 /mnt/sda2test;"
vm_exec $vm_no "fio --name=integrity --bsrange=4k-512k --iodepth=128 --numjobs=1 --direct=1 \
--thread=1 --group_reporting=1 --rw=randrw --rwmixread=70 --filename=/mnt/sda2test/test_file \
--verify=md5 --do_verify=1 --verify_backlog=1024 --fsync_on_close=1 --runtime=20 \
--time_based=1 --size=1024m"
vm_ssh $vm_no "umount /mnt/sda2test; rm -rf /mnt/sda2test"
alignment_offset=$(vm_ssh $vm_no "cat /sys/block/sda/sda1/alignment_offset")
vm_exec $vm_no "umount /mnt/sda2test; rm -rf /mnt/sda2test"
alignment_offset=$(vm_exec $vm_no "cat /sys/block/sda/sda1/alignment_offset")
echo "alignment_offset: $alignment_offset"
timing_exit run_vm_cmd