test/vhost: add fio and lvol integrity cases with raid bdev

1. Use the Raid-0 bdev, which is based on two Malloc bdevs, to run
   the fio test.
2. Use the Raid-0 bdev, which is based on one Nvme bdev and one
   Malloc bdev, to run the fio test.
3. Create a lvol store on the Raid-0 bdev, which is based on
   two Malloc bdevs.

Change-Id: I3fb5e5d8e445a236a6a4e8c198a6f54a1f488989
Signed-off-by: Chen Wang <chenx.wang@intel.com>
Reviewed-on: https://review.gerrithub.io/425546
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Piotr Pelpliński <piotr.pelplinski@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
This commit is contained in:
Chen Wang 2018-09-14 15:07:18 +08:00 committed by Jim Harris
parent f7561e31de
commit efae4a1baa
3 changed files with 100 additions and 4 deletions

View File

@@ -114,16 +114,25 @@ for vm_conf in ${vms[@]}; do
while IFS=':' read -ra disks; do
for disk in "${disks[@]}"; do
notice "Create a lvol store on RaidBdev2 and then a lvol bdev on the lvol store"
if [[ $disk == "RaidBdev2" ]]; then
ls_guid=$($rpc_py construct_lvol_store RaidBdev2 lvs_0 -c 4194304)
free_mb=$(get_lvs_free_mb "$ls_guid")
based_disk=$($rpc_py construct_lvol_bdev -u $ls_guid lbd_0 $free_mb)
else
based_disk="$disk"
fi
if [[ "$test_type" == "spdk_vhost_blk" ]]; then
disk=${disk%%_*}
notice "Creating vhost block controller naa.$disk.${conf[0]} with device $disk"
$rpc_py construct_vhost_blk_controller naa.$disk.${conf[0]} $disk
$rpc_py construct_vhost_blk_controller naa.$disk.${conf[0]} $based_disk
else
notice "Creating controller naa.$disk.${conf[0]}"
$rpc_py construct_vhost_scsi_controller naa.$disk.${conf[0]}
notice "Adding device (0) to naa.$disk.${conf[0]}"
$rpc_py add_vhost_scsi_lun naa.$disk.${conf[0]} 0 $disk
$rpc_py add_vhost_scsi_lun naa.$disk.${conf[0]} 0 $based_disk
fi
done
done <<< "${conf[2]}"
@@ -147,13 +156,19 @@ if [[ $test_type == "spdk_vhost_scsi" ]]; then
IFS=',' read -ra conf <<< "$vm_conf"
while IFS=':' read -ra disks; do
for disk in "${disks[@]}"; do
# For RaidBdev2, the lvol bdev on RaidBdev2 is being used.
if [[ $disk == "RaidBdev2" ]]; then
based_disk="lvs_0/lbd_0"
else
based_disk="$disk"
fi
notice "Hotdetach test. Trying to remove existing device from a controller naa.$disk.${conf[0]}"
$rpc_py remove_vhost_scsi_target naa.$disk.${conf[0]} 0
sleep 0.1
notice "Hotattach test. Re-adding device 0 to naa.$disk.${conf[0]}"
$rpc_py add_vhost_scsi_lun naa.$disk.${conf[0]} 0 $disk
$rpc_py add_vhost_scsi_lun naa.$disk.${conf[0]} 0 $based_disk
done
done <<< "${conf[2]}"
unset IFS;
@@ -228,6 +243,11 @@ if ! $no_shutdown; then
fi
$rpc_py remove_vhost_controller naa.$disk.${conf[0]}
if [[ $disk == "RaidBdev2" ]]; then
notice "Removing lvol bdev and lvol store"
$rpc_py destroy_lvol_bdev lvs_0/lbd_0
$rpc_py destroy_lvol_store -l lvs_0
fi
done
done <<< "${conf[2]}"
done

View File

@@ -36,6 +36,82 @@
"num_blocks": 32768
},
"method": "construct_malloc_bdev"
},
{
"params": {
"name": "Malloc2",
"num_blocks": 131072,
"block_size": 512
},
"method": "construct_malloc_bdev"
},
{
"params": {
"name": "Malloc3",
"num_blocks": 131072,
"block_size": 512
},
"method": "construct_malloc_bdev"
},
{
"params": {
"name": "Malloc4",
"num_blocks": 131072,
"block_size": 512
},
"method": "construct_malloc_bdev"
},
{
"params": {
"name": "Malloc5",
"num_blocks": 131072,
"block_size": 512
},
"method": "construct_malloc_bdev"
},
{
"params": {
"name": "Malloc6",
"num_blocks": 131072,
"block_size": 512
},
"method": "construct_malloc_bdev"
},
{
"method": "construct_raid_bdev",
"params": {
"name": "RaidBdev0",
"strip_size": 128,
"raid_level": 0,
"base_bdevs": [
"Malloc2",
"Malloc3"
]
}
},
{
"method": "construct_raid_bdev",
"params": {
"name": "RaidBdev1",
"strip_size": 128,
"raid_level": 0,
"base_bdevs": [
"Nvme0n1p2",
"Malloc4"
]
}
},
{
"method": "construct_raid_bdev",
"params": {
"name": "RaidBdev2",
"strip_size": 128,
"raid_level": 0,
"base_bdevs": [
"Malloc5",
"Malloc6"
]
}
}
]
},

View File

@@ -91,7 +91,7 @@ case $1 in
-i|--integrity)
echo 'Running SCSI integrity suite...'
run_test case $WORKDIR/fiotest/autotest.sh -x --fio-bin=$FIO_BIN \
--vm=0,$VM_IMAGE,Nvme0n1p0:Nvme0n1p1:Nvme0n1p2:Nvme0n1p3 \
--vm=0,$VM_IMAGE,Nvme0n1p0:RaidBdev0:RaidBdev1:RaidBdev2 \
--test-type=spdk_vhost_scsi \
--fio-job=$WORKDIR/common/fio_jobs/default_integrity.job
report_test_completion "nightly_vhost_integrity"