scripts/vagrant: Drop OCSSD awareness from functional tests

This also translates into switching fully to upstream QEMU for the
vagrant setup.

This is done in order to move away from OCSSD and SPDK's qemu fork
and align with what upstream QEMU supports. The main changes touch
the way nvme namespaces are configured. With QEMU >= 5.2.0 it's now
possible to configure multiple namespaces under a single nvme device.
Each namespace requires a separate disk image to work with. This:

-b foo.img,nvme,1...
-b foo.img
-b foo.img,,..

will still configure an nvme controller with a single namespace
attached to foo.img.
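
For reference, judging by the Vagrantfile changes further below, this
single-namespace case should roughly translate into qemu arguments
like the following (the nvme0 id and the serial are purely
illustrative):

-device nvme,id=nvme0,serial=12340
-drive format=raw,file=foo.img,if=none,id=nvme0-drive0
-device nvme-ns,drive=nvme0-drive0,bus=nvme0,nsid=1,zoned=false,logical_block_size=4096,physical_block_size=4096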

This:

-b foo.img,,foo-ns1.img:foo-ns2.img

will configure an nvme controller with three namespaces.
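
In that case each backing file gets its own -drive plus nvme-ns
device pair attached to the same controller, so (again with the
illustrative nvme0 id) roughly:

-drive format=raw,file=foo.img,if=none,id=nvme0-drive0
-device nvme-ns,drive=nvme0-drive0,bus=nvme0,nsid=1,...
-drive format=raw,file=foo-ns1.img,if=none,id=nvme0-drive1
-device nvme-ns,drive=nvme0-drive1,bus=nvme0,nsid=2,...
-drive format=raw,file=foo-ns2.img,if=none,id=nvme0-drive2
-device nvme-ns,drive=nvme0-drive2,bus=nvme0,nsid=3,...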

Configuring an nvme controller with no namespaces is possible via:

-b none ...

Note that this still allows defining other options specific to the
nvme controller, like CMB and PMR. E.g.:

-b none,nvme,,true

This will create an nvme controller with no namespaces but with CMB
enabled.
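
Per the Vagrantfile changes below, the CMB size itself is not
configurable; any non-empty <cmb> field appends a fixed
cmb_size_mb=128 to the controller, so the above ends up as roughly:

-device nvme,id=nvme0,serial=12340,cmb_size_mb=128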

It's now also possible to request that a given controller be zoned.
Currently, if requested, all namespaces under the target controller
will be zoned, with no limit set on max open|active zones.
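
The exact syntax is not spelled out here, but based on the new sixth
field parsed out of -b below (path,type,namespaces,cmb,pmr,zns), a
zoned controller can presumably be requested with something like:

-b foo.img,,,,,true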

All nvme devices have their block size fixed to 4KB to imitate the
behavior of SPDK's qemu fork.

Compatibility with the spdk-5.0.0 fork is preserved in the context of
setting up namespaces, so this:

-b foo.img,nvme,2

is valid as long as the emulator is set to that of the spdk-5.0.0
fork.
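
For a numeric <namespaces> field like the one above, the Vagrantfile
below still generates the old-style arguments, i.e. a single drive
attached directly to the controller with the namespace count passed
as a property, roughly (ids again illustrative):

-drive format=raw,file=foo.img,if=none,id=nvme0
-device nvme,id=nvme0,serial=12340,namespaces=2,drive=nvme0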

Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: Ib5d53cb5c330c1f84b57e0bf877ea0e2d0312ddd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8421
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Michal Berger 2021-06-17 18:44:43 +02:00 committed by Tomasz Zawadzki
parent 1fd2af0150
commit ea71df4f48
5 changed files with 60 additions and 94 deletions


@@ -99,38 +99,6 @@ done
sync
if [ $(uname -s) = Linux ]; then
# OCSSD devices drivers don't support IO issues by kernel so
# detect OCSSD devices and block them (unbind from any driver).
# If test scripts want to use this device it needs to do this explicitly.
#
# If some OCSSD device is bound to other driver than nvme we won't be able to
# discover if it is OCSSD or not so load the kernel driver first.
while IFS= read -r -d '' dev; do
# Send Open Channel 2.0 Geometry opcode "0xe2" - not supported by NVMe device.
if nvme admin-passthru $dev --namespace-id=1 --data-len=4096 --opcode=0xe2 --read > /dev/null; then
bdf="$(basename $(readlink -e /sys/class/nvme/${dev#/dev/}/device))"
echo "INFO: blocking OCSSD device: $dev ($bdf)"
PCI_BLOCKED+=" $bdf"
OCSSD_PCI_DEVICES+=" $bdf"
fi
done < <(find /dev -maxdepth 1 -regex '/dev/nvme[0-9]+' -print0)
export OCSSD_PCI_DEVICES
# Now, bind blocked devices to pci-stub module. This will prevent
# automatic grabbing these devices when we add device/vendor ID to
# proper driver.
if [[ -n "$PCI_BLOCKED" ]]; then
# shellcheck disable=SC2097,SC2098
PCI_ALLOWED="$PCI_BLOCKED" \
PCI_BLOCKED="" \
DRIVER_OVERRIDE="pci-stub" \
./scripts/setup.sh
# Export our blocked list so it will take effect during next setup.sh
export PCI_BLOCKED
fi
run_test "setup.sh" "$rootdir/test/setup/test-setup.sh"
fi


@@ -117,7 +117,7 @@ def get_nvme_disk(disk, index)
nvme_disk = '/var/lib/libvirt/images/nvme_disk.img'
end
unless File.exist? (nvme_disk)
unless nvme_disk == "none" || File.exist?(nvme_disk)
puts 'If run with libvirt provider please execute create_nvme_img.sh'
end
@@ -131,37 +131,65 @@ def setup_nvme_disk(libvirt, disk, index)
nvme_namespaces=(ENV['NVME_DISKS_NAMESPACES'] || "").split(',')
nvme_cmbs=(ENV['NVME_CMB'] || "").split(',')
nvme_pmrs=(ENV['NVME_PMR'] || "").split(',')
nvme_zns=(ENV['NVME_ZNS'] || "").split(',')
namespace_disks = []
pmr_cmdline = ""
nvme_controller = ""
# Define controller
nvme_controller = "nvme,id=#{nvme_disk_id},serial=1234#{index}"
# Gather all drives - each namespace requires separate drive
if nvme_namespaces[index].nil?
namespace_disks = namespace_disks + nvme_disk.split()
elsif !nvme_namespaces[index].nil? && !nvme_namespaces[index].match(/^[0-9]+$/)
namespace_disks = namespace_disks + nvme_disk.split() + nvme_namespaces[index].split(':')
elsif !nvme_namespaces[index].nil? && nvme_namespaces[index].match(/^[0-9]+$/)
# Compatibility with spdk-5.0.0 fork
libvirt.qemuargs :value => "-drive"
libvirt.qemuargs :value => "format=raw,file=#{nvme_disk},if=none,id=#{nvme_disk_id}"
libvirt.qemuargs :value => "-device"
nvme_drive = "nvme,drive=#{nvme_disk_id},serial=1234#{index}"
if !nvme_namespaces[index].nil? && nvme_namespaces[index] != "1"
nvme_drive << ",namespaces=#{nvme_namespaces[index]}"
if nvme_namespaces[index] == "1"
nvme_controller <<",drive=#{nvme_disk_id}"
else
nvme_controller <<",namespaces=#{nvme_namespaces[index]},drive=#{nvme_disk_id}"
end
end
pmr_cmdline = ""
if !nvme_cmbs[index].nil? && nvme_cmbs[index] == "true"
if !nvme_cmbs[index].nil? && nvme_cmbs[index] != ""
# Fix the size of the buffer to 128M
nvme_drive << ",cmb_size_mb=128"
nvme_controller << ",cmb_size_mb=128"
end
if !nvme_pmrs[index].nil?
if !nvme_pmrs[index].nil? && nvme_pmrs[index] != ""
pmr_path, pmr_size = nvme_pmrs[index].split(':')
if !File.exist?(pmr_path)
abort("#{pmr_path} does not exist, aborting")
end
if pmr_size.nil?
pmr_size = "16M"
end
nvme_drive << ",pmrdev=pmr#{index}"
nvme_controller << ",pmrdev=pmr#{index}"
pmr_cmdline = "memory-backend-file,id=pmr#{index},share=on,mem-path=#{pmr_path},size=#{pmr_size}"
end
libvirt.qemuargs :value => nvme_drive
libvirt.qemuargs :value => "-device"
libvirt.qemuargs :value => nvme_controller
if pmr_cmdline != ""
libvirt.qemuargs :value => "-object"
libvirt.qemuargs :value => pmr_cmdline
end
# Define all namespaces
namespace_disks.each_with_index { |disk, nsid|
if disk == "none"
next
end
zoned = nvme_zns[index].nil? ? "false" : "true"
libvirt.qemuargs :value => "-drive"
libvirt.qemuargs :value => "format=raw,file=#{disk},if=none,id=#{nvme_disk_id}-drive#{nsid}"
libvirt.qemuargs :value => "-device"
libvirt.qemuargs :value => "nvme-ns,drive=#{nvme_disk_id}-drive#{nsid},bus=#{nvme_disk_id},nsid=#{nsid + 1},zoned=#{zoned},logical_block_size=4096,physical_block_size=4096"
}
end
def setup_ssh(config)
@@ -251,9 +279,7 @@ def setup_libvirt(config, vmcpu, vmram, distro)
# Loop to create all emulated disks set
emulated_nvme_types.each_with_index { |disk, index|
if disk == "nvme"
setup_nvme_disk(libvirt, disk, index)
end
}
# Add network interface for openstack tests


@@ -2,7 +2,6 @@
SYSTEM=$(uname -s)
size="1024M"
nvme_disk="/var/lib/libvirt/images/nvme_disk.img"
type="nvme"
function usage() {
echo "Usage: ${0##*/} [-s <disk_size>] [-n <backing file name>]"
@@ -10,7 +9,6 @@ function usage() {
echo " for OCSSD default: 9G"
echo "-n <backing file name> backing file path with name"
echo " default: /var/lib/libvirt/images/nvme_disk.img"
echo "-t <type> default: nvme available: ocssd"
}
while getopts "s:n:t:h-:" opt; do
@@ -26,9 +24,6 @@ while getopts "s:n:t:h-:" opt; do
n)
nvme_disk=$OPTARG
;;
t)
type=$OPTARG
;;
h)
usage
exit 0
@@ -47,22 +42,7 @@ if [ "${SYSTEM}" != "Linux" ]; then
fi
WHICH_OS=$(lsb_release -i | awk '{print $3}')
case $type in
"nvme")
qemu-img create -f raw "$nvme_disk" $size
;;
"ocssd")
if [ $size == "1024M" ]; then
size="9G"
fi
fallocate -l $size "$nvme_disk"
touch "${nvme_disk}_ocssd_md"
;;
*)
echo "We support only nvme and ocssd disks types"
exit 1
;;
esac
case $WHICH_OS in
"Fedora")
@@ -83,6 +63,3 @@ esac
chmod 777 "$nvme_disk"
chown $qemu_user_group "$nvme_disk"
if [ "$type" == "ocssd" ]; then
chown $qemu_user_group "${nvme_disk}_ocssd_md"
fi


@@ -30,10 +30,11 @@ display_help() {
echo " If no -b option is specified then this option defaults to emulating single"
echo " NVMe with 1 namespace and assumes path: /var/lib/libvirt/images/nvme_disk.img"
echo " -b option can be used multiple times for attaching multiple files to the VM"
echo " Parameters for -b option: <path>,<type>,<namespaces>,<cmb>,<pmr_file[:pmr_size]>"
echo " Parameters for -b option: <path>,<type>,<ns_path1[:ns_path1:...]>,<cmb>,<pmr_file[:pmr_size]>"
echo " Available types: nvme"
echo " Default pmr size: 16M"
echo " Default cmb: false"
echo " type, ns_path, cmb and pmr can be empty"
echo " -c Create all above disk, default 0"
echo " -H Use hugepages for allocating VM memory. Only for libvirt provider. Default: false."
echo " -u Use password authentication to the VM instead of SSH keys."
@@ -56,8 +57,8 @@ display_help() {
echo " $0 -s 2048 -n 2 ubuntu16"
echo " $0 -rv freebsd"
echo " $0 fedora33"
echo " $0 -b /var/lib/libvirt/images/nvme1.img,nvme,1 fedora33"
echo " $0 -b /var/lib/libvirt/images/nvme5.img,nvme,5 fedora33"
echo " $0 -b /var/lib/libvirt/images/nvme1.img,nvme,/var/lib/libvirt/images/nvme1n1.img fedora33"
echo " $0 -b none fedora33"
echo
}
@@ -187,21 +188,15 @@ if [ -z "$NVME_FILE" ]; then
else
TMP=""
for args in $NVME_FILE; do
while IFS=, read -r path type namespace cmb pmr; do
while IFS=, read -r path type namespace cmb pmr zns; do
TMP+="$path,"
if [ -z "$type" ]; then
type="nvme"
fi
if [[ -n $cmb ]]; then
NVME_CMB=${NVME_CMB:+$NVME_CMB,}$cmb
fi
if [[ -n $pmr ]]; then
NVME_PMR=${NVME_PMR:+$NVME_PMR,}$pmr
fi
NVME_CMB+="$cmb,"
NVME_PMR+="$pmr,"
NVME_ZNS+="$zns,"
NVME_DISKS_TYPE+="$type,"
if [ -z "$namespace" ] && [ -n "$SPDK_QEMU_EMULATOR" ]; then
namespace="1"
fi
NVME_DISKS_NAMESPACES+="$namespace,"
if [ ${NVME_AUTO_CREATE} = 1 ]; then
$SPDK_DIR/scripts/vagrant/create_nvme_img.sh -t $type -n $path
@@ -224,6 +219,7 @@ if [ ${VERBOSE} = 1 ]; then
echo NVME_DISKS_NAMESPACES=$NVME_DISKS_NAMESPACES
echo NVME_CMB=$NVME_CMB
echo NVME_PMR=$NVME_PMR
echo NVME_ZNS=$NVME_ZNS
echo SPDK_VAGRANT_DISTRO=$SPDK_VAGRANT_DISTRO
echo SPDK_VAGRANT_VMCPU=$SPDK_VAGRANT_VMCPU
echo SPDK_VAGRANT_VMRAM=$SPDK_VAGRANT_VMRAM
@@ -248,6 +244,7 @@ export COPY_SPDK_ARTIFACTS
export DEPLOY_TEST_VM
export NVME_CMB
export NVME_PMR
export NVME_ZNS
export NVME_DISKS_TYPE
export NVME_DISKS_NAMESPACES
export NVME_FILE


@@ -28,9 +28,7 @@ verify() {
}
denied() {
# Include OCSSD devices in the PCI_BLOCKED to make sure we don't unbind
# them from the pci-stub (see autotest.sh for details).
PCI_BLOCKED="$OCSSD_PCI_DEVICES ${devs[0]}" setup output config \
PCI_BLOCKED="${devs[0]}" setup output config \
| grep "Skipping denied controller at ${devs[0]}"
verify "${devs[0]}"
setup reset