2018-01-02 21:44:48 +00:00
|
|
|
# Common shell utility functions
|
|
|
|
|
2020-11-25 20:31:42 +00:00
|
|
|
# Check if PCI device is in PCI_ALLOWED and not in PCI_BLOCKED
|
2019-02-07 12:28:49 +00:00
|
|
|
# Env:
|
2020-11-25 20:31:42 +00:00
|
|
|
# if PCI_ALLOWED is empty assume device is allowed
|
|
|
|
# if PCI_BLOCKED is empty assume device is NOT blocked
|
2019-02-07 12:28:49 +00:00
|
|
|
# Params:
|
|
|
|
# $1 - PCI BDF
|
2019-02-07 12:31:06 +00:00
|
|
|
function pci_can_use() {
|
2019-02-07 12:28:49 +00:00
|
|
|
local i
|
|
|
|
|
|
|
|
# The '\ ' part is important
|
2020-11-25 20:31:42 +00:00
|
|
|
if [[ " $PCI_BLOCKED " =~ \ $1\ ]]; then
|
2019-02-07 12:28:49 +00:00
|
|
|
return 1
|
|
|
|
fi
|
|
|
|
|
2020-11-25 20:31:42 +00:00
|
|
|
if [[ -z "$PCI_ALLOWED" ]]; then
|
|
|
|
#no allow list specified, bind all devices
|
2019-02-07 12:28:49 +00:00
|
|
|
return 0
|
|
|
|
fi
|
|
|
|
|
2020-11-25 20:31:42 +00:00
|
|
|
for i in $PCI_ALLOWED; do
|
2020-05-07 11:27:06 +00:00
|
|
|
if [ "$i" == "$1" ]; then
|
2019-02-07 12:28:49 +00:00
|
|
|
return 0
|
|
|
|
fi
|
|
|
|
done
|
|
|
|
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
2020-05-07 11:27:06 +00:00
|
|
|
# Initialize (or reset) the global PCI caches shared by the
# cache_pci_bus_*() helpers:
#   pci_bus_cache  - maps class code / "vendor:device" key -> list of BDFs
#   pci_ids_vendor - maps BDF -> vendor id
#   pci_ids_device - maps BDF -> device id
# Returns 1 when a populated cache is reused as-is (no reset requested).
cache_pci_init() {
	local -gA pci_bus_cache
	local -gA pci_ids_vendor
	local -gA pci_ids_device

	# Rebuild only when the cache is empty or an explicit reset was
	# requested via CMD. ${CMD:-} keeps this safe under 'set -u'.
	[[ -z ${pci_bus_cache[*]} || ${CMD:-} == reset ]] || return 1

	pci_bus_cache=()
	# Fix: clear the arrays declared above. The previous code reset the
	# misspelled pci_bus_ids_vendor/pci_bus_ids_device globals, leaving
	# stale vendor/device ids in pci_ids_vendor/pci_ids_device on reset.
	pci_ids_vendor=()
	pci_ids_device=()
}
|
|
|
|
|
2020-05-07 11:27:06 +00:00
|
|
|
# Record a single device in the PCI caches.
# Params:
# $1 - PCI BDF
# $2 - class code (with or without a 0x prefix); may be empty
# $3 - vendor id, $4 - device id (both must be set to be cached)
cache_pci() {
	local bdf=$1 class=$2 ven=$3 dev=$4

	if [[ -n $class ]]; then
		# Normalize to exactly one 0x prefix.
		class=0x${class/0x/}
		if [[ -n ${pci_bus_cache["$class"]} ]]; then
			pci_bus_cache["$class"]+=" $bdf"
		else
			pci_bus_cache["$class"]=$bdf
		fi
	fi

	if [[ -n $ven && -n $dev ]]; then
		ven=0x${ven/0x/} dev=0x${dev/0x/}
		if [[ -n ${pci_bus_cache["$ven:$dev"]} ]]; then
			pci_bus_cache["$ven:$dev"]+=" $bdf"
		else
			pci_bus_cache["$ven:$dev"]=$bdf
		fi
		pci_ids_vendor["$bdf"]=$ven
		pci_ids_device["$bdf"]=$dev
	fi
}
|
|
|
|
|
2020-05-07 11:27:06 +00:00
|
|
|
# Populate the PCI caches by walking sysfs (Linux only).
# Returns 1 when sysfs is not available.
cache_pci_bus_sysfs() {
	[[ -e /sys/bus/pci/devices ]] || return 1

	# A populated, non-reset cache is reused as-is.
	cache_pci_init || return 0

	local dev
	local class vendor device

	for dev in /sys/bus/pci/devices/*; do
		class=$(< "$dev/class")
		vendor=$(< "$dev/vendor")
		device=$(< "$dev/device")
		cache_pci "${dev##*/}" "$class" "$vendor" "$device"
	done
}
|
|
|
|
|
2020-05-07 11:27:06 +00:00
|
|
|
# Populate the PCI caches from 'lspci -Dnmm' output.
# Returns 1 when lspci is not available.
cache_pci_bus_lspci() {
	hash lspci 2> /dev/null || return 1

	cache_pci_bus_init_guard() { :; } # placeholder removed below
	unset -f cache_pci_bus_init_guard
	cache_pci_init || return 0

	local fields
	while read -ra fields; do
		# Strip the quoting lspci -mm wraps around every field.
		fields=("${fields[@]//\"/}")
		# lspci emits the last byte of the class code (the prog.
		# interface) as a separate "-pXX" field when it's non-zero.
		# Fold it back in so the class matches what sysfs exposes.
		# NOTE(review): the pattern only covers decimal digits; a hex
		# prog-if (e.g. -p0a) would fall through to "00" - confirm.
		if [[ ${fields[*]} =~ -p([0-9]+) ]]; then
			fields[1]+=${BASH_REMATCH[1]}
		else
			fields[1]+=00
		fi
		# Field layout: BDF class vendor device
		cache_pci "${fields[@]::4}"
	done < <(lspci -Dnmm)
}
|
|
|
|
|
2020-05-07 11:27:06 +00:00
|
|
|
# Populate the PCI caches from 'pciconf -l' output (FreeBSD).
# Returns 1 when pciconf is not available.
cache_pci_bus_pciconf() {
	hash pciconf 2> /dev/null || return 1

	cache_pci_init || return 0

	local sel class _vd vendor device
	local bdf domain bus slot func

	# pciconf -l lines look like:
	#   nvme0@pci0:0:2:0:  class=0x010802 card=... chip=0x09538086 ...
	while read -r sel class _ _vd _; do
		# Selector -> "domain:bus:slot:func" once the driver@pci
		# prefix is stripped.
		IFS=":" read -r domain bus slot func _ <<< "${sel##*pci}"
		bdf=$(printf '%04x:%02x:%02x:%x' \
			"$domain" "$bus" "$slot" "$func")
		# The field is literally "class=0x...": arithmetic evaluation
		# treats it as an assignment, yielding the numeric value.
		class=$(printf '0x%06x' $((class)))
		# chip= packs the device id in the high word and the vendor
		# id in the low word.
		vendor=$(printf '0x%04x' $((_vd & 0xffff)))
		device=$(printf '0x%04x' $(((_vd >> 16) & 0xffff)))
		cache_pci "$bdf" "$class" "$vendor" "$device"
	done < <(pciconf -l)
}
|
|
|
|
|
2020-05-07 11:27:06 +00:00
|
|
|
# Pick the PCI enumeration backend matching the running OS and populate
# the caches with it. Linux falls back to sysfs when lspci is missing.
cache_pci_bus() {
	local os
	os=$(uname -s)

	if [[ $os == Linux ]]; then
		cache_pci_bus_lspci || cache_pci_bus_sysfs
	elif [[ $os == FreeBSD ]]; then
		cache_pci_bus_pciconf
	fi
}
|
|
|
|
|
2020-05-07 11:27:06 +00:00
|
|
|
# Print BDFs of cached devices matching a cache key, sysfs-backed.
# Params:
# $1 - pci_bus_cache key (class code or vendor:device); defaults to the
#      NVMe class code 0x010802
# $2 - optional cap on the number of BDFs printed
iter_all_pci_sysfs() {
	cache_pci_bus_sysfs || return 1

	local key=${1:-0x010802} limit=$2
	local devs

	# Nothing cached under this key - not an error, just no output.
	[[ -n ${pci_bus_cache["$key"]} ]] || return 0
	read -ra devs <<< "${pci_bus_cache["$key"]}"

	if ((limit)); then
		printf '%s\n' "${devs[@]::limit}"
	else
		printf '%s\n' "${devs[@]}"
	fi
}
|
|
|
|
|
2020-11-25 20:31:42 +00:00
|
|
|
# This function will ignore PCI PCI_ALLOWED and PCI_BLOCKED
# Prints the BDF of every PCI device matching the given class code.
# Params:
# $1 - class (e.g. 01 for mass storage)
# $2 - subclass (e.g. 08 for NVM)
# $3 - prog. interface (e.g. 02 for NVMe)
function iter_all_pci_class_code() {
	local class
	local subclass
	local progif
	# Normalize every component to two lowercase hex digits.
	class="$(printf %02x $((0x$1)))"
	subclass="$(printf %02x $((0x$2)))"
	progif="$(printf %02x $((0x$3)))"

	if hash lspci &> /dev/null; then
		if [ "$progif" != "00" ]; then
			# lspci -mm prints a non-zero prog. interface as a
			# separate "-pXX" field, hence the extra grep.
			lspci -mm -n -D \
				| grep -i -- "-p${progif}" \
				| awk -v cc="\"${class}${subclass}\"" -F " " \
					'{if (cc ~ $2) print $1}' | tr -d '"'
		else
			# NOTE(review): awk matches the quoted 2nd field as a
			# regex against cc rather than comparing for equality -
			# works for plain hex class codes; confirm intent.
			lspci -mm -n -D \
				| awk -v cc="\"${class}${subclass}\"" -F " " \
					'{if (cc ~ $2) print $1}' | tr -d '"'
		fi
	elif hash pciconf &> /dev/null; then
		# FreeBSD: the selector before the tab looks like
		# "nvme0@pci0:0:2:0:"; strip the driver@pci prefix and
		# re-format the remaining numbers as a BDF.
		local addr=($(pciconf -l | grep -i "class=0x${class}${subclass}${progif}" \
			| cut -d$'\t' -f1 | sed -e 's/^[a-zA-Z0-9_]*@pci//g' | tr ':' ' '))
		printf "%04x:%02x:%02x:%x\n" ${addr[0]} ${addr[1]} ${addr[2]} ${addr[3]}
	elif iter_all_pci_sysfs "$(printf '0x%06x' $((0x$progif | 0x$subclass << 8 | 0x$class << 16)))"; then
		# Fallback: lookup via the sysfs-backed cache (Linux only).
		:
	else
		echo "Missing PCI enumeration utility" >&2
		exit 1
	fi
}
|
|
|
|
|
2020-11-25 20:31:42 +00:00
|
|
|
# This function will ignore PCI PCI_ALLOWED and PCI_BLOCKED
# Prints the BDF of every PCI device matching the given vendor/device
# id pair.
# Params:
# $1 - vendor id (e.g. 8086)
# $2 - device id
function iter_all_pci_dev_id() {
	local ven_id
	local dev_id
	# Normalize both ids to four lowercase hex digits.
	ven_id="$(printf %04x $((0x$1)))"
	dev_id="$(printf %04x $((0x$2)))"

	if hash lspci &> /dev/null; then
		# NOTE(review): awk matches the quoted vendor/device fields
		# as regexes against ven/dev rather than comparing for
		# equality - fine for plain hex ids; confirm intent.
		lspci -mm -n -D | awk -v ven="\"$ven_id\"" -v dev="\"${dev_id}\"" -F " " \
			'{if (ven ~ $3 && dev ~ $4) print $1}' | tr -d '"'
	elif hash pciconf &> /dev/null; then
		# FreeBSD: chip= packs the device id in the high word and the
		# vendor id in the low word, hence the ${dev_id}${ven_id}
		# order in the grep pattern.
		local addr=($(pciconf -l | grep -i "chip=0x${dev_id}${ven_id}" \
			| cut -d$'\t' -f1 | sed -e 's/^[a-zA-Z0-9_]*@pci//g' | tr ':' ' '))
		printf "%04x:%02x:%02x:%x\n" ${addr[0]} ${addr[1]} ${addr[2]} ${addr[3]}
	elif iter_all_pci_sysfs "0x$ven_id:0x$dev_id"; then
		# Fallback: lookup via the sysfs-backed cache (Linux only).
		:
	else
		echo "Missing PCI enumeration utility" >&2
		exit 1
	fi
}
|
2019-02-07 12:31:06 +00:00
|
|
|
|
|
|
|
# Filter the output of iter_all_pci_dev_id() through PCI_ALLOWED and
# PCI_BLOCKED (see pci_can_use()). Args are forwarded unchanged.
function iter_pci_dev_id() {
	local dev

	for dev in $(iter_all_pci_dev_id "$@"); do
		if pci_can_use "$dev"; then
			echo "$dev"
		fi
	done
}
|
|
|
|
|
2020-11-25 20:31:42 +00:00
|
|
|
# This function will filter out PCI devices using PCI_ALLOWED and PCI_BLOCKED
# See function pci_can_use()
function iter_pci_class_code() {
	local dev

	for dev in $(iter_all_pci_class_code "$@"); do
		if pci_can_use "$dev"; then
			echo "$dev"
		fi
	done
}
|
2020-06-23 08:25:15 +00:00
|
|
|
|
|
|
|
# Print BDFs of NVMe controllers that are NOT claimed by the kernel nvme
# driver. Returns 1 when every controller is still kernel-bound.
function nvme_in_userspace() {
	# Controllers still attached to the kernel driver were most likely
	# excluded on purpose (e.g. via PCI_ALLOWED when setup.sh ran), so
	# we do not want to use those disks.

	local ctrl
	local ctrls=() userspace=()

	# Prefer the cache when populated (key = NVMe class code).
	if [[ -n ${pci_bus_cache["0x010802"]} ]]; then
		ctrls=(${pci_bus_cache["0x010802"]})
	else
		ctrls=($(iter_pci_class_code 01 08 02))
	fi

	for ctrl in "${ctrls[@]}"; do
		# Skip controllers the kernel still owns (Linux via sysfs,
		# FreeBSD via pciconf).
		if [[ -e /sys/bus/pci/drivers/nvme/$ctrl ]] \
			|| [[ $(uname -s) == FreeBSD && $(pciconf -l "pci${ctrl/./:}") == nvme* ]]; then
			continue
		fi
		userspace+=("$ctrl")
	done

	((${#userspace[@]})) || return 1

	printf '%s\n' "${userspace[@]}"
}
|
2020-10-21 16:59:14 +00:00
|
|
|
|
2020-11-03 13:31:21 +00:00
|
|
|
# Compare two dotted version strings.
# Usage: cmp_versions VER1 OP VER2, where OP is one of: < > <= >= ==
# Components are split on ".", "-" and ":" and may be decimal or hex;
# unparsable (or missing) components count as 0.
# Returns 0 when "VER1 OP VER2" holds.
cmp_versions() {
	local lhs rhs
	local op=$2
	local idx

	IFS=".-:" read -ra lhs <<< "$1"
	IFS=".-:" read -ra rhs <<< "$3"

	local len_a=${#lhs[@]} len_b=${#rhs[@]}
	local total=$((len_a > len_b ? len_a : len_b))

	# Status to return when the first differing component is greater
	# (ret_gt), smaller (ret_lt), or when both versions compare equal
	# (ret_eq). An unknown op leaves all three at 0 (always "true").
	local ret_lt=0 ret_gt=0 ret_eq=0
	case "$op" in
		"<") ret_eq=1 ret_gt=1 ;;
		">") ret_eq=1 ret_lt=1 ;;
		"<=") ret_gt=1 ;;
		">=") ret_lt=1 ;;
		"==") ret_lt=1 ret_gt=1 ;;
	esac

	# Convert a single component to a decimal integer; runs in a
	# subshell so nothing leaks into the caller's scope.
	decimal() (
		local val=${1,,}
		if [[ $val =~ ^[0-9]+$ ]]; then
			echo $((10#$val))
		elif [[ $val =~ ^0x || $val =~ ^[a-f0-9]+$ ]]; then
			echo $((0x${val/0x/}))
		else
			echo 0
		fi
	)

	for ((idx = 0; idx < total; idx++)); do
		lhs[idx]=$(decimal "${lhs[idx]}")
		rhs[idx]=$(decimal "${rhs[idx]}")
		((lhs[idx] > rhs[idx])) && return "$ret_gt"
		((lhs[idx] < rhs[idx])) && return "$ret_lt"
	done

	[[ ${lhs[*]} == "${rhs[*]}" ]] && return "$ret_eq"
}
|
|
|
|
|
2020-11-03 13:31:21 +00:00
|
|
|
# Shorthand comparators built on top of cmp_versions().
lt() {
	# True when $1 < $2
	cmp_versions "$1" "<" "$2"
}

gt() {
	# True when $1 > $2
	cmp_versions "$1" ">" "$2"
}

le() {
	# True when $1 <= $2
	cmp_versions "$1" "<=" "$2"
}

ge() {
	# True when $1 >= $2
	cmp_versions "$1" ">=" "$2"
}

eq() {
	# True when $1 == $2
	cmp_versions "$1" "==" "$2"
}

neq() {
	# True when $1 != $2
	! eq "$1" "$2"
}
|