#!/usr/bin/env bash
set -e
trap 'echo -e "\n\nConfiguration failed\n\n" >&2' ERR
rootdir=$(readlink -f $(dirname $0))
function usage()
{
echo "'configure' configures SPDK to compile on supported platforms."
echo ""
echo "Usage: ./configure [OPTION]..."
echo ""
echo "Defaults for the options are specified in brackets."
echo ""
echo "General:"
echo " -h, --help Display this help and exit"
echo ""
echo " --prefix=path Configure installation prefix (default: /usr/local)"
echo " --target-arch=arch Target build architecture. Must be a valid GNU arch. Default: native"
echo ""
echo " --cross-prefix=prefix Prefix for cross compilation (default: none)"
echo " example: aarch64-linux-gnu"
echo ""
echo " --enable-debug Configure for debug builds"
echo " --enable-log-bt Enable support of backtrace printing in SPDK logs (requires libunwind)."
echo " --enable-werror Treat compiler warnings as errors"
echo " --enable-asan Enable address sanitizer"
echo " --enable-ubsan Enable undefined behavior sanitizer"
echo " --enable-coverage Enable code coverage tracking"
echo " --enable-lto Enable link-time optimization"
echo " --enable-pgo-capture Enable generation of profile guided optimization data"
echo " --enable-pgo-use Use previously captured profile guided optimization data"
echo " --disable-tests Disable building of functional tests"
echo " --disable-unit-tests Disable building of unit tests"
echo " --disable-examples Disable building of examples"
echo ""
echo "Specifying Dependencies:"
echo "--with-DEPENDENCY[=path] Use the given dependency. Optionally, provide the"
echo " path."
echo "--without-DEPENDENCY Do not link to the given dependency. This may"
echo " disable features and components."
echo ""
echo "Valid dependencies are listed below."
echo " dpdk Build against a custom dpdk version. By default, the dpdk"
echo " submodule in spdk tree will be used."
echo " example: /usr/share/dpdk/x86_64-default-linuxapp-gcc"
echo " env Use an alternate environment implementation instead of DPDK."
echo " Implies --without-dpdk."
echo " igb-uio-driver Build DPDK's igb-uio driver."
echo " Required on some systems to use qat devices. This flag is"
echo " effective only with the default dpdk submodule."
echo " No path required"
echo " idxd Build the IDXD library and accel framework plug-in module."
echo " Disabled while experimental. Only built for x86 when enabled."
echo " crypto Build vbdev crypto module."
echo " No path required."
echo " fio Build fio_plugin."
echo " default: /usr/src/fio"
echo " vhost Build vhost target. Enabled by default."
echo " No path required."
echo " internal-vhost-lib Use the internal copy of rte_vhost. By default, the upstream"
echo " rte_vhost from DPDK will be used."
echo " No path required."
echo " virtio Build vhost initiator and virtio-pci bdev modules."
echo " No path required."
echo " pmdk Build persistent memory bdev."
echo " example: /usr/share/pmdk"
echo " reduce Build vbdev compression module."
echo " No path required."
echo " vpp Build VPP net module."
echo " example: /vpp_repo/build-root/rpmbuild/vpp-18.01.1.0/build-root/install-vpp-native/vpp"
echo " rbd Build Ceph RBD bdev module."
echo " No path required."
echo " rdma Build RDMA transport for NVMf target and initiator."
echo " No path required."
echo " fc Build FC transport for NVMf target."
echo " If an argument is provided, it is considered a directory containing"
echo " libufc.a and fc_lld.h. Otherwise the regular system paths will"
echo " be searched."
echo " shared Build spdk shared libraries."
echo " No path required."
echo " iscsi-initiator Build with iscsi bdev module."
echo " No path required."
echo " vtune Required to profile I/O under Intel VTune Amplifier XE."
echo " example: /opt/intel/vtune_amplifier_xe_version"
echo " ocf Build OCF library and bdev module."
echo " If argument is directory, interpret it as root of OCF repo"
echo " If argument is file, interpret it as compiled OCF lib"
echo " If no argument is specified, OCF git submodule is used by default"
echo " example: /usr/src/ocf/"
echo " isal Build with ISA-L. Enabled by default on x86 and aarch64 architectures."
echo " No path required."
echo " uring Build I/O uring bdev or socket module."
echo " If an argument is provided, it is considered a directory containing"
echo " liburing.a and io_uring.h. Otherwise the regular system paths will"
echo " be searched."
echo " fuse Build FUSE components for mounting a blobfs filesystem."
echo " No path required."
echo " nvme-cuse Build NVMe driver with support for CUSE-based character devices."
echo " No path required."
echo " raid5 Build with bdev_raid module RAID5 support."
echo " No path required."
echo ""
echo "Environment variables:"
echo ""
echo "CC C compiler"
echo "CFLAGS C compiler flags"
echo "CXX C++ compiler"
echo "CXXFLAGS C++ compiler flags"
echo "LD Linker"
echo "LDFLAGS Linker flags"
echo "DESTDIR Destination for 'make install'"
echo ""
}
# Load default values
# Convert CONFIG to a sourceable configuration file
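# e.g. a CONFIG line such as CONFIG_DEBUG=n becomes CONFIG[DEBUG]=n, which can
# be sourced straight into the associative array declared below.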
sed -r 's/CONFIG_([[:alnum:]_]+)=(.*)/CONFIG[\1]=\2/g' $rootdir/CONFIG > $rootdir/CONFIG.sh
declare -A CONFIG
source $rootdir/CONFIG.sh
rm $rootdir/CONFIG.sh
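# First pass over the arguments: pick up the cross-compilation prefix and the
# LTO setting, since both must be known before the compiler toolchain is
# detected below.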
for i in "$@"; do
case "$i" in
--cross-prefix=*)
CONFIG[CROSS_PREFIX]="${i#*=}"
;;
--enable-lto)
CONFIG[LTO]=y
;;
--disable-lto)
CONFIG[LTO]=n
;;
esac
done
# Detect the compiler toolchain
$rootdir/scripts/detect_cc.sh --cc="$CC" --cxx="$CXX" --lto="${CONFIG[LTO]}" --ld="$LD" --cross-prefix="${CONFIG[CROSS_PREFIX]}" > $rootdir/mk/cc.mk
CC=$(cat $rootdir/mk/cc.mk | grep "DEFAULT_CC=" | sed s/DEFAULT_CC=//)
CC_TYPE=$(cat $rootdir/mk/cc.mk | grep "CC_TYPE=" | cut -d "=" -f 2)
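# Use the compiler's target triple (e.g. x86_64-pc-linux-gnu) and the host OS
# name to drive the architecture- and platform-specific defaults below.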
arch=$($CC -dumpmachine)
sys_name=$(uname -s)
# Sanitize the default configuration. Features enabled by default are silently
# turned off when unsupported; anything the user requested explicitly will still fail later.
# Force no ISA-L if non-x86 or non-aarch64 architecture
if [[ "${CONFIG[ISAL]}" = "y" ]]; then
if [[ $arch != x86_64* ]] && [[ $arch != aarch64* ]]; then
CONFIG[ISAL]=n
echo "Notice: ISA-L not supported for ${arch}. Turning off default feature."
fi
fi
if [[ $sys_name == "FreeBSD" ]]; then
# Vhost, rte_vhost library and virtio are only supported on Linux.
CONFIG[VHOST]="n"
CONFIG[VHOST_INTERNAL_LIB]="n"
CONFIG[VIRTIO]="n"
echo "Notice: Vhost, rte_vhost library and virtio are only supported on Linux. Turning off default feature."
fi
# Check NASM only on x86
if [[ $arch == x86_64* ]]; then
ver=$(nasm -v 2>/dev/null | awk '{print $3}' | sed 's/[^0-9]*//g')
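# Strip everything but digits from the reported version (e.g. 2.13.03 becomes
# 21303) and compare prefixes; NASM older than 2.13.03 disables the features below.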
if [[ "${ver:0:1}" -le "2" ]] && [[ "${ver:0:3}" -le "213" ]] && [[ "${ver:0:5}" -lt "21303" ]]; then
# ISA-L, compression & crypto require NASM version 2.13.03 or newer.
CONFIG[ISAL]=n
CONFIG[CRYPTO]=n
CONFIG[IPSEC_MB]=n
CONFIG[REDUCE]=n
HAVE_NASM=n
echo "Notice: ISA-L, compression & crypto require NASM version 2.13.03 or newer. Turning off default ISA-L and crypto features."
else
HAVE_NASM=y
fi
fi
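# check_dir: verify that the path supplied to a --with-FOO=path option exists.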
function check_dir() {
arg="$1"
dir="${arg#*=}"
if [ ! -d "$dir" ]; then
echo "$arg: directory not found"
exit 1
fi
}
for i in "$@"; do
case "$i" in
-h|--help)
usage
exit 0
;;
--cross-prefix=*) ;&
--enable-lto) ;&
--disable-lto)
# Options handled before detecting CC.
;;
--prefix=*)
CONFIG[PREFIX]="${i#*=}"
;;
--target-arch=*)
CONFIG[ARCH]="${i#*=}"
;;
--enable-debug)
CONFIG[DEBUG]=y
;;
--disable-debug)
CONFIG[DEBUG]=n
;;
--enable-log-bt)
CONFIG[LOG_BACKTRACE]=y
;;
--disable-log-bt)
CONFIG[LOG_BACKTRACE]=n
;;
--enable-asan)
CONFIG[ASAN]=y
;;
--disable-asan)
CONFIG[ASAN]=n
;;
--enable-ubsan)
CONFIG[UBSAN]=y
;;
--disable-ubsan)
CONFIG[UBSAN]=n
;;
--enable-tsan)
CONFIG[TSAN]=y
;;
--disable-tsan)
CONFIG[TSAN]=n
;;
--enable-coverage)
CONFIG[COVERAGE]=y
;;
--disable-coverage)
CONFIG[COVERAGE]=n
;;
--enable-pgo-capture)
CONFIG[PGO_CAPTURE]=y
;;
--disable-pgo-capture)
CONFIG[PGO_CAPTURE]=n
;;
--enable-pgo-use)
CONFIG[PGO_USE]=y
;;
--disable-pgo-use)
CONFIG[PGO_USE]=n
;;
--enable-tests)
CONFIG[TESTS]=y
;;
--disable-tests)
CONFIG[TESTS]=n
;;
--enable-unit-tests)
CONFIG[UNIT_TESTS]=y
;;
--disable-unit-tests)
CONFIG[UNIT_TESTS]=n
;;
--enable-examples)
CONFIG[EXAMPLES]=y
;;
--disable-examples)
CONFIG[EXAMPLES]=n
;;
--enable-werror)
CONFIG[WERROR]=y
;;
--disable-werror)
CONFIG[WERROR]=n
;;
--with-dpdk=*)
check_dir "$i"
CONFIG[DPDK_DIR]=$(readlink -f ${i#*=})
;;
--without-dpdk)
CONFIG[DPDK_DIR]=
;;
--with-env=*)
CONFIG[ENV]="${i#*=}"
;;
--with-rbd)
CONFIG[RBD]=y
;;
--without-rbd)
CONFIG[RBD]=n
;;
--with-rdma)
CONFIG[RDMA]=y
;;
--without-rdma)
CONFIG[RDMA]=n
;;
--with-fc=*)
CONFIG[FC]=y
CONFIG[FC_PATH]=$(readlink -f ${i#*=})
;;
--with-fc)
CONFIG[FC]=y
CONFIG[FC_PATH]=
;;
--without-fc)
CONFIG[FC]=n
CONFIG[FC_PATH]=
;;
--with-shared)
CONFIG[SHARED]=y
;;
--without-shared)
CONFIG[SHARED]=n
;;
--with-iscsi-initiator)
CONFIG[ISCSI_INITIATOR]=y
;;
--without-iscsi-initiator)
CONFIG[ISCSI_INITIATOR]=n
;;
--with-crypto)
CONFIG[CRYPTO]=y
;;
--without-crypto)
CONFIG[CRYPTO]=n
;;
--with-vhost)
CONFIG[VHOST]=y
;;
--without-vhost)
CONFIG[VHOST]=n
;;
--with-internal-vhost-lib)
CONFIG[VHOST_INTERNAL_LIB]=y
;;
--without-internal-vhost-lib)
CONFIG[VHOST_INTERNAL_LIB]=n
;;
--with-virtio)
CONFIG[VIRTIO]=y
;;
--without-virtio)
CONFIG[VIRTIO]=n
;;
--with-pmdk)
CONFIG[PMDK]=y
CONFIG[PMDK_DIR]=""
;;
--with-pmdk=*)
CONFIG[PMDK]=y
check_dir "$i"
CONFIG[PMDK_DIR]=$(readlink -f ${i#*=})
;;
--without-pmdk)
CONFIG[PMDK]=n
;;
--with-reduce)
CONFIG[REDUCE]=y
;;
--without-reduce)
CONFIG[REDUCE]=n
;;
--with-vpp)
CONFIG[VPP]=y
;;
--with-vpp=*)
CONFIG[VPP]=y
check_dir "$i"
CONFIG[VPP_DIR]=$(readlink -f ${i#*=})
;;
--without-vpp)
CONFIG[VPP]=n
;;
--with-fio) ;&
--with-fio=*)
if [[ ${i#*=} != "$i" ]]; then
CONFIG[FIO_SOURCE_DIR]=$(readlink -f "${i#*=}")
fi
check_dir "--with-fio=${CONFIG[FIO_SOURCE_DIR]}"
CONFIG[FIO_PLUGIN]=y
;;
--without-fio)
CONFIG[FIO_PLUGIN]=n
;;
--with-vtune=*)
check_dir "$i"
CONFIG[VTUNE_DIR]="${i#*=}"
CONFIG[VTUNE]=y
;;
--without-vtune)
CONFIG[VTUNE_DIR]=
CONFIG[VTUNE]=n
;;
--with-igb-uio-driver)
CONFIG[IGB_UIO_DRIVER]=y
;;
--without-igb-uio-driver)
CONFIG[IGB_UIO_DRIVER]=n
;;
--with-ocf)
CONFIG[OCF]=y
CONFIG[OCF_PATH]=$(readlink -f "./ocf")
;;
--with-ocf=*)
CONFIG[OCF]=y
CONFIG[OCF_PATH]=$(readlink -f ${i#*=})
;;
--without-ocf)
CONFIG[OCF]=n
CONFIG[OCF_PATH]=
;;
--with-isal)
CONFIG[ISAL]=y
;;
--without-isal)
CONFIG[ISAL]=n
;;
--with-uring=*)
CONFIG[URING]=y
CONFIG[URING_PATH]=$(readlink -f ${i#*=})
;;
--with-uring)
CONFIG[URING]=y
CONFIG[URING_PATH]=
;;
--without-uring)
CONFIG[URING]=n
CONFIG[URING_PATH]=
;;
--with-fuse)
CONFIG[FUSE]=y
;;
--without-fuse)
CONFIG[FUSE]=n
;;
--with-nvme-cuse)
CONFIG[NVME_CUSE]=y
;;
--without-nvme-cuse)
CONFIG[NVME_CUSE]=n
;;
--with-raid5)
CONFIG[RAID5]=y
;;
--without-raid5)
CONFIG[RAID5]=n
;;
--with-idxd)
CONFIG[IDXD]=y
;;
--without-idxd)
CONFIG[IDXD]=n
;;
--)
break
;;
*)
echo "Unrecognized option $i"
usage
exit 1
esac
done
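# BUILD_CMD is the compile-test command used by the feature probes below: a
# small C program is fed in on stdin and compiled against the header or library
# being checked. On x86_64 the probes also pass -march=native.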
if [[ $arch == x86_64* ]]; then
BUILD_CMD=($CC -o /dev/null -x c $CPPFLAGS $CFLAGS $LDFLAGS -march=native)
else
BUILD_CMD=($CC -o /dev/null -x c $CPPFLAGS $CFLAGS $LDFLAGS)
fi
# IDXD uses Intel-specific instructions.
if [[ "${CONFIG[IDXD]}" = "y" ]]; then
if [ $(uname -s) == "FreeBSD" ]; then
intel="hw.model: Intel"
cpu_vendor=$(sysctl -a | grep hw.model | cut -c 1-15)
else
intel="GenuineIntel"
cpu_vendor=$(grep -i 'vendor' /proc/cpuinfo --max-count=1)
fi
if [[ "$cpu_vendor" != *"$intel"* ]]; then
echo "ERROR: IDXD cannot be used due to CPU incompatiblity."
exit 1
fi
fi
# If ISA-L was explicitly requested, fail on architectures other than x86_64 and aarch64.
if [[ "${CONFIG[ISAL]}" = "y" ]]; then
if [[ $arch != x86_64* ]] && [[ $arch != aarch64* ]]; then
echo "ERROR: ISA-L cannot be used due to CPU incompatiblity."
exit 1
fi
fi
if [[ "${CONFIG[ISAL]}" = "n" ]] && [[ "${CONFIG[REDUCE]}" = "y" ]]; then
echo "ERROR Conflicting options: --with-reduce is not compatible with --without-isal."
exit 1
fi
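# Select the environment abstraction layer. Unless --with-env was given, use
# the DPDK-based env shipped with SPDK and locate a DPDK installation (either
# the in-tree submodule or the directory passed via --with-dpdk).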
if [ -z "${CONFIG[ENV]}" ]; then
CONFIG[ENV]=$rootdir/lib/env_dpdk
echo "Using default SPDK env in ${CONFIG[ENV]}"
if [ -z "${CONFIG[DPDK_DIR]}" ]; then
if [ ! -f "$rootdir"/dpdk/config/common_base ]; then
echo "DPDK not found; please specify --with-dpdk=<path> or run:"
echo
echo " git submodule update --init"
exit 1
else
CONFIG[DPDK_DIR]="${rootdir}/dpdk/build"
echo "Using default DPDK in ${CONFIG[DPDK_DIR]}"
fi
if [[ "${CONFIG[VHOST]}" = "y" ]] && [[ "${CONFIG[VHOST_INTERNAL_LIB]}" = "n" ]]; then
# We lookup "common_linux" file to check if DPDK version is >= 19.05.
# "common_linux" is available since exactly DPDK 19.05 - it was renamed
# from "common_linuxapp".
if [ ! -f "$rootdir"/dpdk/config/common_linux ]; then
echo "Notice: Using internal, legacy rte_vhost library due to DPDK" \
"version < 19.05"
CONFIG[VHOST_INTERNAL_LIB]=y
fi
fi
else
if [[ "${CONFIG[VHOST]}" = "y" ]] && [[ "${CONFIG[VHOST_INTERNAL_LIB]}" = "n" ]]; then
# DPDK must already be built, so we can simply try to use the new rte_vhost.
# It has a number of internal dependencies though, so don't try to link the
# program, just compile it
if ! echo -e '#include <rte_vhost.h>\n' \
'int main(void) { return rte_vhost_extern_callback_register(0, NULL, NULL); }\n' \
| ${BUILD_CMD[@]} -c -Wno-deprecated-declarations -Werror \
-I"${CONFIG[DPDK_DIR]}/include" - &>/dev/null; then
echo "Notice: DPDK's rte_vhost not found or version < 19.05, using internal," \
"legacy rte_vhost library."
CONFIG[VHOST_INTERNAL_LIB]=y
fi
fi
fi
else
if [ -n "${CONFIG[DPDK_DIR]}" ]; then
echo "--with-env and --with-dpdk are mutually exclusive."
exit 1
fi
if [ "${CONFIG[VHOST]}" = "y" ]; then
echo "Vhost is only supported when using the default DPDK environment. Disabling it."
fi
# Always disable vhost, but only print the error message if the user explicitly turned it on.
CONFIG[VHOST]="n"
if [ "${CONFIG[VIRTIO]}" = "y" ]; then
echo "Virtio is only supported when using the default DPDK environment. Disabling it."
fi
# Always disable virtio, but only print the error message if the user explicitly turned it on.
CONFIG[VIRTIO]="n"
fi
if [ "${CONFIG[VTUNE]}" = "y" ]; then
if [ -z "${CONFIG[VTUNE_DIR]}" ]; then
echo "When VTune is enabled, you must specify the VTune directory using --with-vtune=path"
exit 1
fi
fi
if [ "${CONFIG[ASAN]}" = "y" -a "${CONFIG[TSAN]}" = "y" ]; then
echo "ERROR: ASAN and TSAN cannot be enabled at the same time."
exit 1
fi
if [[ $sys_name == "FreeBSD" ]]; then
# FreeBSD doesn't support all configurations
if [[ "${CONFIG[COVERAGE]}" == "y" ]]; then
echo "ERROR: CONFIG_COVERAGE not available on FreeBSD"
exit 1
fi
fi
if [[ $sys_name == "FreeBSD" ]]; then
if [[ "${CONFIG[VHOST]}" == "y" ]]; then
echo "Vhost is only supported on Linux."
exit 1
fi
if [[ "${CONFIG[VHOST_INTERNAL_LIB]}" == "y" ]]; then
echo "Internal rte_vhost library is only supported on Linux."
exit 1
fi
if [[ "${CONFIG[VIRTIO]}" == "y" ]]; then
echo "Virtio is only supported on Linux."
exit 1
fi
fi
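# RDMA transport: verify that libverbs and librdmacm are usable, then probe for
# optional capabilities (Send With Invalidate, RDMA_OPTION_ID_ACK_TIMEOUT).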
if [ "${CONFIG[RDMA]}" = "y" ]; then
if ! echo -e '#include <infiniband/verbs.h>\n#include <rdma/rdma_verbs.h>\n' \
'int main(void) { return 0; }\n' \
| ${BUILD_CMD[@]} -libverbs -lrdmacm - 2>/dev/null; then
echo --with-rdma requires libverbs and librdmacm.
echo Please install then re-run this script.
exit 1
fi
if echo -e '#include <infiniband/verbs.h>\n' \
'int main(void) { return !!IBV_WR_SEND_WITH_INV; }\n' \
| ${BUILD_CMD[@]} -c - 2>/dev/null; then
CONFIG[RDMA_SEND_WITH_INVAL]="y"
else
CONFIG[RDMA_SEND_WITH_INVAL]="n"
echo "
*******************************************************************************
WARNING: The Infiniband Verbs opcode Send With Invalidate is either not
supported or is not functional with the current version of libibverbs installed
on this system. Please upgrade to at least version 1.1.
Beginning with Linux kernel 4.14, the kernel NVMe-oF initiator leverages Send
With Invalidate RDMA operations to improve performance. Failing to use the
Send With Invalidate operation on the NVMe-oF target side results in full
functionality, but greatly reduced performance. The SPDK NVMe-oF target will
be unable to leverage that operation using the currently installed version
of libibverbs, so Linux kernel NVMe-oF initiators based on kernels greater
than or equal to 4.14 will see significantly reduced performance.
*******************************************************************************"
fi
if echo -e '#include <rdma/rdma_cma.h>\n' \
'int main(void) { return !!RDMA_OPTION_ID_ACK_TIMEOUT; }\n' \
| ${BUILD_CMD[@]} -c - 2>/dev/null; then
CONFIG[RDMA_SET_ACK_TIMEOUT]="y"
else
CONFIG[RDMA_SET_ACK_TIMEOUT]="n"
echo "RDMA_OPTION_ID_ACK_TIMEOUT is not supported"
fi
fi
if [[ "${CONFIG[FC]}" = "y" ]]; then
if [[ -n "${CONFIG[FC_PATH]}" ]]; then
if [ ! -d "${CONFIG[FC_PATH]}" ]; then
echo "${CONFIG[FC_PATH]}: directory not found"
exit 1
fi
fi
fi
if [[ "${CONFIG[ISAL]}" = "y" ]] || [[ "${CONFIG[CRYPTO]}" = "y" ]]; then
if [[ "${HAVE_NASM}" = "n" ]] && [[ $arch == x86_64* ]]; then
echo "ERROR: ISA-L, compression & crypto require NASM version 2.13.03 or newer."
echo "Please install or upgrade them re-run this script."
exit 1
else
if [[ "${CONFIG[CRYPTO]}" = "y" ]]; then
CONFIG[IPSEC_MB]=y
fi
fi
fi
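# Configure the in-tree ISA-L submodule up front (static, position-independent
# build); the output is logged to isa-l/spdk-isal.log.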
if [[ "${CONFIG[ISAL]}" = "y" ]]; then
if [ ! -f "$rootdir"/isa-l/autogen.sh ]; then
echo "ISA-L was not found; To install ISA-L run:"
echo " git submodule update --init"
exit 1
fi
cd $rootdir/isa-l
ISAL_LOG=$rootdir/isa-l/spdk-isal.log
echo -n "Configuring ISA-L (logfile: $ISAL_LOG)..."
./autogen.sh &> $ISAL_LOG
./configure CFLAGS="-fPIC -g -O2" --enable-shared=no >> $ISAL_LOG 2>&1
echo "done."
cd $rootdir
fi
if [[ "${CONFIG[PMDK]}" = "y" ]]; then
if ! echo -e '#include <libpmemblk.h>\nint main(void) { return 0; }\n' \
| ${BUILD_CMD[@]} -lpmemblk - 2>/dev/null; then
echo --with-pmdk requires libpmemblk.
echo Please install then re-run this script.
exit 1
fi
fi
if [[ "${CONFIG[REDUCE]}" = "y" ]]; then
if ! echo -e '#include <libpmem.h>\nint main(void) { return 0; }\n' \
| ${BUILD_CMD[@]} -lpmem - 2>/dev/null; then
echo --with-reduce requires libpmem.
echo Please install then re-run this script.
exit 1
fi
fi
if [[ "${CONFIG[VPP]}" = "y" ]]; then
if [ ! -z "${CONFIG[VPP_DIR]}" ]; then
VPP_CFLAGS="-L${CONFIG[VPP_DIR]}/lib -I${CONFIG[VPP_DIR]}/include"
fi
if ! echo -e '#include <vnet/session/application_interface.h>\nint main(void) { return 0; }\n' \
| ${BUILD_CMD[@]} ${VPP_CFLAGS} -lvppinfra -lsvm -lvlibmemoryclient - 2>/dev/null; then
echo --with-vpp requires installed vpp.
echo Please install then re-run this script.
exit 1
fi
fi
if [[ "${CONFIG[NVME_CUSE]}" = "y" ]]; then
if ! echo -e '#define FUSE_USE_VERSION 31\n#include <fuse3/cuse_lowlevel.h>\n#include <fuse3/fuse_lowlevel.h>\n#include <fuse3/fuse_opt.h>\nint main(void) { return 0; }\n' \
| ${BUILD_CMD[@]} -lfuse3 -D_FILE_OFFSET_BITS=64 - 2>/dev/null; then
echo --with-nvme-cuse requires libfuse3.
echo Please install then re-run this script.
exit 1
fi
fi
if [[ "${CONFIG[RBD]}" = "y" ]]; then
if ! echo -e '#include <rbd/librbd.h>\n#include <rados/librados.h>\n' \
'int main(void) { return 0; }\n' \
| ${BUILD_CMD[@]} -lrados -lrbd - 2>/dev/null; then
echo --with-rbd requires librados and librbd.
echo Please install then re-run this script.
exit 1
fi
fi
if [[ "${CONFIG[ISCSI_INITIATOR]}" = "y" ]]; then
# Fedora installs libiscsi to /usr/lib64/iscsi for some reason.
if ! echo -e '#include <iscsi/iscsi.h>\n#include <iscsi/scsi-lowlevel.h>\n' \
'#if LIBISCSI_API_VERSION < 20150621\n' \
'#error\n' \
'#endif\n' \
'int main(void) { return 0; }\n' \
| ${BUILD_CMD[@]} -L/usr/lib64/iscsi -liscsi - 2>/dev/null; then
echo --with-iscsi-initiator requires libiscsi with
echo 'LIBISCSI_API_VERSION >= 20150621.'
echo Please install then re-run this script.
exit 1
fi
fi
if [[ "${CONFIG[LOG_BACKTRACE]}" = "y" ]]; then
if ! echo -e '#include <libunwind.h>\nint main(void) { return 0; }\n' \
| ${BUILD_CMD[@]} -lunwind - 2>/dev/null; then
echo --enable-log-bt requires libunwind.
echo Please install then re-run this script.
exit 1
fi
fi
if [[ "${CONFIG[ASAN]}" = "y" ]]; then
if ! echo -e 'int main(void) { return 0; }\n' \
| ${BUILD_CMD[@]} -fsanitize=address - 2>/dev/null; then
echo --enable-asan requires libasan.
echo Please install then re-run this script.
exit 1
fi
fi
if [[ "${CONFIG[UBSAN]}" = "y" ]]; then
if ! echo -e 'int main(void) { return 0; }\n' \
| ${BUILD_CMD[@]} -fsanitize=undefined - 2>/dev/null; then
echo --enable-ubsan requires libubsan.
echo Please install then re-run this script.
exit 1
fi
fi
if [[ "${CONFIG[TSAN]}" = "y" ]]; then
if ! echo -e 'int main(void) { return 0; }\n' \
| ${BUILD_CMD[@]} -fsanitize=thread - 2>/dev/null; then
echo --enable-tsan requires libtsan.
echo Please install then re-run this script.
exit 1
fi
fi
if [[ "${CONFIG[OCF]}" = "y" ]]; then
# If OCF_PATH is a file, assume it is a library and use it to compile with
if [ -f ${CONFIG[OCF_PATH]} ]; then
CONFIG[CUSTOMOCF]=y
else
CONFIG[CUSTOMOCF]=n
fi
fi
if [[ "${CONFIG[PGO_CAPTURE]}" = "y" && "${CONFIG[PGO_USE]}" = "y" ]]; then
echo "ERROR: --enable-pgo-capture and --enable-pgo-use are mutually exclusive."
exit 1
elif [[ "${CONFIG[PGO_USE]}" = "y" ]]; then
if [[ "$CC_TYPE" = "clang" ]]; then
# For clang we need to run an extra step on gathered profiling data.
echo "Generating suitable profile data"
llvm-profdata merge -output=build/pgo/default.profdata build/pgo
fi
fi
if [[ "${CONFIG[URING]}" = "y" ]]; then
if [[ -n "${CONFIG[URING_PATH]}" ]]; then
if [ ! -d "${CONFIG[URING_PATH]}" ]; then
echo "${CONFIG[URING_PATH]}: directory not found"
exit 1
fi
fi
fi
if [[ "${CONFIG[FUSE]}" = "y" ]]; then
if [[ ! -d /usr/include/fuse3 ]] && [[ ! -d /usr/local/include/fuse3 ]]; then
echo "--with-fuse requires libfuse3."
echo "Please install then re-run this script."
exit 1
fi
fi
# We are now ready to generate the final configuration. But first do a sanity
# check to verify that every key in the CONFIG array has a counterpart in the CONFIG file.
if [ $(egrep -c "^\s*CONFIG_[[:alnum:]_]+=" $rootdir/CONFIG) -ne ${#CONFIG[@]} ]; then
echo ""
echo "BUG: Some configuration options are not present in CONFIG file. Please update this file."
echo "Missing options in CONFIG (+) file and in current config (-): "
diff -u --label "CONFIG file" --label "CONFIG[@]" \
<(sed -r -e '/^\s*$/d; /^\s*#.*/d; s/(CONFIG_[[:alnum:]_]+)=.*/\1/g' CONFIG | sort) \
<(printf "CONFIG_%s\n" ${!CONFIG[@]} | sort)
exit 1
fi
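# Write out the final configuration: every CONFIG_<key> line in mk/config.mk is
# rewritten as CONFIG_<key>?=<value>, so values can still be overridden from the
# make command line or the environment.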
echo -n "Creating mk/config.mk..."
cp -f $rootdir/CONFIG $rootdir/mk/config.mk
for key in ${!CONFIG[@]}; do
sed -i.bak -r "s#^\s*CONFIG_${key}=.*#CONFIG_${key}\?=${CONFIG[$key]}#g" $rootdir/mk/config.mk
done
# On FreeBSD, sed -i requires a suffix argument, so a backup file is always created; delete it here.
rm -f $rootdir/mk/config.mk.bak
echo "done."
# Environment variables
echo -n "Creating mk/cc.flags.mk..."
rm -f $rootdir/mk/cc.flags.mk
[ -n "$CFLAGS" ] && echo "CFLAGS?=$CFLAGS" > $rootdir/mk/cc.flags.mk
[ -n "$CXXFLAGS" ] && echo "CXXFLAGS?=$CXXFLAGS" >> $rootdir/mk/cc.flags.mk
[ -n "$LDFLAGS" ] && echo "LDFLAGS?=$LDFLAGS" >> $rootdir/mk/cc.flags.mk
[ -n "$DESTDIR" ] && echo "DESTDIR?=$DESTDIR" >> $rootdir/mk/cc.flags.mk
echo "done."
# Create .sh with build config for easy sourcing|lookup during the tests.
for conf in "${!CONFIG[@]}"; do
echo "CONFIG_$conf=${CONFIG[$conf]}"
done >"$rootdir/test/common/build_config.sh"
if [[ $sys_name == "FreeBSD" ]]; then
echo "Type 'gmake' to build."
else
echo "Type 'make' to build."
fi
exit 0