numam-spdk/configure

#!/usr/bin/env bash
set -e
function usage()
{
    echo "'configure' configures SPDK to compile on supported platforms."
    echo ""
    echo "Usage: ./configure [OPTION]..."
    echo ""
    echo "Defaults for the options are specified in brackets."
    echo ""
    echo "General:"
    echo " -h, --help                Display this help and exit"
    echo ""
    echo " --prefix=path             Configure installation prefix (default: /usr/local)"
    echo ""
    echo " --enable-debug            Configure for debug builds"
    echo " --enable-log-bt=lvl       Show backtrace using libunwind when logging a message at level <= lvl."
    echo "                           Valid values are: ERROR, WARN, NOTICE, DEBUG."
    echo " --enable-werror           Treat compiler warnings as errors"
    echo " --enable-asan             Enable address sanitizer"
    echo " --enable-ubsan            Enable undefined behavior sanitizer"
    echo " --enable-coverage         Enable code coverage tracking"
    echo " --enable-lto              Enable link-time optimization"
    echo " --disable-tests           Disable building of tests"
    echo " --with-env=path           Use an alternate environment implementation"
    echo ""
    echo "Specifying Dependencies:"
    echo "--with-DEPENDENCY[=path]   Use the given dependency. Optionally, provide the"
    echo "                           path."
    echo "--without-DEPENDENCY       Do not link to the given dependency. This may"
    echo "                           disable features and components."
    echo ""
    echo "Valid dependencies are listed below."
    echo " crypto                    Required to build vbdev crypto module."
    echo "                           No path required."
    echo " dpdk                      Optional. Uses the dpdk submodule in the spdk tree if not specified."
    echo "                           example: /usr/share/dpdk/x86_64-default-linuxapp-gcc"
    echo " fio                       Required to build fio_plugin."
    echo "                           example: /usr/src/fio"
    echo " vhost                     Required to build vhost target."
    echo "                           No path required."
    echo " virtio                    Required to build vhost initiator (Virtio) bdev module."
    echo "                           No path required."
    echo " pmdk                      Required to build persistent memory bdev."
    echo "                           example: /usr/share/pmdk"
    echo " vpp                       Required to build VPP net module."
    echo "                           example: /vpp_repo/build-root/install-vpp-native/vpp"
    echo " rbd                       [disabled]"
    echo "                           No path required."
    echo " rdma                      [disabled]"
    echo "                           No path required."
    echo " shared                    Required to build spdk shared libraries."
    echo "                           No path required."
    echo " iscsi-initiator           [disabled]"
    echo "                           No path required."
echo " raid [disabled]"
echo " No path required."
echo " vtune Required to profile I/O under Intel VTune Amplifier XE."
echo " example: /opt/intel/vtune_amplifier_xe_version"
echo ""
echo "Environment variables:"
echo ""
echo "CFLAGS C compiler flags"
echo "CXXFLAGS C++ compiler flags"
echo "LDFLAGS Linker flags"
echo "DESTDIR Destination for 'make install'"
echo ""
}
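
# Example invocations (illustrative paths, not defaults; see usage() above):
#   ./configure --enable-debug --with-fio=/usr/src/fio
#   ./configure --with-dpdk=/usr/share/dpdk/x86_64-default-linuxapp-gcc --without-vhost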
function check_dir() {
    arg="$1"
    # Strip everything through the first '=' to get the path from --with-foo=path.
    dir="${arg#*=}"
    if [ ! -d "$dir" ]; then
        echo "$arg: directory not found"
        exit 1
    fi
}
for i in "$@"; do
    case "$i" in
        -h|--help)
            usage
            exit 0
            ;;
        --prefix=*)
            CONFIG_PREFIX="${i#*=}"
            ;;
        --enable-debug)
            CONFIG_DEBUG=y
            ;;
        --disable-debug)
            CONFIG_DEBUG=n
            ;;
        --enable-log-bt=*)
            CONFIG_LOG_BACKTRACE=${i#*=}
            ;;
        --enable-asan)
            CONFIG_ASAN=y
            ;;
        --disable-asan)
            CONFIG_ASAN=n
            ;;
        --enable-ubsan)
            CONFIG_UBSAN=y
            ;;
        --disable-ubsan)
            CONFIG_UBSAN=n
            ;;
        --enable-tsan)
            CONFIG_TSAN=y
            ;;
        --disable-tsan)
            CONFIG_TSAN=n
            ;;
        --enable-coverage)
            CONFIG_COVERAGE=y
            ;;
        --disable-coverage)
            CONFIG_COVERAGE=n
            ;;
        --enable-lto)
            CONFIG_LTO=y
            ;;
        --disable-lto)
            CONFIG_LTO=n
            ;;
        --enable-tests)
            CONFIG_TESTS=y
            ;;
        --disable-tests)
            CONFIG_TESTS=n
            ;;
        --enable-werror)
            CONFIG_WERROR=y
            ;;
        --disable-werror)
            CONFIG_WERROR=n
            ;;
        --with-env=*)
            CONFIG_ENV="${i#*=}"
            ;;
        --with-rbd)
            CONFIG_RBD=y
            ;;
        --without-rbd)
            CONFIG_RBD=n
            ;;
        --with-raid)
            CONFIG_RAID=y
            echo "Warning: the RAID module does not support multiple iovecs per request and will not work with vhost"
            ;;
        --without-raid)
            CONFIG_RAID=n
            ;;
        --with-rdma)
            CONFIG_RDMA=y
            ;;
        --without-rdma)
            CONFIG_RDMA=n
            ;;
        --with-shared)
            CONFIG_SHARED=y
            ;;
        --without-shared)
            CONFIG_SHARED=n
            ;;
        --with-iscsi-initiator)
            CONFIG_ISCSI_INITIATOR=y
            ;;
        --without-iscsi-initiator)
            CONFIG_ISCSI_INITIATOR=n
            ;;
        --with-dpdk=*)
            check_dir "$i"
            CONFIG_DPDK_DIR=$(readlink -f "${i#*=}")
            ;;
        --without-dpdk)
            CONFIG_DPDK_DIR=
            ;;
        --with-crypto)
            CONFIG_CRYPTO=y
            ;;
        --without-crypto)
            CONFIG_CRYPTO=n
            ;;
        --with-vhost)
            CONFIG_VHOST=y
            ;;
        --without-vhost)
            CONFIG_VHOST=n
            ;;
        --with-virtio)
            CONFIG_VIRTIO=y
            ;;
        --without-virtio)
            CONFIG_VIRTIO=n
            ;;
        --with-pmdk)
            CONFIG_PMDK=y
            ;;
        --with-pmdk=*)
            CONFIG_PMDK=y
            check_dir "$i"
            CONFIG_PMDK_DIR=$(readlink -f "${i#*=}")
            ;;
        --without-pmdk)
            CONFIG_PMDK=n
            ;;
        --with-vpp)
            CONFIG_VPP=y
            ;;
        --with-vpp=*)
            CONFIG_VPP=y
            check_dir "$i"
            CONFIG_VPP_DIR=$(readlink -f "${i#*=}")
            ;;
        --without-vpp)
            CONFIG_VPP=n
            ;;
        --with-fio=*)
            check_dir "$i"
            FIO_SOURCE_DIR="${i#*=}"
            CONFIG_FIO_PLUGIN=y
            ;;
        --without-fio)
            FIO_SOURCE_DIR=
            CONFIG_FIO_PLUGIN=n
            ;;
        --with-vtune=*)
            check_dir "$i"
            VTUNE_SOURCE_DIR="${i#*=}"
            CONFIG_VTUNE=y
            ;;
        --without-vtune)
            VTUNE_SOURCE_DIR=
            CONFIG_VTUNE=n
            ;;
        --)
            break
            ;;
        *)
            echo "Unrecognized option $i"
            usage
            exit 1
            ;;
    esac
done
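
# Note: options are processed left to right, so a later flag overrides an
# earlier one (e.g. './configure --enable-debug --disable-debug' leaves
# CONFIG_DEBUG=n).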
case "$CONFIG_LOG_BACKTRACE" in
""|ERROR|WARN|NOTICE|DEBUG)
;;
*)
echo "Invalid '--enable-log-bt=$CONFIG_LOG_BACKTRACE' option. Valid levels are ERROR, WARN, NOTICE or DEBUG"
exit 1
;;
esac
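
# Resolve the environment layer. By default SPDK builds against its DPDK
# submodule; with a custom --with-env, the DPDK lookup is skipped and the
# DPDK-based vhost/virtio features are forced off below.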
if [ -z "$CONFIG_ENV" ]; then
if [ -z "$CONFIG_DPDK_DIR" ]; then
rootdir=$(readlink -f $(dirname $0))
if [ ! -f "$rootdir"/dpdk/config/common_spdk ]; then
echo "DPDK not found; please specify --with-dpdk=<path> or run:"
echo
echo " git submodule update --init"
exit 1
fi
CONFIG_DPDK_DIR="$rootdir"/dpdk/build
fi
else
if [ "$CONFIG_VHOST" = "y" ]; then
echo "Vhost is only supported when using the default DPDK environment. Disabling it."
fi
# Always disable vhost, but only print the error message if the user explicitly turned it on.
CONFIG_VHOST="n"
if [ "$CONFIG_VIRTIO" = "y" ]; then
echo "Virtio is only supported when using the default DPDK environment. Disabling it."
fi
# Always disable virtio, but only print the error message if the user explicitly turned it on.
CONFIG_VIRTIO="n"
fi
if [ "$CONFIG_FIO_PLUGIN" = "y" ]; then
if [ -z "$FIO_SOURCE_DIR" ]; then
echo "When fio is enabled, you must specify the fio directory using --with-fio=path"
exit 1
fi
fi
if [ "$CONFIG_VTUNE" = "y" ]; then
if [ -z "$VTUNE_SOURCE_DIR" ]; then
echo "When VTune is enabled, you must specify the VTune directory using --with-vtune=path"
exit 1
fi
fi
if [ "$CONFIG_ASAN" = "y" -a "$CONFIG_TSAN" = "y" ]; then
echo "ERROR: ASAN and TSAN cannot be enabled at the same time."
exit 1
fi
if [[ "$OSTYPE" == "freebsd"* ]]; then
# FreeBSD doesn't support all configurations
if [[ "$CONFIG_COVERAGE" == "y" ]]; then
echo "ERROR: CONFIG_COVERAGE not available on FreeBSD"
exit 1
fi
fi
if [ "$CONFIG_RDMA" = "y" ]; then
if [ "$OSTYPE" != "FreeBSD"* ]; then
ibv_lib_file="$(readlink -f /usr/lib64/libibverbs.so)" || true
if [ -z $ibv_lib_file ]; then
ibv_lib_file="libibverbs.so.0.0.0"
fi
ibv_ver_str="$(basename $ibv_lib_file)"
ibv_maj_ver=`echo $ibv_ver_str | cut -d. -f3`
ibv_min_ver=`echo $ibv_ver_str | cut -d. -f4`
if [[ "$ibv_maj_var" > 1 || ("$ibv_maj_ver" -eq 1 && "$ibv_min_ver" -ge 1) ]]; then
CONFIG_RDMA_SEND_WITH_INVAL="y"
else
CONFIG_RDMA_SEND_WITH_INVAL="n"
cat <<EOF
*******************************************************************************
WARNING: The Infiniband Verbs opcode Send With Invalidate is either not
supported or is not functional with the current version of libibverbs installed
on this system. Please upgrade to at least version 1.1.
Beginning with Linux kernel 4.14, the kernel NVMe-oF initiator leverages Send
With Invalidate RDMA operations to improve performance. Failing to use the
Send With Invalidate operation on the NVMe-oF target side results in full
functionality, but greatly reduced performance. The SPDK NVMe-oF target will
be unable to leverage that operation using the currently installed version
of libibverbs, so Linux kernel NVMe-oF initiators based on kernels greater
than or equal to 4.14 will see significantly reduced performance.
*******************************************************************************
EOF
fi
fi
fi
echo -n "Creating CONFIG.local..."
# Write the configuration file
rm -f CONFIG.local
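
# CONFIG.local is a makefile fragment of '?=' assignments picked up by the
# build; only options the user touched are written. Illustrative contents
# (example values, not defaults):
#   CONFIG_DEBUG?=y
#   CONFIG_FIO_PLUGIN?=y
#   FIO_SOURCE_DIR?=/usr/src/fio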
if [ -n "$CONFIG_PREFIX" ]; then
echo "CONFIG_PREFIX?=$CONFIG_PREFIX" >> CONFIG.local
fi
if [ -n "$CONFIG_DEBUG" ]; then
echo "CONFIG_DEBUG?=$CONFIG_DEBUG" >> CONFIG.local
fi
if [ -n "$CONFIG_LOG_BACKTRACE" ]; then
echo "CONFIG_LOG_BACKTRACE?=$CONFIG_LOG_BACKTRACE" >> CONFIG.local
fi
if [ -n "$CONFIG_WERROR" ]; then
echo "CONFIG_WERROR?=$CONFIG_WERROR" >> CONFIG.local
fi
if [ -n "$CONFIG_COVERAGE" ]; then
echo "CONFIG_COVERAGE?=$CONFIG_COVERAGE" >> CONFIG.local
fi
if [ -n "$CONFIG_LTO" ]; then
echo "CONFIG_LTO?=$CONFIG_LTO" >> CONFIG.local
fi
if [ -n "$CONFIG_TESTS" ]; then
echo "CONFIG_TESTS?=$CONFIG_TESTS" >> CONFIG.local
fi
if [ -n "$CONFIG_ASAN" ]; then
echo "CONFIG_ASAN?=$CONFIG_ASAN" >> CONFIG.local
fi
if [ -n "$CONFIG_UBSAN" ]; then
echo "CONFIG_UBSAN?=$CONFIG_UBSAN" >> CONFIG.local
fi
if [ -n "$CONFIG_TSAN" ]; then
echo "CONFIG_TSAN?=$CONFIG_TSAN" >> CONFIG.local
fi
if [ -n "$CONFIG_ENV" ]; then
echo "CONFIG_ENV?=$CONFIG_ENV" >> CONFIG.local
fi
if [ -n "$CONFIG_DPDK_DIR" ]; then
echo "CONFIG_DPDK_DIR?=$CONFIG_DPDK_DIR" >> CONFIG.local
fi
if [ -n "$CONFIG_CRYPTO" ]; then
echo "CONFIG_CRYPTO?=$CONFIG_CRYPTO" >> CONFIG.local
fi
if [ -n "$CONFIG_VHOST" ]; then
echo "CONFIG_VHOST?=$CONFIG_VHOST" >> CONFIG.local
fi
if [ -n "$CONFIG_VIRTIO" ]; then
echo "CONFIG_VIRTIO?=$CONFIG_VIRTIO" >> CONFIG.local
fi
if [ -n "$CONFIG_PMDK" ]; then
echo "CONFIG_PMDK?=$CONFIG_PMDK" >> CONFIG.local
fi
if [ -n "$CONFIG_PMDK_DIR" ]; then
echo "CONFIG_PMDK_DIR?=$CONFIG_PMDK_DIR" >> CONFIG.local
fi
if [ -n "$CONFIG_VPP" ]; then
echo "CONFIG_VPP?=$CONFIG_VPP" >> CONFIG.local
fi
if [ -n "$CONFIG_VPP_DIR" ]; then
echo "CONFIG_VPP_DIR?=$CONFIG_VPP_DIR" >> CONFIG.local
fi
if [ -n "$CONFIG_FIO_PLUGIN" ]; then
echo "CONFIG_FIO_PLUGIN?=$CONFIG_FIO_PLUGIN" >> CONFIG.local
fi
if [ -n "$FIO_SOURCE_DIR" ]; then
echo "FIO_SOURCE_DIR?=$FIO_SOURCE_DIR" >> CONFIG.local
fi
if [ -n "$CONFIG_RDMA" ]; then
echo "CONFIG_RDMA?=$CONFIG_RDMA" >> CONFIG.local
fi
if [ -n "$CONFIG_RDMA_SEND_WITH_INVAL" ]; then
echo "CONFIG_RDMA_SEND_WITH_INVAL?=$CONFIG_RDMA_SEND_WITH_INVAL" >> CONFIG.local
fi
if [ -n "$CONFIG_SHARED" ]; then
echo "CONFIG_SHARED?=$CONFIG_SHARED" >> CONFIG.local
fi
if [ -n "$CONFIG_ISCSI_INITIATOR" ]; then
echo "CONFIG_ISCSI_INITIATOR?=$CONFIG_ISCSI_INITIATOR" >> CONFIG.local
fi
if [ -n "$CONFIG_RBD" ]; then
echo "CONFIG_RBD?=$CONFIG_RBD" >> CONFIG.local
fi
if [ -n "$CONFIG_RAID" ]; then
echo "CONFIG_RAID?=$CONFIG_RAID" >> CONFIG.local
fi
if [ -n "$CONFIG_VTUNE" ]; then
echo "CONFIG_VTUNE?=$CONFIG_VTUNE" >> CONFIG.local
fi
if [ -n "$VTUNE_SOURCE_DIR" ]; then
echo "VTUNE_SOURCE_DIR?=$VTUNE_SOURCE_DIR" >> CONFIG.local
fi
# Environment variables
if [ -n "$CFLAGS" ]; then
echo "CFLAGS?=$CFLAGS" >> CONFIG.local
fi
if [ -n "$CXXFLAGS" ]; then
echo "CXXFLAGS?=$CXXFLAGS" >> CONFIG.local
fi
if [ -n "$LDFLAGS" ]; then
echo "LDFLAGS?=$LDFLAGS" >> CONFIG.local
fi
if [ -n "$DESTDIR" ]; then
echo "DESTDIR?=$DESTDIR" >> CONFIG.local
fi
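
# Prefer python3, then python2, then an unversioned 'python'; the first
# interpreter found on PATH wins and is recorded in the PYTHON_COMMAND file
# below for later reuse.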
python_command=
for interpreter in python3 python2 python ; do
    type $interpreter > /dev/null 2>&1 || continue
    python_command=$interpreter
    break
done
if [[ -z "$python_command" ]] ; then
    echo "Could not find python interpreter! Bailing out."
    exit 1
fi
rm -f PYTHON_COMMAND
echo $python_command > PYTHON_COMMAND
$python_command scripts/genconfig.py > config.h
echo "done."
if [[ "$OSTYPE" == "freebsd"* ]]; then
echo "Type 'gmake' to build."
else
echo "Type 'make' to build."
fi
exit 0