numam-spdk/etc/spdk/iscsi.conf.in

# iSCSI target configuration file
#
# Please write all parameters using ASCII.
# A parameter must be quoted if it includes whitespace.
#
# Configuration syntax:
# Leading whitespace is ignored.
# Lines starting with '#' are comments.
# Lines ending with '\' are concatenated with the next line.
# Bracketed ([]) names define sections
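# For example (illustrating the continuation rule above), the next two
# commented lines would be read as the single directive
# "Mapping PortalGroup1 InitiatorGroup1":
#   Mapping PortalGroup1 \
#   InitiatorGroup1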
[Global]
# Shared Memory Group ID. SPDK applications with the same ID will share memory.
# Default: <the process PID>
#SharedMemoryID 0
# Users can restrict work items to only run on certain cores by
# specifying a ReactorMask. Default is to allow work items to run
# on core 0.
#ReactorMask 0xFFFF
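# For example, a ReactorMask of 0xF runs reactors on cores 0-3, while
# 0xF0 runs them on cores 4-7 (each bit in the mask selects one core).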
# Disable PCI access. PCI is enabled by default. Setting this
# option will hide all PCI devices from all SPDK modules, making
# SPDK act as if they don't exist.
#NoPci Yes
# Tracepoint group mask for spdk trace buffers
# Default: 0x0 (all tracepoint groups disabled)
# Set to 0xFFFFFFFFFFFFFFFF to enable all tracepoint groups.
#TpointGroupMask 0x0
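# Each bit in the mask enables one tracepoint group; for example,
# setting the mask to 0x1 would enable only tracepoint group 0.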
[iSCSI]
# Node name (does not include the optional part)
# Users can optionally change this to fit their environment.
NodeBase "iqn.2016-06.io.spdk"
AuthFile /usr/local/etc/spdk/auth.conf
MinConnectionsPerCore 4
# Socket I/O timeout in seconds (0 means infinite)
Timeout 30
# authentication information for discovery session
# Options:
# None, Auto, CHAP, and Mutual. Note that Mutual implies CHAP.
DiscoveryAuthMethod Auto
#MaxSessions 128
#MaxConnectionsPerSession 2
# Initial iSCSI parameters negotiated with initiators
# NOTE: incorrect values might cause crashes
DefaultTime2Wait 2
DefaultTime2Retain 60
ImmediateData Yes
ErrorRecoveryLevel 0
# Users must change the PortalGroup section(s) to match the IP addresses
# for their environment.
# PortalGroup sections define which network portals the iSCSI target
# will use to listen for incoming connections. These are also used to
# determine which targets are accessible over each portal group.
# Up to 1024 Portal directives are allowed. These define the network
# portals of the portal group. The user must specify an IP address
# for each network portal, and may optionally specify a port and
# a cpumask. If the port is omitted, 3260 is used. The cpumask sets
# the processor affinity of iSCSI connections made through the
# portal. If the cpumask is omitted, connections may run on any
# available processor.
# Syntax:
# Portal <Name> <IP address>[:<port>[@<cpumask>]]
[PortalGroup1]
Portal DA1 192.168.2.21:3260
Portal DA2 192.168.2.22:3260@0xF
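# In the examples above, connections through DA1 may run on any
# available processor, while the cpumask of 0xF on DA2 pins
# connections made through that portal to cores 0-3.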
# Users must change the InitiatorGroup section(s) to match the IP
# addresses and initiator configuration in their environment.
# Netmask can be used to specify a single IP address or a range of IP addresses
# Netmask 192.168.1.20 <== single IP address
# Netmask 192.168.1.0/24 <== IP range 192.168.1.*
[InitiatorGroup1]
InitiatorName ANY
Netmask 192.168.2.0/24
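# Instead of ANY, InitiatorName may list a specific initiator, for
# example (hypothetical IQN):
# InitiatorName "iqn.1991-05.com.example:initiator1"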
# NVMe configuration options
[Nvme]
# NVMe Device Whitelist
# Users may specify which NVMe devices to claim by their transport id.
# See spdk_nvme_transport_id_parse() in spdk/nvme.h for the correct format.
# The second argument is the assigned name, which can be referenced from
# other sections in the configuration file. For NVMe devices, a namespace
# is automatically appended to each name in the format <YourName>nY, where
# Y is the NSID (starts at 1).
TransportID "trtype:PCIe traddr:0000:00:00.0" Nvme0
TransportID "trtype:PCIe traddr:0000:01:00.0" Nvme1
# The number of attempts per I/O when an I/O fails. Do not include
# this key to get the default behavior.
RetryCount 4
# Timeout for each command, in seconds. If 0, don't track timeouts.
Timeout 0
# Action to take on command time out. Only valid when Timeout is greater
# than 0. This may be 'Reset' to reset the controller, 'Abort' to abort
# the command, or 'None' to just print a message but do nothing.
# Admin command timeouts will always result in a reset.
ActionOnTimeout None
# Set how often the admin queue is polled for asynchronous events.
# Units in microseconds.
AdminPollRate 100000
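# (100000 microseconds = one poll every 100 milliseconds)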
# Enable handling of hotplug (runtime insert and remove) events.
# Hotplug handling is disabled by default; set this to Yes to enable it.
# Default: No
HotplugEnable No
# Set how often the hotplug is processed for insert and remove events.
# Units in microseconds.
HotplugPollRate 0
# Users may change this section to create a different number or size of
# malloc LUNs.
# If the system has a hardware DMA engine, SPDK will use an IOAT
# (i.e. Crystal Beach DMA) channel to do the copy instead of memcpy.
# Users can disable the offload even if it is available.
[Malloc]
# Number of Malloc targets
NumberOfLuns 3
# Each Malloc target is 128 MB
LunSizeInMB 128
# Block size. Default is 512 bytes.
BlockSize 4096
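# With the settings above, SPDK creates three Malloc targets
# (Malloc0, Malloc1, Malloc2), each 128 MB with a 4096-byte block size.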
# Users may not want to use the offload even if it is available.
# Users may use the whitelist to initialize only the specified devices;
# whitelist entries use BUS:DEVICE.FUNCTION to identify each Ioat channel.
[Ioat]
Disable Yes
Whitelist 00:04.0
Whitelist 00:04.1
# Users must change this section to match the /dev/sdX devices to be
# exported as iSCSI LUNs. The devices are accessed using Linux AIO.
# The format is:
# AIO <file name> <bdev name> [<block size>]
# The file name is the backing device
# The bdev name can be referenced from elsewhere in the configuration file.
# Block size may be omitted to automatically detect the block size of a disk.
[AIO]
AIO /dev/sdb AIO0
AIO /dev/sdc AIO1
AIO /tmp/myfile AIO2 4096
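# In the last entry above, the block size (4096) is given explicitly;
# auto-detection, as noted above, applies to disks, so specifying the
# size is presumably needed for a plain backing file.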
# PMDK libpmemblk-based block device
[Pmem]
# Syntax:
# Blk <pmemblk pool file name> <bdev name>
Blk /path/to/pmem-pool Pmem0
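# The pool file must already exist. With PMDK installed, it can be
# created with the pmempool utility, for example (exact options may
# vary by PMDK version):
#   pmempool create --size=1G blk 4096 /path/to/pmem-pool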
# The Split virtual block device slices block devices into multiple smaller bdevs.
[Split]
# Syntax:
# Split <bdev> <count> [<size_in_megabytes>]
# Split Malloc1 into two equally-sized portions, Malloc1p0 and Malloc1p1
Split Malloc1 2
# Split Malloc2 into eight 1-megabyte portions, Malloc2p0 ... Malloc2p7,
# leaving the rest of the device inaccessible
Split Malloc2 8 1
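# Given the 128 MB Malloc targets configured in the [Malloc] section
# above, Malloc1p0 and Malloc1p1 would each be roughly 64 MB.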
# Users should change the TargetNode section(s) below to match the
# desired iSCSI target node configuration.
# TargetName, Mapping, and LUN0 are the minimum required
[TargetNode1]
TargetName disk1
TargetAlias "Data Disk1"
Mapping PortalGroup1 InitiatorGroup1
AuthMethod Auto
AuthGroup AuthGroup1
# Enable header and data digest
# UseDigest Header Data
UseDigest Auto
# Use the first Malloc target
LUN0 Malloc0
# Use the first AIO target
LUN1 AIO0
# Use the second AIO target
LUN2 AIO1
# Use the third AIO target
LUN3 AIO2
QueueDepth 128
[TargetNode2]
TargetName disk2
TargetAlias "Data Disk2"
Mapping PortalGroup1 InitiatorGroup1
AuthMethod Auto
AuthGroup AuthGroup1
UseDigest Auto
LUN0 Nvme0n1
QueueDepth 32