numam-spdk/etc/spdk/iscsi.conf.in
Ben Walker af8affd1a9 iscsi: Deprecate MinConnectionsPerCore
iSCSI is currently being adapted to SPDK's new threading model,
and this parameter doesn't make sense anymore. Remove it for
now until something functionally equivalent is added later.

Change-Id: Ia0e2f5aa81b72d99467c5a900619fbeeb42b2069
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/452779
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2019-05-06 17:10:48 +00:00

# iSCSI target configuration file
#
# Please write all parameters using ASCII.
# The parameter must be quoted if it includes whitespace.
#
# Configuration syntax:
# Leading whitespace is ignored.
# Lines starting with '#' are comments.
# Lines ending with '\' are concatenated with the next line.
# Bracketed ([]) names define sections
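#
# For example, using values from the sections below, a parameter containing
# whitespace must be quoted, and a long directive may be continued with a
# trailing '\' (shown here commented out purely as an illustration):
#   TargetAlias "Data Disk1"
#   Portal DA1 \
#       192.168.2.21:3260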
[Global]
# Shared Memory Group ID. SPDK applications with the same ID will share memory.
# Default: <the process PID>
#SharedMemoryID 0
# Disable PCI access. PCI is enabled by default. Setting this
# option will hide all PCI devices from SPDK modules, making
# SPDK act as if they do not exist.
#NoPci Yes
# Tracepoint group mask for spdk trace buffers
# Default: 0x0 (all tracepoint groups disabled)
# Set to 0xFFFF to enable all tracepoint groups.
#TpointGroupMask 0x0
# Users may activate entries in this section to override default values for
# global parameters in the block device (bdev) subsystem.
[Bdev]
# Number of spdk_bdev_io structures allocated in the global bdev subsystem pool.
#BdevIoPoolSize 65536
# Maximum number of spdk_bdev_io structures to cache per thread.
#BdevIoCacheSize 256
[iSCSI]
# Node name (does not include the optional part)
# Users can optionally change this to fit their environment.
NodeBase "iqn.2016-06.io.spdk"
AuthFile /usr/local/etc/spdk/auth.conf
# Socket I/O timeout in seconds (0 means infinite)
Timeout 30
# authentication information for discovery session
# Options:
# None, Auto, CHAP and Mutual. Note that Mutual implies CHAP.
DiscoveryAuthMethod Auto
#MaxSessions 128
#MaxConnectionsPerSession 2
# iSCSI initial parameters negotiated with initiators
# NOTE: incorrect values might cause the target to crash
DefaultTime2Wait 2
DefaultTime2Retain 60
# Maximum amount in bytes of unsolicited data the iSCSI
# initiator may send to the target during the execution of
# a single SCSI command.
FirstBurstLength 8192
ImmediateData Yes
ErrorRecoveryLevel 0
# Users must change the PortalGroup section(s) to match the IP addresses
# for their environment.
# PortalGroup sections define which network portals the iSCSI target
# will use to listen for incoming connections. These are also used to
# determine which targets are accessible over each portal group.
# Up to 1024 portal directives are allowed. These define the network
# portals of the portal group. The user must specify an IP address
# for each network portal, and may optionally specify a port and
# a cpumask. If the port is omitted, 3260 will be used. The cpumask
# sets the processor affinity of iSCSI connections made through the
# portal. If the cpumask is omitted, it defaults to all available
# processors.
# Syntax:
# Portal <Name> <IP address>[:<port>[@<cpumask>]]
[PortalGroup1]
Portal DA1 192.168.2.21:3260
Portal DA2 192.168.2.22:3260@0xF
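# A portal may also omit the port and cpumask entirely; for example, the
# following (hypothetical address) would listen on the default port 3260
# with affinity to all available processors:
#Portal DA3 192.168.2.23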
# Users must change the InitiatorGroup section(s) to match the IP
# addresses and initiator configuration in their environment.
# Netmask can be used to specify a single IP address or a range of IP addresses
# Netmask 192.168.1.20 <== single IP address
# Netmask 192.168.1.0/24 <== IP range 192.168.1.*
[InitiatorGroup1]
InitiatorName ANY
Netmask 192.168.2.0/24
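# For example, to allow only a single initiator IP instead of a range
# (hypothetical address):
#Netmask 192.168.2.100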
# NVMe configuration options
[Nvme]
# NVMe Device Whitelist
# Users may specify which NVMe devices to claim by their transport id.
# See spdk_nvme_transport_id_parse() in spdk/nvme.h for the correct format.
# The second argument is the assigned name, which can be referenced from
# other sections in the configuration file. For NVMe devices, a namespace
# is automatically appended to each name in the format <YourName>nY, where
# Y is the NSID (starts at 1).
TransportID "trtype:PCIe traddr:0000:00:00.0" Nvme0
TransportID "trtype:PCIe traddr:0000:01:00.0" Nvme1
# The number of attempts per I/O when an I/O fails. Do not include
# this key to get the default behavior.
RetryCount 4
# Timeout for each command, in microseconds. If 0, don't track timeouts.
TimeoutUsec 0
# Action to take on command timeout. Only valid when TimeoutUsec is greater
# than 0. This may be 'Reset' to reset the controller, 'Abort' to abort
# the command, or 'None' to just print a message but do nothing.
# Admin command timeouts will always result in a reset.
ActionOnTimeout None
# Set how often the admin queue is polled for asynchronous events.
# Units in microseconds.
AdminPollRate 100000
# Set how often I/O queues are polled for completions.
# Units in microseconds.
IOPollRate 0
# Handling of hotplug (runtime insert and remove) events is disabled
# by default; users can set this to Yes if they want to enable it.
# Default: No
HotplugEnable No
# Set how often the hotplug is processed for insert and remove events.
# Units in microseconds.
HotplugPollRate 0
# Users may change this section to create a different number or size of
# malloc LUNs.
# If the system has a hardware DMA engine, it can use an IOAT
# (i.e. Crystal Beach DMA) channel to do the copy instead of memcpy
# by specifying "Enable Yes" in the [Ioat] section.
# Offload is disabled by default even if it is available.
[Malloc]
# Number of Malloc targets
NumberOfLuns 5
# Malloc targets are 128M
LunSizeInMB 128
# Block size. Default is 512 bytes.
BlockSize 4096
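# With the settings above, the resulting bdevs are named Malloc0 through
# Malloc4 and can be referenced from other sections (see the [RAID1] and
# [TargetNode1] sections below).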
# Users can enable offload by specifying "Enable Yes" in this section
# if it is available.
# Users may use the whitelist to initialize only the specified devices;
# the IDs use the BUS:DEVICE.FUNCTION format to identify each Ioat channel.
[Ioat]
Enable No
Whitelist 00:04.0
Whitelist 00:04.1
# Users must change this section to match the /dev/sdX devices to be
# exported as iSCSI LUNs. The devices are accessed using Linux AIO.
# The format is:
# AIO <file name> <bdev name> [<block size>]
# The file name is the backing device
# The bdev name can be referenced from elsewhere in the configuration file.
# Block size may be omitted to automatically detect the block size of a disk.
[AIO]
AIO /dev/sdb AIO0
AIO /dev/sdc AIO1
AIO /tmp/myfile AIO2 4096
# PMDK libpmemblk-based block device
[Pmem]
# Syntax:
# Blk <pmemblk pool file name> <bdev name>
Blk /path/to/pmem-pool Pmem0
# The Split virtual block device slices block devices into multiple smaller bdevs.
[Split]
# Syntax:
# Split <bdev> <count> [<size_in_megabytes>]
# Split Malloc1 into two equally-sized portions, Malloc1p0 and Malloc1p1
Split Malloc1 2
# Split Malloc2 into eight 1-megabyte portions, Malloc2p0 ... Malloc2p7,
# leaving the rest of the device inaccessible
Split Malloc2 8 1
# The RAID virtual block device is built on top of pre-configured block devices.
[RAID1]
# Unique name of this RAID device.
Name Raid0
# RAID level, only raid level 0 is supported.
RaidLevel 0
# Strip size in KB.
StripSize 64
# Number of pre-configured bdevs.
NumDevices 2
# Names of pre-configured NVMe bdevs.
#Devices Nvme0n1 Nvme1n1
# Names of pre-configured Malloc bdevs.
Devices Malloc3 Malloc4
# Names of pre-configured AIO bdevs.
#Devices AIO0 AIO1
# Users should change the TargetNode section(s) below to match the
# desired iSCSI target node configuration.
# TargetName, Mapping, and LUN0 are the minimum required parameters.
[TargetNode1]
TargetName disk1
TargetAlias "Data Disk1"
Mapping PortalGroup1 InitiatorGroup1
AuthMethod Auto
AuthGroup AuthGroup1
# Enable header and data digest
# UseDigest Header Data
UseDigest Auto
# Use the first Malloc target
LUN0 Malloc0
# Use the first AIO target
LUN1 AIO0
# Use the second AIO target
LUN2 AIO1
# Use the third AIO target
LUN3 AIO2
QueueDepth 128
[TargetNode2]
TargetName disk2
TargetAlias "Data Disk2"
Mapping PortalGroup1 InitiatorGroup1
AuthMethod Auto
AuthGroup AuthGroup1
UseDigest Auto
LUN0 Nvme0n1
LUN1 Raid0
QueueDepth 32