numam-spdk/etc/spdk/iscsi.conf.in

# iSCSI target configuration file
#
# Please write all parameters using ASCII.
# The parameter must be quoted if it includes whitespace.
#
# Configuration syntax:
# Leading whitespace is ignored.
# Lines starting with '#' are comments.
# Lines ending with '\' are concatenated with the next line.
# Bracketed ([]) names define sections
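# For example (illustrative values), a long directive may be continued onto
# the next line:
#   Portal DA1 \
#     192.168.2.21:3260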
[Global]
# Shared Memory Group ID. SPDK applications with the same ID will share memory.
# Default: <the process PID>
#SharedMemoryID 0
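# For example, two SPDK application configurations that both set the same id
# (illustrative value) will share memory:
#SharedMemoryID 1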
# Users can restrict work items to only run on certain cores by
# specifying a ReactorMask. Default is to allow work items to run
# on all cores.
#ReactorMask 0xFFFF
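# For example, a mask of 0x3 (illustrative value) would restrict the reactors
# to cores 0 and 1:
#ReactorMask 0x3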
# Tracepoint group mask for spdk trace buffers
# Default: 0x0 (all tracepoint groups disabled)
# Set to 0xFFFFFFFFFFFFFFFF to enable all tracepoint groups.
#TpointGroupMask 0x0
# syslog facility
LogFacility "local7"
[iSCSI]
# node name (not including the optional part)
# Users can optionally change this to fit their environment.
NodeBase "iqn.2016-06.io.spdk"
AuthFile /usr/local/etc/spdk/auth.conf
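# Minimum number of connections handled on one core before connections are
# scheduled onto additional cores (assumed behavior; consult the SPDK iSCSI
# documentation for your version).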
MinConnectionsPerCore 4
# Power saving related parameter: defines how long an iSCSI connection must be
# idle (in microseconds) before it is moved to a state where it will consume
# less power. The default value is 5 ms (5000 microseconds).
MinConnectionIdleInterval 5000
# Socket I/O timeout in seconds (0 means no timeout)
Timeout 30
# authentication information for discovery session
DiscoveryAuthMethod Auto
#MaxSessions 128
#MaxConnectionsPerSession 2
# Initial iSCSI parameters negotiated with initiators
# NOTE: incorrect values might cause crashes
DefaultTime2Wait 2
DefaultTime2Retain 60
ImmediateData Yes
ErrorRecoveryLevel 0
[Rpc]
# Defines whether to enable configuration via RPC.
# Default is disabled. Note that the RPC interface is not
# authenticated, so users should be careful about enabling
# RPC in non-trusted environments.
Enable No
# Listen address for the RPC service.
# May be an IP address or an absolute path to a Unix socket.
Listen 127.0.0.1
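# Example of listening on a Unix domain socket instead (path is illustrative):
#Listen /var/tmp/spdk_rpc.sock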
# Users must change the PortalGroup section(s) to match the IP addresses
# for their environment.
# PortalGroup sections define which TCP ports the iSCSI server will use
# to listen for incoming connections. These are also used to determine
# which targets are accessible over each portal group.
[PortalGroup1]
Portal DA1 192.168.2.21:3260
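# Additional portals may be listed in the same group, for example
# (illustrative address):
#Portal DA2 192.168.2.22:3260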
# Users must change the InitiatorGroup section(s) to match the IP
# addresses and initiator configuration in their environment.
# Netmask can be used to specify a single IP address or a range of IP addresses
# Netmask 192.168.1.20 <== single IP address
# Netmask 192.168.1.0/24 <== IP range 192.168.1.*
[InitiatorGroup1]
InitiatorName ALL
Netmask 192.168.2.0/24
# NVMe configuration options
[Nvme]
# NVMe Device Whitelist
# Users may specify which NVMe devices to claim by their PCI
# domain, bus, device, and function. The format is dddd:bb:dd.f, which is
# the same format displayed by lspci or in /sys/bus/pci/devices. The second
# argument is a "name" for the device that can be anything. The name
# is referenced later in the Subsystem section.
#
# Alternatively, the user can specify ClaimAllDevices. All
# NVMe devices will be claimed and named Nvme0, Nvme1, etc.
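# For example (assumed boolean syntax; verify against your SPDK version):
#ClaimAllDevices Yes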
BDF 0000:00:00.0 Nvme0
BDF 0000:01:00.0 Nvme1
# The number of attempts per I/O when an I/O fails. Do not include
# this key to get the default behavior.
NvmeRetryCount 4
# The maximum number of NVMe controllers to claim. Do not include this key to
# claim all of them.
NumControllers 2
# Register the application to receive a timeout callback and to reset the
# controller when a timeout occurs.
ResetControllerOnTimeout Yes
# Timeout value for NVMe commands.
NvmeTimeoutValue 30
# Set how often the admin queue is polled for asynchronous events.
# Units in microseconds.
AdminPollRate 100000
# Users may change this section to create a different number or size of
# malloc LUNs.
# If the system has a hardware DMA engine, an IOAT (i.e. Crystal Beach DMA)
# channel will be used to do the copy instead of memcpy.
# Users can disable the offload even when it is available.
[Malloc]
# Number of Malloc targets
NumberOfLuns 3
# Each Malloc target is 128 MB
LunSizeInMB 128
# Block size. Default is 512 bytes.
BlockSize 4096
# Users may choose not to use the offload even when it is available.
# The whitelist can be used to initialize only the specified devices; device
# IDs use the BUS:DEVICE.FUNCTION format to identify each Ioat channel.
[Ioat]
Disable Yes
Whitelist 00:04.0
Whitelist 00:04.1
# Users must change this section to match the /dev/sdX devices to be
# exported as iSCSI LUNs. The devices are accessed using Linux AIO.
[AIO]
AIO /dev/sdb
AIO /dev/sdc
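# A third entry (illustrative device path) would be needed to back the AIO2
# LUN referenced in [TargetNode1] below:
#AIO /dev/sdd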
# The Split virtual block device slices block devices into multiple smaller bdevs.
[Split]
# Syntax:
# Split <bdev> <count> [<size_in_megabytes>]
# Split Malloc1 into two equally-sized portions, Malloc1p0 and Malloc1p1
Split Malloc1 2
# Split Malloc2 into eight 1-megabyte portions, Malloc2p0 ... Malloc2p7,
# leaving the rest of the device inaccessible
Split Malloc2 8 1
# Users should change the TargetNode section(s) below to match the
# desired iSCSI target node configuration.
# TargetName, Mapping, and LUN0 are the minimum required settings
[TargetNode1]
TargetName disk1
TargetAlias "Data Disk1"
Mapping PortalGroup1 InitiatorGroup1
AuthMethod Auto
AuthGroup AuthGroup1
# Enable header and data digest
# UseDigest Header Data
UseDigest Auto
# Use the first Malloc target
LUN0 Malloc0
# Use the first AIO target
LUN1 AIO0
# Use the second AIO target
LUN2 AIO1
# Use the third AIO target (requires a third AIO entry in the [AIO] section)
LUN3 AIO2
QueueDepth 128
[TargetNode2]
TargetName disk2
TargetAlias "Data Disk2"
Mapping PortalGroup1 InitiatorGroup1
AuthMethod Auto
AuthGroup AuthGroup1
UseDigest Auto
LUN0 Nvme0
QueueDepth 32