numam-spdk/etc/spdk/nvmf.conf.in

# NVMf Target Configuration File
#
# Please write all parameters using ASCII.
# The parameter must be quoted if it includes whitespace.
#
# Configuration syntax:
# Leading whitespace is ignored.
# Lines starting with '#' are comments.
# Lines ending with '\' are concatenated with the next line.
# Bracketed ([]) names define sections.

[Global]
# Users can restrict work items to run only on certain cores by
# specifying a ReactorMask. The default ReactorMask is taken from the
# -c option in the 'ealargs' setting at the beginning of nvmf_tgt.c.
#ReactorMask 0x00FF
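# For example (an illustrative value, not the default), limiting reactors to
# lcores 0 through 3 would use a 4-bit mask:
#ReactorMask 0xF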
# Tracepoint group mask for spdk trace buffers
# Default: 0x0 (all tracepoint groups disabled)
# Set to 0xFFFFFFFFFFFFFFFF to enable all tracepoint groups.
#TpointGroupMask 0x0
# syslog facility
LogFacility "local7"

[Rpc]
# Defines whether to enable configuration via RPC.
# Default is disabled. Note that the RPC interface is not
# authenticated, so users should be careful about enabling
# RPC in non-trusted environments.
Enable No
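# For example, configuration over RPC could be allowed on a trusted,
# isolated management host with:
#Enable Yes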

# Users may change this section to create a different number or size of
# malloc LUNs.
# This will generate 8 LUNs with a malloc-allocated backend.
# Each LUN will be 64 MB in size and they will be named
# Malloc0 through Malloc7. Not all LUNs defined here are necessarily
# used below.
[Malloc]
NumberOfLuns 8
LunSizeInMB 64
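# For example, to create 4 larger LUNs (Malloc0 through Malloc3) of 128 MB
# each, the section could instead read:
#NumberOfLuns 4
#LunSizeInMB 128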

# Define NVMf protocol global options
[Nvmf]
# Set the maximum number of submission and completion queues per session.
# Setting this to '8', for example, allows for 8 submission and 8 completion queues
# per session.
MaxQueuesPerSession 4
# Set the maximum number of outstanding I/O per queue.
#MaxQueueDepth 128
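# As a rough sizing illustration: with MaxQueuesPerSession 4 and the default
# MaxQueueDepth of 128, a session can have on the order of 4 * 128 = 512
# I/Os outstanding at once.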
# Set the maximum in-capsule data size. Must be a multiple of 16.
#InCapsuleDataSize 4096
# Set the maximum I/O size. Must be a multiple of 4096.
#MaxIOSize 131072
# Set the global acceptor lcore ID. Lcores are numbered starting at 0.
#AcceptorCore 0
# Set how often the acceptor polls for incoming connections. The acceptor is also
# responsible for polling existing connections that have gone idle. A value of 0
# means poll continuously. Units are microseconds.
AcceptorPollRate 10000
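# For example, to poll continuously instead of every 10 ms (10000 us):
#AcceptorPollRate 0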

# Define an NVMf Subsystem.
# - NQN is required and must be unique.
# - Core may be set or not. If set, the subsystem will run on the specified
# core. Otherwise, cores are assigned to subsystems round-robin from the
# available cores. Lcores are numbered starting at 0.
# - Mode may be either "Direct" or "Virtual". Direct means that physical
# devices attached to the target will be presented to hosts as if they
# were directly attached to the host. No software emulation or command
# validation is performed. Virtual means that an NVMe controller is
# emulated in software and the namespaces it contains map to block devices
# on the target system. These block devices do not need to be NVMe devices.
# - Between 1 and 255 Listen directives are allowed. These define
# the addresses on which new connections may be accepted. The format
# is Listen <type> <address>, where type currently can only be RDMA.
# - Between 0 and 255 Host directives are allowed. These define the
# NQNs of allowed hosts. If no Host directive is specified, all hosts
# are allowed to connect.
# - A Direct mode subsystem requires exactly 1 NVMe directive specifying an
# NVMe device by PCI BDF. The PCI domain:bus:device.function can be replaced
# by "*" to indicate any PCI device.
# - A Virtual mode subsystem instead takes an SN (serial number) directive and
# one or more Namespace directives naming block devices (for example, the
# Malloc LUNs defined above), as shown in Subsystem2 below.

# Direct controller
[Subsystem1]
NQN nqn.2016-06.io.spdk:cnode1
Core 0
Mode Direct
Listen RDMA 15.15.15.2:4420
Host nqn.2016-06.io.spdk:init
NVMe 0000:00:00.0

# Multiple subsystems are allowed.
# Virtual controller
[Subsystem2]
NQN nqn.2016-06.io.spdk:cnode2
Core 0
Mode Virtual
Listen RDMA 192.168.2.21:4420
Host nqn.2016-06.io.spdk:init
SN SPDK00000000000001
Namespace Malloc0
Namespace Malloc1
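
# Illustrative only (the NQN, addresses, host NQNs, serial number, and
# namespace below are placeholders, not shipped defaults): a subsystem may
# listen on more than one address and restrict access to more than one host
# NQN, for example:
#[Subsystem3]
#NQN nqn.2016-06.io.spdk:cnode3
#Mode Virtual
#Listen RDMA 192.168.2.21:4421
#Listen RDMA 15.15.15.2:4421
#Host nqn.2016-06.io.spdk:init
#Host nqn.2016-06.io.spdk:init2
#SN SPDK00000000000002
#Namespace Malloc2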