nvmf: let transports parse transport opts from JSON themselves

There are more transports on the way and we don't want to add
all their various opts into the single, generic structure.
We'll pass the JSON structure to transports instead. Then
the transport code can custom pull from the JSON any param
it wants.

To complement that, transports will now also have their own
JSON config dump callback. This was only done in the generic
nvmf.c so far, with conditions for RDMA and TCP.

Change-Id: I33115a8d56cec829b1c51311a318e0333cc64920
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Signed-off-by: jiaqizho <jiaqi.zhou@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/2761
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jacek Kalwas <jacek.kalwas@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
Darek Stojaczyk 2020-06-03 16:03:03 +02:00 committed by Tomasz Zawadzki
parent 9ae19a413b
commit f766d1e478
10 changed files with 153 additions and 81 deletions

View File

@ -71,6 +71,11 @@ receive the notification when the scsi bdev has been resized.
The `spdk_scsi_dev_construct` and `spdk_scsi_dev_add_lun` eventually may be
deprecated and removed.
### nvmf
NVMf Target transports can now parse any additional JSON params in the nvmf_create_transport RPC
through the JSON context provided via spdk_nvmf_transport_opts->transport_specific.
## v20.07: SPDK CSI driver, new accel_fw commands, I/O abort support
### accel

View File

@ -63,6 +63,7 @@ struct spdk_nvmf_host;
struct spdk_nvmf_subsystem_listener;
struct spdk_nvmf_poll_group;
struct spdk_json_write_ctx;
struct spdk_json_val;
struct spdk_nvmf_transport;
struct spdk_nvmf_target_opts {
@ -80,15 +81,13 @@ struct spdk_nvmf_transport_opts {
uint32_t max_aq_depth;
uint32_t num_shared_buffers;
uint32_t buf_cache_size;
uint32_t max_srq_depth;
bool no_srq;
bool c2h_success;
bool dif_insert_or_strip;
uint32_t sock_priority;
int acceptor_backlog;
uint32_t abort_timeout_sec;
/* ms */
uint32_t association_timeout;
const struct spdk_json_val *transport_specific;
};
struct spdk_nvmf_poll_group_stat {

View File

@ -224,6 +224,12 @@ struct spdk_nvmf_transport_ops {
*/
struct spdk_nvmf_transport *(*create)(struct spdk_nvmf_transport_opts *opts);
/**
* Dump transport-specific opts into JSON
*/
void (*dump_opts)(struct spdk_nvmf_transport *transport,
struct spdk_json_write_ctx *w);
/**
* Destroy the transport
*/

View File

@ -576,8 +576,8 @@ spdk_nvmf_tgt_write_config_json(struct spdk_json_write_ctx *w, struct spdk_nvmf_
spdk_json_write_named_uint32(w, "max_io_size", transport->opts.max_io_size);
spdk_json_write_named_uint32(w, "io_unit_size", transport->opts.io_unit_size);
spdk_json_write_named_uint32(w, "max_aq_depth", transport->opts.max_aq_depth);
if (transport->ops->type == SPDK_NVME_TRANSPORT_RDMA) {
spdk_json_write_named_uint32(w, "max_srq_depth", transport->opts.max_srq_depth);
if (transport->ops->dump_opts) {
transport->ops->dump_opts(transport, w);
}
spdk_json_write_named_uint32(w, "abort_timeout_sec", transport->opts.abort_timeout_sec);
spdk_json_write_object_end(w);

View File

@ -1869,30 +1869,10 @@ static const struct spdk_json_object_decoder nvmf_rpc_create_transport_decoder[]
"buf_cache_size", offsetof(struct nvmf_rpc_create_transport_ctx, opts.buf_cache_size),
spdk_json_decode_uint32, true
},
{
"max_srq_depth", offsetof(struct nvmf_rpc_create_transport_ctx, opts.max_srq_depth),
spdk_json_decode_uint32, true
},
{
"no_srq", offsetof(struct nvmf_rpc_create_transport_ctx, opts.no_srq),
spdk_json_decode_bool, true
},
{
"c2h_success", offsetof(struct nvmf_rpc_create_transport_ctx, opts.c2h_success),
spdk_json_decode_bool, true
},
{
"dif_insert_or_strip", offsetof(struct nvmf_rpc_create_transport_ctx, opts.dif_insert_or_strip),
spdk_json_decode_bool, true
},
{
"sock_priority", offsetof(struct nvmf_rpc_create_transport_ctx, opts.sock_priority),
spdk_json_decode_uint32, true
},
{
"acceptor_backlog", offsetof(struct nvmf_rpc_create_transport_ctx, opts.acceptor_backlog),
spdk_json_decode_int32, true
},
{
"abort_timeout_sec", offsetof(struct nvmf_rpc_create_transport_ctx, opts.abort_timeout_sec),
spdk_json_decode_uint32, true
@ -1950,10 +1930,10 @@ rpc_nvmf_create_transport(struct spdk_jsonrpc_request *request,
}
/* Decode parameters the first time to get the transport type */
if (spdk_json_decode_object(params, nvmf_rpc_create_transport_decoder,
SPDK_COUNTOF(nvmf_rpc_create_transport_decoder),
ctx)) {
SPDK_ERRLOG("spdk_json_decode_object failed\n");
if (spdk_json_decode_object_relaxed(params, nvmf_rpc_create_transport_decoder,
SPDK_COUNTOF(nvmf_rpc_create_transport_decoder),
ctx)) {
SPDK_ERRLOG("spdk_json_decode_object_relaxed failed\n");
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
nvmf_rpc_create_transport_ctx_free(ctx);
return;
@ -1990,10 +1970,10 @@ rpc_nvmf_create_transport(struct spdk_jsonrpc_request *request,
return;
}
if (spdk_json_decode_object(params, nvmf_rpc_create_transport_decoder,
SPDK_COUNTOF(nvmf_rpc_create_transport_decoder),
ctx)) {
SPDK_ERRLOG("spdk_json_decode_object failed\n");
if (spdk_json_decode_object_relaxed(params, nvmf_rpc_create_transport_decoder,
SPDK_COUNTOF(nvmf_rpc_create_transport_decoder),
ctx)) {
SPDK_ERRLOG("spdk_json_decode_object_relaxed failed\n");
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
nvmf_rpc_create_transport_ctx_free(ctx);
return;
@ -2007,6 +1987,9 @@ rpc_nvmf_create_transport(struct spdk_jsonrpc_request *request,
return;
}
/* Transport can parse additional params themselves */
ctx->opts.transport_specific = params;
transport = spdk_nvmf_transport_create(ctx->trtype, &ctx->opts);
if (!transport) {
@ -2027,7 +2010,6 @@ static void
dump_nvmf_transport(struct spdk_json_write_ctx *w, struct spdk_nvmf_transport *transport)
{
const struct spdk_nvmf_transport_opts *opts = spdk_nvmf_get_transport_opts(transport);
spdk_nvme_transport_type_t type = spdk_nvmf_get_transport_type(transport);
spdk_json_write_object_begin(w);
@ -2041,14 +2023,11 @@ dump_nvmf_transport(struct spdk_json_write_ctx *w, struct spdk_nvmf_transport *t
spdk_json_write_named_uint32(w, "num_shared_buffers", opts->num_shared_buffers);
spdk_json_write_named_uint32(w, "buf_cache_size", opts->buf_cache_size);
spdk_json_write_named_bool(w, "dif_insert_or_strip", opts->dif_insert_or_strip);
if (type == SPDK_NVME_TRANSPORT_RDMA) {
spdk_json_write_named_uint32(w, "max_srq_depth", opts->max_srq_depth);
spdk_json_write_named_bool(w, "no_srq", opts->no_srq);
spdk_json_write_named_int32(w, "acceptor_backlog", opts->acceptor_backlog);
} else if (type == SPDK_NVME_TRANSPORT_TCP) {
spdk_json_write_named_bool(w, "c2h_success", opts->c2h_success);
spdk_json_write_named_uint32(w, "sock_priority", opts->sock_priority);
if (transport->ops->dump_opts) {
transport->ops->dump_opts(transport, w);
}
spdk_json_write_named_uint32(w, "abort_timeout_sec", opts->abort_timeout_sec);
spdk_json_write_object_end(w);

View File

@ -473,8 +473,15 @@ struct spdk_nvmf_rdma_port {
TAILQ_ENTRY(spdk_nvmf_rdma_port) link;
};
/* Transport options specific to RDMA. Populated with defaults in
 * nvmf_rdma_create() and optionally overridden by the transport_specific
 * JSON params of the nvmf_create_transport RPC (see
 * rdma_transport_opts_decoder).
 */
struct rdma_transport_opts {
uint32_t max_srq_depth; /* depth used for each poller's shared receive queue */
bool no_srq; /* when true, do not create shared receive queues */
int acceptor_backlog; /* backlog passed to rdma_listen() */
};
struct spdk_nvmf_rdma_transport {
struct spdk_nvmf_transport transport;
struct rdma_transport_opts rdma_opts;
struct spdk_nvmf_rdma_conn_sched conn_sched;
@ -493,6 +500,21 @@ struct spdk_nvmf_rdma_transport {
TAILQ_HEAD(, spdk_nvmf_rdma_poll_group) poll_groups;
};
/* JSON decoders for the RDMA-specific params of the nvmf_create_transport
 * RPC. Every entry is marked optional (trailing 'true'), so defaults set in
 * nvmf_rdma_create() are kept when a param is absent.
 */
static const struct spdk_json_object_decoder rdma_transport_opts_decoder[] = {
{
"max_srq_depth", offsetof(struct rdma_transport_opts, max_srq_depth),
spdk_json_decode_uint32, true
},
{
"no_srq", offsetof(struct rdma_transport_opts, no_srq),
spdk_json_decode_bool, true
},
{
"acceptor_backlog", offsetof(struct rdma_transport_opts, acceptor_backlog),
spdk_json_decode_int32, true
},
};
static bool
nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
struct spdk_nvmf_rdma_request *rdma_req);
@ -2225,11 +2247,9 @@ nvmf_rdma_opts_init(struct spdk_nvmf_transport_opts *opts)
opts->max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH;
opts->num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS;
opts->buf_cache_size = SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE;
opts->max_srq_depth = SPDK_NVMF_RDMA_DEFAULT_SRQ_DEPTH;
opts->no_srq = SPDK_NVMF_RDMA_DEFAULT_NO_SRQ;
opts->dif_insert_or_strip = SPDK_NVMF_RDMA_DIF_INSERT_OR_STRIP;
opts->acceptor_backlog = SPDK_NVMF_RDMA_ACCEPTOR_BACKLOG;
opts->abort_timeout_sec = SPDK_NVMF_RDMA_DEFAULT_ABORT_TIMEOUT_SEC;
opts->transport_specific = NULL;
}
const struct spdk_mem_map_ops g_nvmf_rdma_map_ops = {
@ -2285,6 +2305,17 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
TAILQ_INIT(&rtransport->poll_groups);
rtransport->transport.ops = &spdk_nvmf_transport_rdma;
rtransport->rdma_opts.max_srq_depth = SPDK_NVMF_RDMA_DEFAULT_SRQ_DEPTH;
rtransport->rdma_opts.no_srq = SPDK_NVMF_RDMA_DEFAULT_NO_SRQ;
rtransport->rdma_opts.acceptor_backlog = SPDK_NVMF_RDMA_ACCEPTOR_BACKLOG;
if (opts->transport_specific != NULL &&
spdk_json_decode_object_relaxed(opts->transport_specific, rdma_transport_opts_decoder,
SPDK_COUNTOF(rdma_transport_opts_decoder),
&rtransport->rdma_opts)) {
SPDK_ERRLOG("spdk_json_decode_object_relaxed failed\n");
nvmf_rdma_destroy(&rtransport->transport);
return NULL;
}
SPDK_INFOLOG(rdma, "*** RDMA Transport Init ***\n"
" Transport opts: max_ioq_depth=%d, max_io_size=%d,\n"
@ -2299,9 +2330,9 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
opts->in_capsule_data_size,
opts->max_aq_depth,
opts->num_shared_buffers,
opts->max_srq_depth,
opts->no_srq,
opts->acceptor_backlog,
rtransport->rdma_opts.max_srq_depth,
rtransport->rdma_opts.no_srq,
rtransport->rdma_opts.acceptor_backlog,
opts->abort_timeout_sec);
/* I/O unit size cannot be larger than max I/O size */
@ -2309,10 +2340,10 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
opts->io_unit_size = opts->max_io_size;
}
if (opts->acceptor_backlog <= 0) {
if (rtransport->rdma_opts.acceptor_backlog <= 0) {
SPDK_ERRLOG("The acceptor backlog cannot be less than 1, setting to the default value of (%d).\n",
SPDK_NVMF_RDMA_ACCEPTOR_BACKLOG);
opts->acceptor_backlog = SPDK_NVMF_RDMA_ACCEPTOR_BACKLOG;
rtransport->rdma_opts.acceptor_backlog = SPDK_NVMF_RDMA_ACCEPTOR_BACKLOG;
}
if (opts->num_shared_buffers < (SPDK_NVMF_MAX_SGL_ENTRIES * 2)) {
@ -2491,6 +2522,18 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
return &rtransport->transport;
}
static void
nvmf_rdma_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w)
{
struct spdk_nvmf_rdma_transport *rtransport;
assert(w != NULL);
rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
spdk_json_write_named_uint32(w, "max_srq_depth", rtransport->rdma_opts.max_srq_depth);
spdk_json_write_named_bool(w, "no_srq", rtransport->rdma_opts.no_srq);
spdk_json_write_named_int32(w, "acceptor_backlog", rtransport->rdma_opts.acceptor_backlog);
}
static int
nvmf_rdma_destroy(struct spdk_nvmf_transport *transport)
{
@ -2630,7 +2673,7 @@ nvmf_rdma_listen(struct spdk_nvmf_transport *transport,
return -1;
}
rc = rdma_listen(port->id, transport->opts.acceptor_backlog);
rc = rdma_listen(port->id, rtransport->rdma_opts.acceptor_backlog);
if (rc < 0) {
SPDK_ERRLOG("rdma_listen() failed\n");
rdma_destroy_id(port->id);
@ -3255,8 +3298,8 @@ nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
STAILQ_INIT(&poller->qpairs_pending_recv);
TAILQ_INSERT_TAIL(&rgroup->pollers, poller, link);
if (transport->opts.no_srq == false && device->num_srq < device->attr.max_srq) {
poller->max_srq_depth = transport->opts.max_srq_depth;
if (rtransport->rdma_opts.no_srq == false && device->num_srq < device->attr.max_srq) {
poller->max_srq_depth = rtransport->rdma_opts.max_srq_depth;
device->num_srq++;
memset(&srq_init_attr, 0, sizeof(struct ibv_srq_init_attr));
@ -4229,6 +4272,7 @@ const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
.type = SPDK_NVME_TRANSPORT_RDMA,
.opts_init = nvmf_rdma_opts_init,
.create = nvmf_rdma_create,
.dump_opts = nvmf_rdma_dump_opts,
.destroy = nvmf_rdma_destroy,
.listen = nvmf_rdma_listen,

View File

@ -50,6 +50,8 @@
#define NVMF_TCP_MAX_ACCEPT_SOCK_ONE_TIME 16
#define SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY 16
#define SPDK_NVMF_TCP_DEFAULT_SOCK_PRIORITY 0
#define SPDK_NVMF_TCP_DEFAULT_SUCCESS_OPTIMIZATION true
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp;
@ -266,14 +268,31 @@ struct spdk_nvmf_tcp_port {
TAILQ_ENTRY(spdk_nvmf_tcp_port) link;
};
/* Transport options specific to TCP. Populated with defaults in
 * nvmf_tcp_create() and optionally overridden by the transport_specific
 * JSON params of the nvmf_create_transport RPC (see
 * tcp_transport_opts_decoder).
 */
struct tcp_transport_opts {
bool c2h_success; /* enable the C2H success optimization (skip response capsule) */
uint32_t sock_priority; /* SO_PRIORITY applied to listen sockets; 0..SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY */
};
struct spdk_nvmf_tcp_transport {
struct spdk_nvmf_transport transport;
struct tcp_transport_opts tcp_opts;
pthread_mutex_t lock;
TAILQ_HEAD(, spdk_nvmf_tcp_port) ports;
};
/* JSON decoders for the TCP-specific params of the nvmf_create_transport
 * RPC. Every entry is marked optional (trailing 'true'), so defaults set in
 * nvmf_tcp_create() are kept when a param is absent.
 */
static const struct spdk_json_object_decoder tcp_transport_opts_decoder[] = {
{
"c2h_success", offsetof(struct tcp_transport_opts, c2h_success),
spdk_json_decode_bool, true
},
{
"sock_priority", offsetof(struct tcp_transport_opts, sock_priority),
spdk_json_decode_uint32, true
},
};
static bool nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
struct spdk_nvmf_tcp_req *tcp_req);
@ -437,6 +456,17 @@ nvmf_tcp_qpair_destroy(struct spdk_nvmf_tcp_qpair *tqpair)
SPDK_DEBUGLOG(nvmf_tcp, "Leave\n");
}
static void
nvmf_tcp_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w)
{
struct spdk_nvmf_tcp_transport *ttransport;
assert(w != NULL);
ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
spdk_json_write_named_bool(w, "c2h_success", ttransport->tcp_opts.c2h_success);
spdk_json_write_named_uint32(w, "sock_priority", ttransport->tcp_opts.sock_priority);
}
static int
nvmf_tcp_destroy(struct spdk_nvmf_transport *transport)
{
@ -466,6 +496,17 @@ nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
ttransport->transport.ops = &spdk_nvmf_transport_tcp;
ttransport->tcp_opts.c2h_success = SPDK_NVMF_TCP_DEFAULT_SUCCESS_OPTIMIZATION;
ttransport->tcp_opts.sock_priority = SPDK_NVMF_TCP_DEFAULT_SOCK_PRIORITY;
if (opts->transport_specific != NULL &&
spdk_json_decode_object_relaxed(opts->transport_specific, tcp_transport_opts_decoder,
SPDK_COUNTOF(tcp_transport_opts_decoder),
&ttransport->tcp_opts)) {
SPDK_ERRLOG("spdk_json_decode_object_relaxed failed\n");
free(ttransport);
return NULL;
}
SPDK_NOTICELOG("*** TCP Transport Init ***\n");
SPDK_INFOLOG(nvmf_tcp, "*** TCP Transport Init ***\n"
@ -482,15 +523,15 @@ nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
opts->in_capsule_data_size,
opts->max_aq_depth,
opts->num_shared_buffers,
opts->c2h_success,
ttransport->tcp_opts.c2h_success,
opts->dif_insert_or_strip,
opts->sock_priority,
ttransport->tcp_opts.sock_priority,
opts->abort_timeout_sec);
if (opts->sock_priority > SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY) {
if (ttransport->tcp_opts.sock_priority > SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY) {
SPDK_ERRLOG("Unsupported socket_priority=%d, the current range is: 0 to %d\n"
"you can use man 7 socket to view the range of priority under SO_PRIORITY item\n",
opts->sock_priority, SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY);
ttransport->tcp_opts.sock_priority, SPDK_NVMF_TCP_DEFAULT_MAX_SOCK_PRIORITY);
free(ttransport);
return NULL;
}
@ -618,7 +659,7 @@ nvmf_tcp_listen(struct spdk_nvmf_transport *transport,
port->trid = trid;
opts.opts_size = sizeof(opts);
spdk_sock_get_default_opts(&opts);
opts.priority = transport->opts.sock_priority;
opts.priority = ttransport->tcp_opts.sock_priority;
port->listen_sock = spdk_sock_listen_ext(trid->traddr, trsvcid_int,
NULL, &opts);
if (port->listen_sock == NULL) {
@ -1295,9 +1336,11 @@ nvmf_tcp_pdu_c2h_data_complete(void *cb_arg)
struct spdk_nvmf_tcp_req *tcp_req = cb_arg;
struct spdk_nvmf_tcp_qpair *tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair,
struct spdk_nvmf_tcp_qpair, qpair);
struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(
tcp_req->req.qpair->transport, struct spdk_nvmf_tcp_transport, transport);
assert(tqpair != NULL);
if (tqpair->qpair.transport->opts.c2h_success) {
if (ttransport->tcp_opts.c2h_success) {
nvmf_tcp_request_free(tcp_req);
} else {
nvmf_tcp_req_pdu_fini(tcp_req);
@ -1947,6 +1990,8 @@ static void
nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
struct spdk_nvmf_tcp_req *tcp_req)
{
struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(
tqpair->qpair.transport, struct spdk_nvmf_tcp_transport, transport);
struct nvme_tcp_pdu *rsp_pdu;
struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;
uint32_t plen, pdo, alignment;
@ -2016,7 +2061,7 @@ nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
}
c2h_data->common.flags |= SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
if (tqpair->qpair.transport->opts.c2h_success) {
if (ttransport->tcp_opts.c2h_success) {
c2h_data->common.flags |= SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS;
}
@ -2586,9 +2631,7 @@ nvmf_tcp_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
#define SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE 131072
#define SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS 511
#define SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE 32
#define SPDK_NVMF_TCP_DEFAULT_SUCCESS_OPTIMIZATION true
#define SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP false
#define SPDK_NVMF_TCP_DEFAULT_SOCK_PRIORITY 0
#define SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC 1
static void
@ -2602,10 +2645,9 @@ nvmf_tcp_opts_init(struct spdk_nvmf_transport_opts *opts)
opts->max_aq_depth = SPDK_NVMF_TCP_DEFAULT_AQ_DEPTH;
opts->num_shared_buffers = SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS;
opts->buf_cache_size = SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE;
opts->c2h_success = SPDK_NVMF_TCP_DEFAULT_SUCCESS_OPTIMIZATION;
opts->dif_insert_or_strip = SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP;
opts->sock_priority = SPDK_NVMF_TCP_DEFAULT_SOCK_PRIORITY;
opts->abort_timeout_sec = SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC;
opts->transport_specific = NULL;
}
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp = {
@ -2613,6 +2655,7 @@ const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp = {
.type = SPDK_NVME_TRANSPORT_TCP,
.opts_init = nvmf_tcp_opts_init,
.create = nvmf_tcp_create,
.dump_opts = nvmf_tcp_dump_opts,
.destroy = nvmf_tcp_destroy,
.listen = nvmf_tcp_listen,

View File

@ -604,23 +604,17 @@ nvmf_parse_transport(struct nvmf_parse_transport_ctx *ctx)
opts.buf_cache_size = val;
}
if (trtype == SPDK_NVME_TRANSPORT_RDMA) {
val = spdk_conf_section_get_intval(ctx->sp, "MaxSRQDepth");
if (val >= 0) {
opts.max_srq_depth = val;
}
bval = spdk_conf_section_get_boolval(ctx->sp, "NoSRQ", false);
opts.no_srq = bval;
if (spdk_conf_section_get_val(ctx->sp, "MaxSRQDepth")) {
SPDK_WARNLOG("MaxSRQDepth can be no longer set with INI configs. JSON configuration only\n");
}
if (trtype == SPDK_NVME_TRANSPORT_TCP) {
bval = spdk_conf_section_get_boolval(ctx->sp, "C2HSuccess", true);
opts.c2h_success = bval;
val = spdk_conf_section_get_intval(ctx->sp, "SockPriority");
if (val >= 0) {
opts.sock_priority = val;
}
if (spdk_conf_section_get_val(ctx->sp, "NoSRQ")) {
SPDK_WARNLOG("NoSRQ can be no longer set with INI configs. JSON configuration only\n");
}
if (spdk_conf_section_get_val(ctx->sp, "C2HSuccess")) {
SPDK_WARNLOG("C2HSuccess can be no longer set with INI configs. JSON configuration only\n");
}
if (spdk_conf_section_get_val(ctx->sp, "SockPriority")) {
SPDK_WARNLOG("SockPriority can be no longer set with INI configs. JSON configuration only\n");
}
bval = spdk_conf_section_get_boolval(ctx->sp, "DifInsertOrStrip", false);

View File

@ -33,6 +33,7 @@
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
SPDK_LIB_LIST = json
TEST_FILE = rdma_ut.c
include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk

View File

@ -33,6 +33,7 @@
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
SPDK_LIB_LIST = json
TEST_FILE = tcp_ut.c
include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk