nvmf: zero-copy enable flag in transport opts

This makes it possible for the user to specify whether a transport
should use zero-copy to execute requests when the underlying bdev supports it.

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: I40a92b0d7a6707f4c9292795f380846acb227200
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10780
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Konrad Sztyber 2021-11-24 15:42:24 +01:00 committed by Jim Harris
parent 39b7e10fd4
commit aa1d039836
8 changed files with 39 additions and 1 deletion

View File

@@ -6035,6 +6035,7 @@ abort_timeout_sec | Optional | number | Abort execution timeout value
no_wr_batching | Optional | boolean | Disable work requests batching (RDMA only)
control_msg_num | Optional | number | The number of control messages per poll group (TCP only)
disable_mappable_bar0 | Optional | boolean | disable client mmap() of BAR0 (VFIO-USER only)
zcopy | Optional | boolean | Use zero-copy operations if the underlying bdev supports them
#### Example

View File

@@ -114,6 +114,8 @@ struct spdk_nvmf_transport_opts {
*/
size_t opts_size;
uint32_t acceptor_poll_rate;
/* Use zero-copy operations if the underlying bdev supports them */
bool zcopy;
};
struct spdk_nvmf_listen_opts {

View File

@@ -3695,10 +3695,15 @@ nvmf_ctrlr_process_io_fused_cmd(struct spdk_nvmf_request *req, struct spdk_bdev
bool
nvmf_ctrlr_use_zcopy(struct spdk_nvmf_request *req)
{
struct spdk_nvmf_transport *transport = req->qpair->transport;
struct spdk_nvmf_ns *ns;
req->zcopy_phase = NVMF_ZCOPY_PHASE_NONE;
if (!transport->opts.zcopy) {
return false;
}
if (nvmf_qpair_is_admin_queue(req->qpair)) {
/* Admin queue */
return false;

View File

@@ -1928,6 +1928,10 @@ static const struct spdk_json_object_decoder nvmf_rpc_create_transport_decoder[]
"abort_timeout_sec", offsetof(struct nvmf_rpc_create_transport_ctx, opts.abort_timeout_sec),
spdk_json_decode_uint32, true
},
{
"zcopy", offsetof(struct nvmf_rpc_create_transport_ctx, opts.zcopy),
spdk_json_decode_bool, true
},
{
"tgt_name", offsetof(struct nvmf_rpc_create_transport_ctx, tgt_name),
spdk_json_decode_string, true

View File

@@ -114,6 +114,7 @@ nvmf_transport_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json
spdk_json_write_named_uint32(w, "num_shared_buffers", opts->num_shared_buffers);
spdk_json_write_named_uint32(w, "buf_cache_size", opts->buf_cache_size);
spdk_json_write_named_bool(w, "dif_insert_or_strip", opts->dif_insert_or_strip);
spdk_json_write_named_bool(w, "zcopy", opts->zcopy);
if (transport->ops->dump_opts) {
transport->ops->dump_opts(transport, w);
@@ -182,6 +183,7 @@ static void nvmf_transport_opts_copy(struct spdk_nvmf_transport_opts *opts,
SET_FIELD(association_timeout);
SET_FIELD(transport_specific);
SET_FIELD(acceptor_poll_rate);
SET_FIELD(zcopy);
/* Do not remove this statement, you should always update this statement when you adding a new field,
* and do not forget to add the SET_FIELD statement for your added field. */

View File

@@ -1950,6 +1950,8 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
p.add_argument('-a', '--max-aq-depth', help='Max number of admin cmds per AQ', type=int)
p.add_argument('-n', '--num-shared-buffers', help='The number of pooled data buffers available to the transport', type=int)
p.add_argument('-b', '--buf-cache-size', help='The number of shared buffers to reserve for each poll group', type=int)
p.add_argument('-z', '--zcopy', action='store_true', help='''Use zero-copy operations if the
underlying bdev supports them''')
p.add_argument('-d', '--num-cqe', help="""The number of CQ entries. Only used when no_srq=true.
Relevant only for RDMA transport""", type=int)
p.add_argument('-s', '--max-srq-depth', help='Max number of outstanding I/O per SRQ. Relevant only for RDMA transport', type=int)

View File

@@ -114,6 +114,7 @@ def nvmf_create_transport(client, **params):
max_aq_depth: Max size admin queue per controller (optional)
num_shared_buffers: The number of pooled data buffers available to the transport (optional)
buf_cache_size: The number of shared buffers to reserve for each poll group (optional)
zcopy: Use zero-copy operations if the underlying bdev supports them (optional)
num_cqe: The number of CQ entries to configure CQ size. Only used when no_srq=true - RDMA specific (optional)
max_srq_depth: Max number of outstanding I/O per shared receive queue - RDMA specific (optional)
no_srq: Boolean flag to disable SRQ even for devices that support it - RDMA specific (optional)

View File

@@ -2291,6 +2291,7 @@ static void
test_nvmf_ctrlr_use_zcopy(void)
{
struct spdk_nvmf_subsystem subsystem = {};
struct spdk_nvmf_transport transport = {};
struct spdk_nvmf_request req = {};
struct spdk_nvmf_qpair qpair = {};
struct spdk_nvmf_ctrlr ctrlr = {};
@@ -2314,10 +2315,13 @@ test_nvmf_ctrlr_use_zcopy(void)
ctrlr.subsys = &subsystem;
transport.opts.zcopy = true;
qpair.ctrlr = &ctrlr;
qpair.group = &group;
qpair.qid = 1;
qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
qpair.transport = &transport;
group.thread = spdk_get_thread();
group.num_sgroups = 1;
@@ -2360,9 +2364,14 @@ test_nvmf_ctrlr_use_zcopy(void)
/* ZCOPY Not supported */
CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
ns.zcopy = true;
/* ZCOPY disabled on transport level */
transport.opts.zcopy = false;
CU_ASSERT(nvmf_ctrlr_use_zcopy(&req) == false);
transport.opts.zcopy = true;
/* Success */
ns.zcopy = true;
CU_ASSERT(nvmf_ctrlr_use_zcopy(&req));
}
@@ -2371,6 +2380,7 @@ test_spdk_nvmf_request_zcopy_start(void)
{
struct spdk_nvmf_request req = {};
struct spdk_nvmf_qpair qpair = {};
struct spdk_nvmf_transport transport = {};
struct spdk_nvme_cmd cmd = {};
union nvmf_c2h_msg rsp = {};
struct spdk_nvmf_ctrlr ctrlr = {};
@@ -2402,6 +2412,8 @@ test_spdk_nvmf_request_zcopy_start(void)
ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
ctrlr.listener = &listener;
transport.opts.zcopy = true;
group.thread = spdk_get_thread();
group.num_sgroups = 1;
sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
@@ -2415,6 +2427,7 @@ test_spdk_nvmf_request_zcopy_start(void)
qpair.ctrlr = &ctrlr;
qpair.group = &group;
qpair.transport = &transport;
qpair.qid = 1;
qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
@@ -2496,6 +2509,7 @@ test_zcopy_read(void)
{
struct spdk_nvmf_request req = {};
struct spdk_nvmf_qpair qpair = {};
struct spdk_nvmf_transport transport = {};
struct spdk_nvme_cmd cmd = {};
union nvmf_c2h_msg rsp = {};
struct spdk_nvmf_ctrlr ctrlr = {};
@@ -2527,6 +2541,8 @@ test_zcopy_read(void)
ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
ctrlr.listener = &listener;
transport.opts.zcopy = true;
group.thread = spdk_get_thread();
group.num_sgroups = 1;
sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
@@ -2540,6 +2556,7 @@ test_zcopy_read(void)
qpair.ctrlr = &ctrlr;
qpair.group = &group;
qpair.transport = &transport;
qpair.qid = 1;
qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
@@ -2583,6 +2600,7 @@ test_zcopy_write(void)
{
struct spdk_nvmf_request req = {};
struct spdk_nvmf_qpair qpair = {};
struct spdk_nvmf_transport transport = {};
struct spdk_nvme_cmd cmd = {};
union nvmf_c2h_msg rsp = {};
struct spdk_nvmf_ctrlr ctrlr = {};
@@ -2614,6 +2632,8 @@ test_zcopy_write(void)
ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
ctrlr.listener = &listener;
transport.opts.zcopy = true;
group.thread = spdk_get_thread();
group.num_sgroups = 1;
sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
@@ -2627,6 +2647,7 @@ test_zcopy_write(void)
qpair.ctrlr = &ctrlr;
qpair.group = &group;
qpair.transport = &transport;
qpair.qid = 1;
qpair.state = SPDK_NVMF_QPAIR_ACTIVE;