nvmf: remove accept poller from generic layer
Not every transport requires an accept poller - the transport-specific layer can have its own policy and way of handling new connections. APIs to notify the generic layer are already in place:
- spdk_nvmf_poll_group_add
- spdk_nvmf_tgt_new_qpair

Having the accept poller removed should also simplify the interrupt mode implementation in the transport-specific layer.

Fixes issue #1876

Change-Id: Ia6cac0c2da67a298e88956734c50fb6e6b7521f1
Signed-off-by: Jacek Kalwas <jacek.kalwas@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/7268
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent bace05499a
commit 43022da379
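For context, the diff below applies the same pattern to each transport (FC, RDMA, TCP, vfio-user): register an accept poller in the transport's create callback using the period from opts.acceptor_poll_rate, unregister it in destroy, and hand any newly detected connection to the generic layer via spdk_nvmf_tgt_new_qpair(). Below is a minimal sketch of that pattern, not code from this commit: the "my_" type and helper names are hypothetical placeholders, while SPDK_POLLER_REGISTER, spdk_poller_unregister, spdk_nvmf_tgt_new_qpair, SPDK_POLLER_BUSY and SPDK_POLLER_IDLE are existing SPDK APIs.

/* Sketch of the per-transport accept poller pattern; "my_" names are
 * hypothetical, the spdk_* / SPDK_* symbols are real SPDK APIs. */
#include "spdk/nvmf.h"
#include "spdk/nvmf_transport.h"
#include "spdk/thread.h"
#include "spdk/util.h"

struct my_transport {
        struct spdk_nvmf_transport transport;
        struct spdk_poller *accept_poller;
};

/* Hypothetical transport-specific check for a pending connection. */
static struct spdk_nvmf_qpair *my_poll_for_new_connection(struct my_transport *mtransport);

static int
my_accept(void *ctx)
{
        struct spdk_nvmf_transport *transport = ctx;
        struct my_transport *mtransport = SPDK_CONTAINEROF(transport, struct my_transport, transport);
        struct spdk_nvmf_qpair *qpair;
        uint32_t count = 0;

        while ((qpair = my_poll_for_new_connection(mtransport)) != NULL) {
                /* Notify the generic layer; it assigns the qpair to a poll group. */
                spdk_nvmf_tgt_new_qpair(transport->tgt, qpair);
                count++;
        }

        return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

/* In the transport's create callback: register the poller with the configured
 * acceptor_poll_rate (microseconds). In destroy: spdk_poller_unregister(). */
static void
my_register_accept_poller(struct my_transport *mtransport)
{
        mtransport->accept_poller = SPDK_POLLER_REGISTER(my_accept, &mtransport->transport,
                        mtransport->transport.opts.acceptor_poll_rate);
}

With this in place the generic target no longer needs its own accept poller or the transport ops accept callback, which is exactly what the hunks below remove.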
@@ -46,10 +46,8 @@
#include "spdk_internal/event.h"

#define NVMF_DEFAULT_SUBSYSTEMS 32
#define ACCEPT_TIMEOUT_US 10000 /* 10ms */

static const char *g_rpc_addr = SPDK_DEFAULT_RPC_ADDR;
static uint32_t g_acceptor_poll_rate = ACCEPT_TIMEOUT_US;

enum nvmf_target_state {
NVMF_INIT_SUBSYSTEM = 0,
@@ -122,7 +120,6 @@ usage(char *program_name)
printf("\t[-i shared memory ID (optional)]\n");
printf("\t[-m core mask for DPDK]\n");
printf("\t[-n max subsystems for target(default: 32)]\n");
printf("\t[-p acceptor poller rate in us for target(default: 10000us)]\n");
printf("\t[-r RPC listen address (default /var/tmp/spdk.sock)]\n");
printf("\t[-s memory size in MB for DPDK (default: 0MB)]\n");
printf("\t[-u disable PCI access]\n");
@@ -162,14 +159,6 @@ parse_args(int argc, char **argv, struct spdk_env_opts *opts)
return -EINVAL;
}
break;
case 'p':
value = spdk_strtol(optarg, 10);
if (value < 0) {
fprintf(stderr, "converting a string to integer failed\n");
return -EINVAL;
}
g_acceptor_poll_rate = value;
break;
case 'r':
g_rpc_addr = optarg;
break;

@@ -84,7 +84,6 @@ enum spdk_nvmf_tgt_discovery_filter {
struct spdk_nvmf_target_opts {
char name[NVMF_TGT_NAME_MAX_LENGTH];
uint32_t max_subsystems;
uint32_t acceptor_poll_rate;
uint16_t crdt[3];
enum spdk_nvmf_tgt_discovery_filter discovery_filter;
};
@@ -114,6 +113,7 @@ struct spdk_nvmf_transport_opts {
* New added fields should be put at the end of the struct.
*/
size_t opts_size;
uint32_t acceptor_poll_rate;
};

struct spdk_nvmf_listen_opts {

@@ -56,6 +56,8 @@
#define NVMF_DATA_BUFFER_ALIGNMENT VALUE_4KB
#define NVMF_DATA_BUFFER_MASK (NVMF_DATA_BUFFER_ALIGNMENT - 1LL)

#define SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US 10000

union nvmf_h2c_msg {
struct spdk_nvmf_capsule_cmd nvmf_cmd;
struct spdk_nvme_cmd nvme_cmd;
@@ -304,11 +306,6 @@ struct spdk_nvmf_transport_ops {
void (*subsystem_remove_ns)(struct spdk_nvmf_transport *transport,
const struct spdk_nvmf_subsystem *subsystem, uint32_t nsid);

/**
* Check for new connections on the transport.
*/
uint32_t (*accept)(struct spdk_nvmf_transport *transport);

/**
* Initialize subset of identify controller data.
*/

@@ -229,6 +229,7 @@ struct spdk_nvmf_fc_adm_hw_port_reset_ctx {

struct spdk_nvmf_fc_transport {
struct spdk_nvmf_transport transport;
struct spdk_poller *accept_poller;
pthread_mutex_t lock;
};

@@ -1957,6 +1958,9 @@ nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts)
opts->num_shared_buffers = SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS;
}

static int
nvmf_fc_accept(void *ctx);

static struct spdk_nvmf_transport *
nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
{
@@ -2005,6 +2009,14 @@ nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
return NULL;
}

g_nvmf_ftransport->accept_poller = SPDK_POLLER_REGISTER(nvmf_fc_accept,
&g_nvmf_ftransport->transport, g_nvmf_ftransport->transport.opts.acceptor_poll_rate);
if (!g_nvmf_ftransport->accept_poller) {
free(g_nvmf_ftransport);
g_nvmf_ftransport = NULL;
return NULL;
}

/* initialize the low level FC driver */
nvmf_fc_lld_init();

@@ -2033,6 +2045,8 @@ nvmf_fc_destroy(struct spdk_nvmf_transport *transport,
TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
free(fgroup);
}

spdk_poller_unregister(&g_nvmf_ftransport->accept_poller);
g_nvmf_fgroup_count = 0;
g_transport_destroy_done_cb = cb_fn;

@@ -2056,9 +2070,10 @@ nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport,
{
}

static uint32_t
nvmf_fc_accept(struct spdk_nvmf_transport *transport)
static int
nvmf_fc_accept(void *ctx)
{
struct spdk_nvmf_transport *transport = ctx;
struct spdk_nvmf_fc_port *fc_port = NULL;
uint32_t count = 0;
static bool start_lld = false;
@@ -2075,7 +2090,7 @@ nvmf_fc_accept(struct spdk_nvmf_transport *transport)
}
}

return count;
return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

static void
@@ -2323,7 +2338,6 @@ const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = {

.listen = nvmf_fc_listen,
.stop_listen = nvmf_fc_stop_listen,
.accept = nvmf_fc_accept,

.listener_discover = nvmf_fc_discover,

@@ -49,7 +49,6 @@
SPDK_LOG_REGISTER_COMPONENT(nvmf)

#define SPDK_NVMF_DEFAULT_MAX_SUBSYSTEMS 1024
#define SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US 10000

static TAILQ_HEAD(, spdk_nvmf_tgt) g_nvmf_tgts = TAILQ_HEAD_INITIALIZER(g_nvmf_tgts);

@@ -242,25 +241,10 @@ nvmf_tgt_destroy_poll_group_qpairs(struct spdk_nvmf_poll_group *group)
_nvmf_tgt_disconnect_next_qpair(ctx);
}

static int
nvmf_tgt_accept(void *ctx)
{
struct spdk_nvmf_tgt *tgt = ctx;
struct spdk_nvmf_transport *transport, *tmp;
int count = 0;

TAILQ_FOREACH_SAFE(transport, &tgt->transports, link, tmp) {
count += nvmf_transport_accept(transport);
}

return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

struct spdk_nvmf_tgt *
spdk_nvmf_tgt_create(struct spdk_nvmf_target_opts *opts)
{
struct spdk_nvmf_tgt *tgt, *tmp_tgt;
uint32_t acceptor_poll_rate;

if (strnlen(opts->name, NVMF_TGT_NAME_MAX_LENGTH) == NVMF_TGT_NAME_MAX_LENGTH) {
SPDK_ERRLOG("Provided target name exceeds the max length of %u.\n", NVMF_TGT_NAME_MAX_LENGTH);
@@ -287,12 +271,6 @@ spdk_nvmf_tgt_create(struct spdk_nvmf_target_opts *opts)
tgt->max_subsystems = opts->max_subsystems;
}

if (!opts || !opts->acceptor_poll_rate) {
acceptor_poll_rate = SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US;
} else {
acceptor_poll_rate = opts->acceptor_poll_rate;
}

if (!opts) {
tgt->crdt[0] = 0;
tgt->crdt[1] = 0;
@@ -321,14 +299,6 @@ spdk_nvmf_tgt_create(struct spdk_nvmf_target_opts *opts)

pthread_mutex_init(&tgt->mutex, NULL);

tgt->accept_poller = SPDK_POLLER_REGISTER(nvmf_tgt_accept, tgt, acceptor_poll_rate);
if (!tgt->accept_poller) {
pthread_mutex_destroy(&tgt->mutex);
free(tgt->subsystems);
free(tgt);
return NULL;
}

spdk_io_device_register(tgt,
nvmf_tgt_create_poll_group,
nvmf_tgt_destroy_poll_group,
@@ -402,8 +372,6 @@ spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt,
tgt->destroy_cb_fn = cb_fn;
tgt->destroy_cb_arg = cb_arg;

spdk_poller_unregister(&tgt->accept_poller);

TAILQ_REMOVE(&g_nvmf_tgts, tgt, link);

spdk_io_device_unregister(tgt, nvmf_tgt_destroy_cb);

@@ -72,8 +72,6 @@ struct spdk_nvmf_tgt {

uint64_t discovery_genctr;

struct spdk_poller *accept_poller;

uint32_t max_subsystems;

enum spdk_nvmf_tgt_discovery_filter discovery_filter;

@@ -1932,6 +1932,10 @@ static const struct spdk_json_object_decoder nvmf_rpc_create_transport_decoder[]
"tgt_name", offsetof(struct nvmf_rpc_create_transport_ctx, tgt_name),
spdk_json_decode_string, true
},
{
"acceptor_poll_rate", offsetof(struct nvmf_rpc_create_transport_ctx, opts.acceptor_poll_rate),
spdk_json_decode_uint32, true
},
};

static void

@@ -485,6 +485,7 @@ struct spdk_nvmf_rdma_transport {

struct spdk_mempool *data_wr_pool;

struct spdk_poller *accept_poller;
pthread_mutex_t lock;

/* fields used to poll RDMA/IB events */
@@ -2194,6 +2195,9 @@ nvmf_rdma_is_rxe_device(struct spdk_nvmf_rdma_device *device)
device->attr.vendor_id == NVMF_RXE_VENDOR_ID_NEW;
}

static int
nvmf_rdma_accept(void *ctx);

static struct spdk_nvmf_transport *
nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
{
@@ -2458,6 +2462,13 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
rtransport->poll_fds[i++].events = POLLIN;
}

rtransport->accept_poller = SPDK_POLLER_REGISTER(nvmf_rdma_accept, &rtransport->transport,
rtransport->transport.opts.acceptor_poll_rate);
if (!rtransport->accept_poller) {
nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
return NULL;
}

return &rtransport->transport;
}

@@ -2523,6 +2534,7 @@ nvmf_rdma_destroy(struct spdk_nvmf_transport *transport,

spdk_mempool_free(rtransport->data_wr_pool);

spdk_poller_unregister(&rtransport->accept_poller);
pthread_mutex_destroy(&rtransport->lock);
free(rtransport);

@@ -3143,10 +3155,11 @@ nvmf_process_ib_events(struct spdk_nvmf_rdma_device *device, uint32_t max_events
SPDK_DEBUGLOG(rdma, "Device %s: %u events processed\n", device->context->device->name, i);
}

static uint32_t
nvmf_rdma_accept(struct spdk_nvmf_transport *transport)
static int
nvmf_rdma_accept(void *ctx)
{
int nfds, i = 0;
struct spdk_nvmf_transport *transport = ctx;
struct spdk_nvmf_rdma_transport *rtransport;
struct spdk_nvmf_rdma_device *device, *tmp;
uint32_t count;
@@ -3155,7 +3168,7 @@ nvmf_rdma_accept(struct spdk_nvmf_transport *transport)
count = nfds = poll(rtransport->poll_fds, rtransport->npoll_fds, 0);

if (nfds <= 0) {
return 0;
return SPDK_POLLER_IDLE;
}

/* The first poll descriptor is RDMA CM event */
@@ -3165,7 +3178,7 @@ nvmf_rdma_accept(struct spdk_nvmf_transport *transport)
}

if (nfds == 0) {
return count;
return SPDK_POLLER_BUSY;
}

/* Second and subsequent poll descriptors are IB async events */
@@ -3178,7 +3191,7 @@ nvmf_rdma_accept(struct spdk_nvmf_transport *transport)
/* check all flagged fd's have been served */
assert(nfds == 0);

return count;
return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

static void
@@ -4217,7 +4230,6 @@ const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {

.listen = nvmf_rdma_listen,
.stop_listen = nvmf_rdma_stop_listen,
.accept = nvmf_rdma_accept,
.cdata_init = nvmf_rdma_cdata_init,

.listener_discover = nvmf_rdma_discover,

@@ -319,6 +319,7 @@ struct spdk_nvmf_tcp_transport {

struct spdk_nvmf_tcp_poll_group *next_pg;

struct spdk_poller *accept_poller;
pthread_mutex_t lock;

TAILQ_HEAD(, spdk_nvmf_tcp_port) ports;
@@ -541,6 +542,7 @@ nvmf_tcp_destroy(struct spdk_nvmf_transport *transport,
assert(transport != NULL);
ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);

spdk_poller_unregister(&ttransport->accept_poller);
pthread_mutex_destroy(&ttransport->lock);
free(ttransport);

@@ -550,6 +552,9 @@ nvmf_tcp_destroy(struct spdk_nvmf_transport *transport,
return 0;
}

static int
nvmf_tcp_accept(void *ctx);

static struct spdk_nvmf_transport *
nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
{
@@ -640,6 +645,13 @@ nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)

pthread_mutex_init(&ttransport->lock, NULL);

ttransport->accept_poller = SPDK_POLLER_REGISTER(nvmf_tcp_accept, &ttransport->transport,
ttransport->transport.opts.acceptor_poll_rate);
if (!ttransport->accept_poller) {
free(ttransport);
return NULL;
}

return &ttransport->transport;
}

@@ -1106,9 +1118,10 @@ nvmf_tcp_port_accept(struct spdk_nvmf_transport *transport, struct spdk_nvmf_tcp
return count;
}

static uint32_t
nvmf_tcp_accept(struct spdk_nvmf_transport *transport)
static int
nvmf_tcp_accept(void *ctx)
{
struct spdk_nvmf_transport *transport = ctx;
struct spdk_nvmf_tcp_transport *ttransport;
struct spdk_nvmf_tcp_port *port;
uint32_t count = 0;
@@ -1119,7 +1132,7 @@ nvmf_tcp_accept(struct spdk_nvmf_transport *transport)
count += nvmf_tcp_port_accept(transport, port);
}

return count;
return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

static void
@@ -3052,7 +3065,6 @@ const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp = {

.listen = nvmf_tcp_listen,
.stop_listen = nvmf_tcp_stop_listen,
.accept = nvmf_tcp_accept,

.listener_discover = nvmf_tcp_discover,

@@ -181,10 +181,11 @@ static void nvmf_transport_opts_copy(struct spdk_nvmf_transport_opts *opts,
SET_FIELD(abort_timeout_sec);
SET_FIELD(association_timeout);
SET_FIELD(transport_specific);
SET_FIELD(acceptor_poll_rate);

/* Do not remove this statement, you should always update this statement when you adding a new field,
* and do not forget to add the SET_FIELD statement for your added field. */
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_transport_opts) == 56, "Incorrect size");
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_transport_opts) == 64, "Incorrect size");

#undef SET_FIELD
#undef FILED_CHECK
@@ -438,12 +439,6 @@ spdk_nvmf_transport_stop_listen_async(struct spdk_nvmf_transport *transport,
return 0;
}

uint32_t
nvmf_transport_accept(struct spdk_nvmf_transport *transport)
{
return transport->ops->accept(transport);
}

void
nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
struct spdk_nvme_transport_id *trid,
@@ -640,6 +635,7 @@ spdk_nvmf_transport_opts_init(const char *transport_name,
}

opts_local.association_timeout = NVMF_TRANSPORT_DEFAULT_ASSOCIATION_TIMEOUT_IN_MS;
opts_local.acceptor_poll_rate = SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US;
ops->opts_init(&opts_local);

nvmf_transport_opts_copy(opts, &opts_local, opts_size);

@@ -40,8 +40,6 @@
#include "spdk/nvmf.h"
#include "spdk/nvmf_transport.h"

uint32_t nvmf_transport_accept(struct spdk_nvmf_transport *transport);

void nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
struct spdk_nvme_transport_id *trid,
struct spdk_nvmf_discovery_log_page_entry *entry);

@@ -211,6 +211,7 @@ struct nvmf_vfio_user_transport_opts {
struct nvmf_vfio_user_transport {
struct spdk_nvmf_transport transport;
struct nvmf_vfio_user_transport_opts transport_opts;
struct spdk_poller *accept_poller;
pthread_mutex_t lock;
TAILQ_HEAD(, nvmf_vfio_user_endpoint) endpoints;
};
@@ -533,6 +534,7 @@ nvmf_vfio_user_destroy(struct spdk_nvmf_transport *transport,
vu_transport = SPDK_CONTAINEROF(transport, struct nvmf_vfio_user_transport,
transport);

spdk_poller_unregister(&vu_transport->accept_poller);
(void)pthread_mutex_destroy(&vu_transport->lock);

TAILQ_FOREACH_SAFE(endpoint, &vu_transport->endpoints, link, tmp) {
@@ -557,6 +559,9 @@ static const struct spdk_json_object_decoder vfio_user_transport_opts_decoder[]
},
};

static int
nvmf_vfio_user_accept(void *ctx);

static struct spdk_nvmf_transport *
nvmf_vfio_user_create(struct spdk_nvmf_transport_opts *opts)
{
@@ -592,6 +597,13 @@ nvmf_vfio_user_create(struct spdk_nvmf_transport_opts *opts)
return NULL;
}

vu_transport->accept_poller = SPDK_POLLER_REGISTER(nvmf_vfio_user_accept, &vu_transport->transport,
vu_transport->transport.opts.acceptor_poll_rate);
if (!vu_transport->accept_poller) {
free(vu_transport);
return NULL;
}

SPDK_DEBUGLOG(nvmf_vfio, "vfio_user transport: disable_mappable_bar0=%d\n",
vu_transport->transport_opts.disable_mappable_bar0);

@@ -2231,12 +2243,11 @@ nvmf_vfio_user_listen_associate(struct spdk_nvmf_transport *transport,
*
* This poller also takes care of handling the creation of any pending new
* qpairs.
*
* Returns the number of events handled.
*/
static uint32_t
nvmf_vfio_user_accept(struct spdk_nvmf_transport *transport)
static int
nvmf_vfio_user_accept(void *ctx)
{
struct spdk_nvmf_transport *transport = ctx;
struct nvmf_vfio_user_transport *vu_transport;
struct nvmf_vfio_user_endpoint *endpoint;
uint32_t count = 0;
@@ -2259,7 +2270,7 @@ nvmf_vfio_user_accept(struct spdk_nvmf_transport *transport)
}

pthread_mutex_unlock(&vu_transport->lock);
return 1;
return SPDK_POLLER_BUSY;
}

count++;
@@ -2270,7 +2281,7 @@ nvmf_vfio_user_accept(struct spdk_nvmf_transport *transport)

pthread_mutex_unlock(&vu_transport->lock);

return count;
return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

static void
@@ -3010,7 +3021,6 @@ const struct spdk_nvmf_transport_ops spdk_nvmf_transport_vfio_user = {

.listen = nvmf_vfio_user_listen,
.stop_listen = nvmf_vfio_user_stop_listen,
.accept = nvmf_vfio_user_accept,
.cdata_init = nvmf_vfio_user_cdata_init,
.listen_associate = nvmf_vfio_user_listen_associate,

@@ -43,14 +43,11 @@
#include "spdk_internal/init.h"
#include "spdk/log.h"

#define ACCEPT_TIMEOUT_US 10000 /* 10ms */

struct spdk_nvmf_admin_passthru_conf {
bool identify_ctrlr;
};

struct spdk_nvmf_tgt_conf {
uint32_t acceptor_poll_rate;
uint32_t conn_sched; /* Deprecated. */
struct spdk_nvmf_admin_passthru_conf admin_passthru;
enum spdk_nvmf_tgt_discovery_filter discovery_filter;

@@ -197,7 +197,6 @@ nvmf_decode_poll_groups_mask(const struct spdk_json_val *val, void *out)
}

static const struct spdk_json_object_decoder nvmf_rpc_subsystem_tgt_conf_decoder[] = {
{"acceptor_poll_rate", offsetof(struct spdk_nvmf_tgt_conf, acceptor_poll_rate), spdk_json_decode_uint32, true},
{"conn_sched", offsetof(struct spdk_nvmf_tgt_conf, conn_sched), decode_conn_sched, true},
{"admin_cmd_passthru", offsetof(struct spdk_nvmf_tgt_conf, admin_passthru), decode_admin_passthru, true},
{"poll_groups_mask", 0, nvmf_decode_poll_groups_mask, true},

@@ -61,7 +61,6 @@ struct nvmf_tgt_poll_group {
};

struct spdk_nvmf_tgt_conf g_spdk_nvmf_tgt_conf = {
.acceptor_poll_rate = ACCEPT_TIMEOUT_US,
.admin_passthru.identify_ctrlr = false
};

@@ -303,7 +302,6 @@ nvmf_tgt_create_target(void)
};

opts.max_subsystems = g_spdk_nvmf_tgt_max_subsystems;
opts.acceptor_poll_rate = g_spdk_nvmf_tgt_conf.acceptor_poll_rate;
opts.crdt[0] = g_spdk_nvmf_tgt_crdt[0];
opts.crdt[1] = g_spdk_nvmf_tgt_crdt[1];
opts.crdt[2] = g_spdk_nvmf_tgt_crdt[2];
@@ -520,7 +518,6 @@ nvmf_subsystem_write_config_json(struct spdk_json_write_ctx *w)
spdk_json_write_named_string(w, "method", "nvmf_set_config");

spdk_json_write_named_object_begin(w, "params");
spdk_json_write_named_uint32(w, "acceptor_poll_rate", g_spdk_nvmf_tgt_conf.acceptor_poll_rate);
nvmf_subsystem_dump_discover_filter(w);
spdk_json_write_named_object_begin(w, "admin_cmd_passthru");
spdk_json_write_named_bool(w, "identify_ctrlr",

@@ -1919,7 +1919,6 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse

def nvmf_set_config(args):
rpc.nvmf.nvmf_set_config(args.client,
acceptor_poll_rate=args.acceptor_poll_rate,
conn_sched=args.conn_sched,
passthru_identify_ctrlr=args.passthru_identify_ctrlr,
poll_groups_mask=args.poll_groups_mask,
@@ -1927,7 +1926,6 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse

p = subparsers.add_parser('nvmf_set_config', aliases=['set_nvmf_target_config'],
help='Set NVMf target config')
p.add_argument('-r', '--acceptor-poll-rate', help='Polling interval of the acceptor for incoming connections (usec)', type=int)
p.add_argument('-s', '--conn-sched', help='(Deprecated). Ignored.')
p.add_argument('-i', '--passthru-identify-ctrlr', help="""Passthrough fields like serial number and model number
when the controller has a single namespace that is an NVMe bdev""", action='store_true')
@@ -1966,6 +1964,7 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
Relevant only for TCP transport""", type=int)
p.add_argument('-M', '--disable-mappable-bar0', action='store_true', help="""Disable mmap() of BAR0.
Relevant only for VFIO-USER transport""")
p.add_argument('--acceptor-poll-rate', help='Polling interval of the acceptor for incoming connections (usec)', type=int)
p.set_defaults(func=nvmf_create_transport)

def nvmf_get_transports(args):

@@ -21,7 +21,6 @@ def nvmf_set_max_subsystems(client,

@deprecated_alias('set_nvmf_target_config')
def nvmf_set_config(client,
acceptor_poll_rate=None,
conn_sched=None,
passthru_identify_ctrlr=None,
poll_groups_mask=None,
@@ -29,7 +28,6 @@ def nvmf_set_config(client,
"""Set NVMe-oF target subsystem configuration.

Args:
acceptor_poll_rate: Acceptor poll period in microseconds (optional)
conn_sched: (Deprecated) Ignored
discovery_filter: Set discovery filter (optional), possible values are: `match_any` (default) or
comma separated values: `transport`, `address`, `svcid`
@@ -39,8 +37,6 @@ def nvmf_set_config(client,
"""
params = {}

if acceptor_poll_rate:
params['acceptor_poll_rate'] = acceptor_poll_rate
if conn_sched:
print("WARNING: conn_sched is deprecated and ignored.")
if passthru_identify_ctrlr:
@@ -128,6 +124,7 @@ def nvmf_create_transport(client, **params):
no_wr_batching: Boolean flag to disable work requests batching - RDMA specific (optional)
control_msg_num: The number of control messages per poll group - TCP specific (optional)
disable_mappable_bar0: disable client mmap() of BAR0 - VFIO-USER specific (optional)
acceptor_poll_rate: Acceptor poll period in microseconds (optional)
Returns:
True or False
"""

@@ -396,7 +396,7 @@ test_nvmf_tcp_create(void)
CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
/* destroy transport */
spdk_mempool_free(ttransport->transport.data_buf_pool);
free(ttransport);
CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

/* case 2 */
memset(&opts, 0, sizeof(opts));
@@ -419,7 +419,7 @@ test_nvmf_tcp_create(void)
CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
/* destroy transport */
spdk_mempool_free(ttransport->transport.data_buf_pool);
free(ttransport);
CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

/* case 3 */
memset(&opts, 0, sizeof(opts));