lib: net, notify, nvme, rocksdb: remove spdk_ prefix.

Remove only the spdk_ prefix from static functions in
the above libraries.

Signed-off-by: Seth Howell <seth.howell@intel.com>
Change-Id: I59ce032c3312fa73f30c133fd62e603c1eee2859
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/2365
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
Seth Howell 2020-05-10 16:32:35 -07:00 committed by Tomasz Zawadzki
parent 0be5557cad
commit a3f72b2e5a
15 changed files with 117 additions and 117 deletions

View File

@ -317,7 +317,7 @@ Where X is unique SPDK NVMe controller index and Y is namespace id.
Requests from CUSE are handled by pthreads when controller and namespaces are created.
Those pass the I/O or admin commands via a ring to a thread that processes them using
spdk_nvme_io_msg_process().
nvme_io_msg_process().
Ioctls that request information attained when attaching NVMe controller receive an
immediate response, without passing them through the ring.

View File

@ -58,8 +58,8 @@ static const struct spdk_json_object_decoder rpc_ip_address_decoders[] = {
};
static void
spdk_rpc_net_interface_add_ip_address(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
rpc_net_interface_add_ip_address(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
{
struct rpc_ip_address req = {};
struct spdk_json_write_ctx *w;
@ -100,13 +100,13 @@ spdk_rpc_net_interface_add_ip_address(struct spdk_jsonrpc_request *request,
invalid:
free_rpc_ip_address(&req);
}
SPDK_RPC_REGISTER("net_interface_add_ip_address", spdk_rpc_net_interface_add_ip_address,
SPDK_RPC_REGISTER("net_interface_add_ip_address", rpc_net_interface_add_ip_address,
SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(net_interface_add_ip_address, add_ip_address)
static void
spdk_rpc_net_interface_delete_ip_address(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
rpc_net_interface_delete_ip_address(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
{
struct rpc_ip_address req = {};
struct spdk_json_write_ctx *w;
@ -147,13 +147,13 @@ spdk_rpc_net_interface_delete_ip_address(struct spdk_jsonrpc_request *request,
invalid:
free_rpc_ip_address(&req);
}
SPDK_RPC_REGISTER("net_interface_delete_ip_address", spdk_rpc_net_interface_delete_ip_address,
SPDK_RPC_REGISTER("net_interface_delete_ip_address", rpc_net_interface_delete_ip_address,
SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(net_interface_delete_ip_address, delete_ip_address)
static void
spdk_rpc_net_get_interfaces(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
rpc_net_get_interfaces(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
{
struct spdk_json_write_ctx *w;
TAILQ_HEAD(, spdk_interface) *interface_head = interface_get_list();
@ -192,7 +192,7 @@ spdk_rpc_net_get_interfaces(struct spdk_jsonrpc_request *request,
spdk_jsonrpc_end_result(request, w);
}
SPDK_RPC_REGISTER("net_get_interfaces", spdk_rpc_net_get_interfaces, SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER("net_get_interfaces", rpc_net_get_interfaces, SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(net_get_interfaces, get_interfaces)
SPDK_LOG_REGISTER_COMPONENT("net", SPDK_LOG_NET)

View File

@ -48,8 +48,8 @@ notify_get_types_cb(const struct spdk_notify_type *type, void *ctx)
}
static void
spdk_rpc_notify_get_types(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
rpc_notify_get_types(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
{
struct spdk_json_write_ctx *w;
@ -66,7 +66,7 @@ spdk_rpc_notify_get_types(struct spdk_jsonrpc_request *request,
spdk_jsonrpc_end_result(request, w);
}
SPDK_RPC_REGISTER("notify_get_types", spdk_rpc_notify_get_types, SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER("notify_get_types", rpc_notify_get_types, SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(notify_get_types, get_notification_types)
struct rpc_notify_get_notifications {
@ -96,8 +96,8 @@ notify_get_notifications_cb(uint64_t id, const struct spdk_notify_event *ev, voi
}
static void
spdk_rpc_notify_get_notifications(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
rpc_notify_get_notifications(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
{
struct rpc_notify_get_notifications req = {0, UINT64_MAX};
@ -120,7 +120,7 @@ spdk_rpc_notify_get_notifications(struct spdk_jsonrpc_request *request,
spdk_jsonrpc_end_result(request, req.w);
}
SPDK_RPC_REGISTER("notify_get_notifications", spdk_rpc_notify_get_notifications, SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER("notify_get_notifications", rpc_notify_get_notifications, SPDK_RPC_RUNTIME)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(notify_get_notifications, get_notifications)
SPDK_LOG_REGISTER_COMPONENT("notify_rpc", SPDK_NOTIFY_RPC)

View File

@ -445,7 +445,7 @@ nvme_ctrlr_probe(const struct spdk_nvme_transport_id *trid,
spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
if (!probe_ctx->probe_cb || probe_ctx->probe_cb(probe_ctx->cb_ctx, trid, &opts)) {
ctrlr = spdk_nvme_get_ctrlr_by_trid_unsafe(trid);
ctrlr = nvme_get_ctrlr_by_trid_unsafe(trid);
if (ctrlr) {
/* This ctrlr already exists.
* Increase the ref count before calling attach_cb() as the user may
@ -554,12 +554,12 @@ nvme_init_controllers(struct spdk_nvme_probe_ctx *probe_ctx)
/* This function must not be called while holding g_spdk_nvme_driver->lock */
static struct spdk_nvme_ctrlr *
spdk_nvme_get_ctrlr_by_trid(const struct spdk_nvme_transport_id *trid)
nvme_get_ctrlr_by_trid(const struct spdk_nvme_transport_id *trid)
{
struct spdk_nvme_ctrlr *ctrlr;
nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
ctrlr = spdk_nvme_get_ctrlr_by_trid_unsafe(trid);
ctrlr = nvme_get_ctrlr_by_trid_unsafe(trid);
nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
return ctrlr;
@ -567,7 +567,7 @@ spdk_nvme_get_ctrlr_by_trid(const struct spdk_nvme_transport_id *trid)
/* This function must be called while holding g_spdk_nvme_driver->lock */
struct spdk_nvme_ctrlr *
spdk_nvme_get_ctrlr_by_trid_unsafe(const struct spdk_nvme_transport_id *trid)
nvme_get_ctrlr_by_trid_unsafe(const struct spdk_nvme_transport_id *trid)
{
struct spdk_nvme_ctrlr *ctrlr;
@ -590,8 +590,8 @@ spdk_nvme_get_ctrlr_by_trid_unsafe(const struct spdk_nvme_transport_id *trid)
/* This function must only be called while holding g_spdk_nvme_driver->lock */
static int
spdk_nvme_probe_internal(struct spdk_nvme_probe_ctx *probe_ctx,
bool direct_connect)
nvme_probe_internal(struct spdk_nvme_probe_ctx *probe_ctx,
bool direct_connect)
{
int rc;
struct spdk_nvme_ctrlr *ctrlr, *ctrlr_tmp;
@ -651,12 +651,12 @@ spdk_nvme_probe_internal(struct spdk_nvme_probe_ctx *probe_ctx,
}
static void
spdk_nvme_probe_ctx_init(struct spdk_nvme_probe_ctx *probe_ctx,
const struct spdk_nvme_transport_id *trid,
void *cb_ctx,
spdk_nvme_probe_cb probe_cb,
spdk_nvme_attach_cb attach_cb,
spdk_nvme_remove_cb remove_cb)
nvme_probe_ctx_init(struct spdk_nvme_probe_ctx *probe_ctx,
const struct spdk_nvme_transport_id *trid,
void *cb_ctx,
spdk_nvme_probe_cb probe_cb,
spdk_nvme_attach_cb attach_cb,
spdk_nvme_remove_cb remove_cb)
{
probe_ctx->trid = *trid;
probe_ctx->cb_ctx = cb_ctx;
@ -695,8 +695,8 @@ spdk_nvme_probe(const struct spdk_nvme_transport_id *trid, void *cb_ctx,
}
static bool
spdk_nvme_connect_probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
struct spdk_nvme_ctrlr_opts *opts)
nvme_connect_probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
struct spdk_nvme_ctrlr_opts *opts)
{
struct spdk_nvme_ctrlr_opts *requested_opts = cb_ctx;
@ -735,7 +735,7 @@ spdk_nvme_connect(const struct spdk_nvme_transport_id *trid,
return NULL;
}
ctrlr = spdk_nvme_get_ctrlr_by_trid(trid);
ctrlr = nvme_get_ctrlr_by_trid(trid);
return ctrlr;
}
@ -1213,8 +1213,8 @@ spdk_nvme_probe_async(const struct spdk_nvme_transport_id *trid,
return NULL;
}
spdk_nvme_probe_ctx_init(probe_ctx, trid, cb_ctx, probe_cb, attach_cb, remove_cb);
rc = spdk_nvme_probe_internal(probe_ctx, false);
nvme_probe_ctx_init(probe_ctx, trid, cb_ctx, probe_cb, attach_cb, remove_cb);
rc = nvme_probe_internal(probe_ctx, false);
if (rc != 0) {
free(probe_ctx);
return NULL;
@ -1273,11 +1273,11 @@ spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
}
if (opts) {
probe_cb = spdk_nvme_connect_probe_cb;
probe_cb = nvme_connect_probe_cb;
}
spdk_nvme_probe_ctx_init(probe_ctx, trid, (void *)opts, probe_cb, attach_cb, NULL);
rc = spdk_nvme_probe_internal(probe_ctx, true);
nvme_probe_ctx_init(probe_ctx, trid, (void *)opts, probe_cb, attach_cb, NULL);
rc = nvme_probe_internal(probe_ctx, true);
if (rc != 0) {
free(probe_ctx);
return NULL;

View File

@ -2882,7 +2882,7 @@ spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
nvme_ctrlr_keep_alive(ctrlr);
}
rc = spdk_nvme_io_msg_process(ctrlr);
rc = nvme_io_msg_process(ctrlr);
if (rc < 0) {
nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
return rc;
@ -2945,7 +2945,7 @@ spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
}
static int32_t
spdk_nvme_ctrlr_active_ns_idx(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
nvme_ctrlr_active_ns_idx(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
int32_t result = -1;
@ -2978,7 +2978,7 @@ spdk_nvme_ctrlr_active_ns_idx(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
return spdk_nvme_ctrlr_active_ns_idx(ctrlr, nsid) != -1;
return nvme_ctrlr_active_ns_idx(ctrlr, nsid) != -1;
}
uint32_t
@ -2990,7 +2990,7 @@ spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid)
{
int32_t nsid_idx = spdk_nvme_ctrlr_active_ns_idx(ctrlr, prev_nsid);
int32_t nsid_idx = nvme_ctrlr_active_ns_idx(ctrlr, prev_nsid);
if (ctrlr->active_ns_list && nsid_idx >= 0 && (uint32_t)nsid_idx < ctrlr->num_ns - 1) {
return ctrlr->active_ns_list[nsid_idx + 1];
}

View File

@ -565,7 +565,7 @@ spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page
}
static void
spdk_nvme_ctrlr_cmd_abort_cpl(void *ctx, const struct spdk_nvme_cpl *cpl)
nvme_ctrlr_cmd_abort_cpl(void *ctx, const struct spdk_nvme_cpl *cpl)
{
struct nvme_request *req, *next, *tmp;
struct spdk_nvme_ctrlr *ctrlr;
@ -612,7 +612,7 @@ spdk_nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair
}
nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
req = nvme_allocate_request_null(ctrlr->adminq, spdk_nvme_ctrlr_cmd_abort_cpl, NULL);
req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_cmd_abort_cpl, NULL);
if (req == NULL) {
nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
return -ENOMEM;

View File

@ -1150,7 +1150,7 @@ int nvme_robust_mutex_init_recursive_shared(pthread_mutex_t *mtx);
bool nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl);
struct spdk_nvme_ctrlr *spdk_nvme_get_ctrlr_by_trid_unsafe(
struct spdk_nvme_ctrlr *nvme_get_ctrlr_by_trid_unsafe(
const struct spdk_nvme_transport_id *trid);
const struct spdk_nvme_transport *nvme_get_transport(const char *transport_name);

View File

@ -75,7 +75,7 @@ nvme_io_msg_send(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_io_msg_
}
int
spdk_nvme_io_msg_process(struct spdk_nvme_ctrlr *ctrlr)
nvme_io_msg_process(struct spdk_nvme_ctrlr *ctrlr)
{
int i;
int count;

View File

@ -77,7 +77,7 @@ int nvme_io_msg_send(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_io_
*
* \return number of processed external IO messages.
*/
int spdk_nvme_io_msg_process(struct spdk_nvme_ctrlr *ctrlr);
int nvme_io_msg_process(struct spdk_nvme_ctrlr *ctrlr);
int nvme_io_msg_ctrlr_register(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_io_msg_producer *io_msg_producer);

View File

@ -42,8 +42,8 @@ static inline struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns,
static bool
spdk_nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
uint32_t sectors_per_stripe, uint32_t qdepth)
nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
uint32_t sectors_per_stripe, uint32_t qdepth)
{
uint32_t child_per_io = UINT32_MAX;
@ -466,10 +466,10 @@ spdk_nvme_ns_cmd_compare(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
0, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
} else if (spdk_nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
} else if (nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
@ -499,10 +499,10 @@ spdk_nvme_ns_cmd_compare_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair
apptag_mask, apptag, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
} else if (spdk_nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
} else if (nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
@ -534,10 +534,10 @@ spdk_nvme_ns_cmd_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair
io_flags, 0, 0, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
} else if (spdk_nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
} else if (nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
@ -569,10 +569,10 @@ spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpai
SPDK_NVME_OPC_COMPARE, io_flags, apptag_mask, apptag, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
} else if (spdk_nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
} else if (nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
@ -599,10 +599,10 @@ spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, vo
0, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
} else if (spdk_nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
} else if (nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
@ -630,10 +630,10 @@ spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *q
apptag_mask, apptag, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
} else if (spdk_nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
} else if (nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
@ -664,10 +664,10 @@ spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
io_flags, 0, 0, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
} else if (spdk_nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
} else if (nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
@ -699,10 +699,10 @@ spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *
io_flags, apptag_mask, apptag, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
} else if (spdk_nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
} else if (nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
@ -728,10 +728,10 @@ spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
io_flags, 0, 0, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
} else if (spdk_nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
} else if (nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
@ -757,10 +757,10 @@ spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *
io_flags, apptag_mask, apptag, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
} else if (spdk_nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
} else if (nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
@ -791,10 +791,10 @@ spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
io_flags, 0, 0, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
} else if (spdk_nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
} else if (nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;
@ -826,10 +826,10 @@ spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair
io_flags, apptag_mask, apptag, true);
if (req != NULL) {
return nvme_qpair_submit_request(qpair, req);
} else if (spdk_nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
} else if (nvme_ns_check_request_length(lba_count,
ns->sectors_per_max_io,
ns->sectors_per_stripe,
qpair->ctrlr->opts.io_queue_requests)) {
return -EINVAL;
} else {
return -ENOMEM;

View File

@ -291,7 +291,7 @@ _nvme_pcie_hotplug_monitor(struct spdk_nvme_probe_ctx *probe_ctx)
spdk_nvme_trid_populate_transport(&trid, SPDK_NVME_TRANSPORT_PCIE);
snprintf(trid.traddr, sizeof(trid.traddr), "%s", event.traddr);
ctrlr = spdk_nvme_get_ctrlr_by_trid_unsafe(&trid);
ctrlr = nvme_get_ctrlr_by_trid_unsafe(&trid);
if (ctrlr == NULL) {
return 0;
}
@ -742,7 +742,7 @@ pcie_nvme_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
spdk_nvme_trid_populate_transport(&trid, SPDK_NVME_TRANSPORT_PCIE);
spdk_pci_addr_fmt(trid.traddr, sizeof(trid.traddr), &pci_addr);
ctrlr = spdk_nvme_get_ctrlr_by_trid_unsafe(&trid);
ctrlr = nvme_get_ctrlr_by_trid_unsafe(&trid);
if (!spdk_process_is_primary()) {
if (!ctrlr) {
SPDK_ERRLOG("Controller must be constructed in the primary process first.\n");

View File

@ -123,7 +123,7 @@ struct nvme_tcp_req {
TAILQ_ENTRY(nvme_tcp_req) link;
};
static void spdk_nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req);
static void nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req);
static inline struct nvme_tcp_qpair *
nvme_tcp_qpair(struct spdk_nvme_qpair *qpair)
@ -1082,7 +1082,7 @@ nvme_tcp_qpair_h2c_data_send_complete(void *cb_arg)
assert(tcp_req != NULL);
if (tcp_req->r2tl_remain) {
spdk_nvme_tcp_send_h2c_data(tcp_req);
nvme_tcp_send_h2c_data(tcp_req);
} else {
assert(tcp_req->active_r2ts > 0);
tcp_req->active_r2ts--;
@ -1091,7 +1091,7 @@ nvme_tcp_qpair_h2c_data_send_complete(void *cb_arg)
}
static void
spdk_nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req)
nvme_tcp_send_h2c_data(struct nvme_tcp_req *tcp_req)
{
struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(tcp_req->req->qpair);
struct nvme_tcp_pdu *rsp_pdu;
@ -1200,7 +1200,7 @@ nvme_tcp_r2t_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu)
tcp_req->r2tl_remain = r2t->r2tl;
nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
spdk_nvme_tcp_send_h2c_data(tcp_req);
nvme_tcp_send_h2c_data(tcp_req);
return;
end:

View File

@ -663,7 +663,7 @@ fs_load_cb(__attribute__((unused)) void *ctx,
}
static void
spdk_rocksdb_run(__attribute__((unused)) void *arg1)
rocksdb_run(__attribute__((unused)) void *arg1)
{
struct spdk_bdev *bdev;
@ -691,7 +691,7 @@ fs_unload_cb(__attribute__((unused)) void *ctx,
}
static void
spdk_rocksdb_shutdown(void)
rocksdb_shutdown(void)
{
if (g_fs != NULL) {
spdk_fs_unload(g_fs, fs_unload_cb, NULL);
@ -706,7 +706,7 @@ initialize_spdk(void *arg)
struct spdk_app_opts *opts = (struct spdk_app_opts *)arg;
int rc;
rc = spdk_app_start(opts, spdk_rocksdb_run, NULL);
rc = spdk_app_start(opts, rocksdb_run, NULL);
/*
* TODO: Revisit for case of internal failure of
* spdk_app_start(), itself. At this time, it's known
@ -734,7 +734,7 @@ SpdkEnv::SpdkEnv(Env *base_env, const std::string &dir, const std::string &conf,
spdk_app_opts_init(opts);
opts->name = "rocksdb";
opts->config_file = mConfig.c_str();
opts->shutdown_cb = spdk_rocksdb_shutdown;
opts->shutdown_cb = rocksdb_shutdown;
spdk_fs_set_cache_size(cache_size_in_mb);
g_bdev_name = mBdev;
@ -744,7 +744,7 @@ SpdkEnv::SpdkEnv(Env *base_env, const std::string &dir, const std::string &conf,
;
if (g_spdk_start_failure) {
delete opts;
throw SpdkAppStartException("spdk_app_start() unable to start spdk_rocksdb_run()");
throw SpdkAppStartException("spdk_app_start() unable to start rocksdb_run()");
}
SpdkInitializeThread();

View File

@ -141,7 +141,7 @@ nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
if (direct_connect == true && probe_ctx->probe_cb) {
nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
ctrlr = spdk_nvme_get_ctrlr_by_trid(&probe_ctx->trid);
ctrlr = nvme_get_ctrlr_by_trid(&probe_ctx->trid);
nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
probe_ctx->probe_cb(probe_ctx->cb_ctx, &probe_ctx->trid, &ctrlr->opts);
}
@ -788,21 +788,21 @@ test_nvme_ctrlr_probe(void)
/* test when probe_cb returns false */
MOCK_SET(dummy_probe_cb, false);
spdk_nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
CU_ASSERT(rc == 1);
/* probe_cb returns true but we can't construct a ctrl */
MOCK_SET(dummy_probe_cb, true);
MOCK_SET(nvme_transport_ctrlr_construct, NULL);
spdk_nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
CU_ASSERT(rc == -1);
/* happy path */
MOCK_SET(dummy_probe_cb, true);
MOCK_SET(nvme_transport_ctrlr_construct, &ctrlr);
spdk_nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
CU_ASSERT(rc == 0);
dummy = TAILQ_FIRST(&probe_ctx.init_ctrlrs);
@ -1315,8 +1315,8 @@ test_nvme_ctrlr_probe_internal(void)
ut_test_probe_internal = true;
MOCK_SET(dummy_probe_cb, true);
trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
spdk_nvme_probe_ctx_init(probe_ctx, &trid, NULL, dummy_probe_cb, NULL, NULL);
rc = spdk_nvme_probe_internal(probe_ctx, false);
nvme_probe_ctx_init(probe_ctx, &trid, NULL, dummy_probe_cb, NULL, NULL);
rc = nvme_probe_internal(probe_ctx, false);
CU_ASSERT(rc < 0);
CU_ASSERT(TAILQ_EMPTY(&probe_ctx->init_ctrlrs));

View File

@ -145,7 +145,7 @@ DEFINE_STUB(spdk_pci_device_get_addr, struct spdk_pci_addr, (struct spdk_pci_dev
DEFINE_STUB(nvme_ctrlr_probe, int, (const struct spdk_nvme_transport_id *trid,
struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle), 0);
DEFINE_STUB(spdk_pci_device_is_removed, bool, (struct spdk_pci_device *dev), false);
DEFINE_STUB(spdk_nvme_get_ctrlr_by_trid_unsafe, struct spdk_nvme_ctrlr *,
DEFINE_STUB(nvme_get_ctrlr_by_trid_unsafe, struct spdk_nvme_ctrlr *,
(const struct spdk_nvme_transport_id *trid), NULL);
DEFINE_STUB(spdk_nvme_ctrlr_get_regs_csts, union spdk_nvme_csts_register,
(struct spdk_nvme_ctrlr *ctrlr), {});
@ -343,14 +343,14 @@ test_nvme_pcie_hotplug_monitor(void)
CU_ASSERT(STAILQ_EMPTY(&g_uevents));
STAILQ_INSERT_TAIL(&g_uevents, &entry, link);
MOCK_SET(spdk_nvme_get_ctrlr_by_trid_unsafe, &pctrlr.ctrlr);
MOCK_SET(nvme_get_ctrlr_by_trid_unsafe, &pctrlr.ctrlr);
_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
CU_ASSERT(STAILQ_EMPTY(&g_uevents));
CU_ASSERT(pctrlr.ctrlr.is_failed == true);
pctrlr.ctrlr.is_failed = false;
MOCK_CLEAR(spdk_nvme_get_ctrlr_by_trid_unsafe);
MOCK_CLEAR(nvme_get_ctrlr_by_trid_unsafe);
/* Case 4: SPDK_NVME_UEVENT_REMOVE/ NVME_VFIO */
entry.uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_VFIO;
@ -358,14 +358,14 @@ test_nvme_pcie_hotplug_monitor(void)
snprintf(entry.uevent.traddr, sizeof(entry.uevent.traddr), "0000:05:00.0");
CU_ASSERT(STAILQ_EMPTY(&g_uevents));
STAILQ_INSERT_TAIL(&g_uevents, &entry, link);
MOCK_SET(spdk_nvme_get_ctrlr_by_trid_unsafe, &pctrlr.ctrlr);
MOCK_SET(nvme_get_ctrlr_by_trid_unsafe, &pctrlr.ctrlr);
_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
CU_ASSERT(STAILQ_EMPTY(&g_uevents));
CU_ASSERT(pctrlr.ctrlr.is_failed == true);
pctrlr.ctrlr.is_failed = false;
MOCK_CLEAR(spdk_nvme_get_ctrlr_by_trid_unsafe);
MOCK_CLEAR(nvme_get_ctrlr_by_trid_unsafe);
/* Case 5: Removed device detected in another process */
pctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;