spelling: module

Part of #2256

* calculated
* changing
* deferred
* deinitialize
* initialization
* particular
* receive
* request
* retrieve
* satisfied
* succeed
* thread
* unplugged
* unregister

Change-Id: I13e38f9160cb1a15a87cb5974785a34604124fa3
Signed-off-by: Josh Soref <jsoref@gmail.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10406
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
commit 1960ef167a (parent a82b365b90)

@@ -99,7 +99,7 @@ static TAILQ_HEAD(, compress_dev) g_compress_devs = TAILQ_HEAD_INITIALIZER(g_com
 struct comp_device_qp {
         struct compress_dev *device;        /* ptr to compression device */
         uint8_t qp;                         /* queue pair for this node */
-        struct spdk_thread *thread;         /* thead that this qp is assigned to */
+        struct spdk_thread *thread;         /* thread that this qp is assigned to */
         TAILQ_ENTRY(comp_device_qp) link;
 };
 static TAILQ_HEAD(, comp_device_qp) g_comp_device_qp = TAILQ_HEAD_INITIALIZER(g_comp_device_qp);

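For context (not part of the change): a minimal sketch, under assumptions, of how an entry from this global list might be claimed by the calling SPDK thread. `_assign_qp_to_thread` is a hypothetical helper, not the driver's actual function.

/* Hypothetical helper (illustration only): claim a queue pair for the calling thread. */
static struct comp_device_qp *
_assign_qp_to_thread(void)
{
        struct comp_device_qp *dev_qp;
        struct spdk_thread *cur = spdk_get_thread();

        TAILQ_FOREACH(dev_qp, &g_comp_device_qp, link) {
                if (dev_qp->thread == NULL || dev_qp->thread == cur) {
                        dev_qp->thread = cur;   /* pin this qp to the current thread */
                        return dev_qp;
                }
        }
        return NULL;    /* no free queue pair */
}
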
@@ -982,7 +982,7 @@ _delete_vol_unload_cb(void *ctx)
 {
         struct vbdev_compress *comp_bdev = ctx;

-        /* FIXME: Assert if these conditions are not satisified for now. */
+        /* FIXME: Assert if these conditions are not satisfied for now. */
         assert(!comp_bdev->reduce_thread ||
                comp_bdev->reduce_thread == spdk_get_thread());

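As a hedged aside, the assertion above reflects a common SPDK pattern: work tied to a resource's owning thread is forwarded there with spdk_thread_send_msg(). A minimal sketch, assuming a `reduce_thread` field as in the struct above; `_do_unload` and `schedule_unload` are illustrative names, not the driver's actual functions.

/* Illustration only: run the unload on the thread that owns the volume. */
static void
_do_unload(void *ctx)
{
        struct vbdev_compress *comp_bdev = ctx;

        assert(comp_bdev->reduce_thread == NULL ||
               comp_bdev->reduce_thread == spdk_get_thread());
        /* ... perform the actual unload here ... */
}

static void
schedule_unload(struct vbdev_compress *comp_bdev)
{
        if (comp_bdev->reduce_thread != NULL &&
            comp_bdev->reduce_thread != spdk_get_thread()) {
                spdk_thread_send_msg(comp_bdev->reduce_thread, _do_unload, comp_bdev);
        } else {
                _do_unload(comp_bdev);
        }
}
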
@@ -583,7 +583,7 @@ crypto_dev_poller(void *args)
        for (i = 0; i < num_dequeued_ops; i++) {

                /* We don't know the order or association of the crypto ops wrt any
-                * partiular bdev_io so need to look at each and determine if it's
+                * particular bdev_io so need to look at each and determine if it's
                 * the last one for it's bdev_io or not.
                 */
                bdev_io = (struct spdk_bdev_io *)*RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src, g_mbuf_offset,

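For context, a hedged sketch of the bookkeeping the comment describes: each dequeued crypto op points back to its parent bdev_io, and the parent is completed only when its outstanding-op count drops to zero. The context struct and field names below are illustrative, not the driver's exact ones.

/* Illustration only: per-bdev_io context tracking outstanding crypto ops. */
struct example_crypto_io_ctx {
        uint32_t ops_remaining;         /* crypto ops still in flight for this bdev_io */
        bool failed;                    /* set if any op for this bdev_io failed */
};

static void
example_crypto_op_done(struct spdk_bdev_io *bdev_io, bool op_ok)
{
        struct example_crypto_io_ctx *io_ctx = (struct example_crypto_io_ctx *)bdev_io->driver_ctx;

        if (!op_ok) {
                io_ctx->failed = true;
        }
        assert(io_ctx->ops_remaining > 0);
        if (--io_ctx->ops_remaining == 0) {
                /* Last op for this bdev_io: complete the parent with the aggregate status. */
                spdk_bdev_io_complete(bdev_io, io_ctx->failed ?
                                      SPDK_BDEV_IO_STATUS_FAILED : SPDK_BDEV_IO_STATUS_SUCCESS);
        }
}
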
@@ -546,7 +546,7 @@ delay_bdev_ch_create_cb(void *io_device, void *ctx_buf)

 /* We provide this callback for the SPDK channel code to destroy a channel
  * created with our create callback. We just need to undo anything we did
- * when we created. If this bdev used its own poller, we'd unregsiter it here.
+ * when we created. If this bdev used its own poller, we'd unregister it here.
  */
 static void
 delay_bdev_ch_destroy_cb(void *io_device, void *ctx_buf)

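For reference, a hedged sketch of the symmetric create/destroy channel callbacks the comment describes, for the hypothetical case where a channel does own a poller. The names (`example_io_channel`, `_example_poll`, `example_ch_*`) are illustrative and not the delay bdev's actual implementation.

#include "spdk/stdinc.h"
#include "spdk/thread.h"

/* Illustration only: a per-channel poller created in create_cb and torn down in destroy_cb. */
struct example_io_channel {
        struct spdk_poller *poller;
};

static int
_example_poll(void *arg)
{
        return SPDK_POLLER_IDLE;        /* nothing to do in this sketch */
}

static int
example_ch_create_cb(void *io_device, void *ctx_buf)
{
        struct example_io_channel *ch = ctx_buf;

        ch->poller = spdk_poller_register(_example_poll, ch, 100);
        return ch->poller != NULL ? 0 : -ENOMEM;
}

static void
example_ch_destroy_cb(void *io_device, void *ctx_buf)
{
        struct example_io_channel *ch = ctx_buf;

        /* Undo what the create callback did: unregister the poller. */
        spdk_poller_unregister(&ch->poller);
}
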
@@ -472,7 +472,7 @@ bdev_ftl_finish(void)
 }

 static void
-bdev_ftl_create_defered_cb(const struct ftl_bdev_info *info, void *ctx, int status)
+bdev_ftl_create_deferred_cb(const struct ftl_bdev_info *info, void *ctx, int status)
 {
         struct ftl_deferred_init *opts = ctx;

@@ -501,8 +501,8 @@ bdev_ftl_examine(struct spdk_bdev *bdev)

                LIST_REMOVE(opts, entry);

-                /* spdk_bdev_module_examine_done will be called by bdev_ftl_create_defered_cb */
-                if (bdev_ftl_create_bdev(&opts->opts, bdev_ftl_create_defered_cb, opts)) {
+                /* spdk_bdev_module_examine_done will be called by bdev_ftl_create_deferred_cb */
+                if (bdev_ftl_create_bdev(&opts->opts, bdev_ftl_create_deferred_cb, opts)) {
                         SPDK_ERRLOG("Failed to initialize FTL bdev '%s'\n", opts->opts.name);
                         bdev_ftl_defer_free(opts);
                         break;

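For context, a hedged sketch of the deferred-examine flow these two hunks belong to: examine starts an asynchronous creation and only the completion callback reports back to the bdev layer. `g_example_module` and `example_async_create()` are hypothetical stand-ins, not SPDK's FTL code.

#include "spdk/bdev_module.h"
#include "spdk/log.h"

/* Illustration only: examine defers spdk_bdev_module_examine_done() to an async callback. */
static struct spdk_bdev_module g_example_module;

static int example_async_create(struct spdk_bdev *bdev,
                                void (*cb)(void *ctx, int status), void *ctx);

static void
example_create_done_cb(void *ctx, int status)
{
        if (status != 0) {
                SPDK_ERRLOG("deferred creation failed: %d\n", status);
        }
        /* Only now may the bdev layer continue examining with other modules. */
        spdk_bdev_module_examine_done(&g_example_module);
}

static void
example_examine(struct spdk_bdev *bdev)
{
        if (example_async_create(bdev, example_create_done_cb, NULL) != 0) {
                /* Creation never started, so finish the examine immediately. */
                spdk_bdev_module_examine_done(&g_example_module);
        }
}
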
@@ -202,7 +202,7 @@ gpt_read_header(struct spdk_gpt *gpt)
        to_le32(&head->header_crc32, original_crc);

        if (new_crc != original_crc) {
-                SPDK_ERRLOG("head crc32 does not match, provided=%u, caculated=%u\n",
+                SPDK_ERRLOG("head crc32 does not match, provided=%u, calculated=%u\n",
                            original_crc, new_crc);
                return -1;
        }

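As a hedged illustration of the check this hunk touches: a GPT-style header CRC is verified by saving the stored value, zeroing the CRC field, recomputing the CRC over the header, and comparing. The seed/finalization and field handling below are simplified assumptions, not a copy of SPDK's gpt.c.

#include "spdk/crc32.h"
#include "spdk/log.h"

/* Illustration only: recompute a header CRC with its own CRC field zeroed. */
static int
example_check_header_crc(void *header, uint32_t header_size,
                         uint32_t *crc_field, uint32_t stored_crc)
{
        uint32_t new_crc;

        *crc_field = 0;                 /* the CRC is defined over the header with this field zeroed */
        new_crc = spdk_crc32_ieee_update(header, header_size, ~0U) ^ ~0U;
        *crc_field = stored_crc;        /* restore the on-disk value */

        if (new_crc != stored_crc) {
                SPDK_ERRLOG("head crc32 does not match, provided=%u, calculated=%u\n",
                            stored_crc, new_crc);
                return -1;
        }
        return 0;
}
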
@@ -767,7 +767,7 @@ bdev_nvme_find_io_path(struct nvme_bdev_channel *nbdev_ch)
         * is likely to be non-accessible now but may become accessible.
         *
         * If any io_path has an unfailed ctrlr but find_io_path() returned NULL, the ctrlr
-        * is likely to be resetting now but the reset may succeeed. A ctrlr is set to unfailed
+        * is likely to be resetting now but the reset may succeed. A ctrlr is set to unfailed
         * when starting to reset it but it is set to failed when the reset failed. Hence, if
         * a ctrlr is unfailed, it is likely that it works fine or is resetting.
         */

@@ -1323,7 +1323,7 @@ vbdev_ocf_module_fini(void)
        vbdev_ocf_ctx_cleanup();
 }

-/* When base device gets unpluged this is called
+/* When base device gets unplugged this is called
  * We will unregister cache vbdev here
  * When cache device is removed, we delete every OCF bdev that used it */
 static void

@@ -67,7 +67,7 @@ struct vbdev_ocf_state {
        bool doing_clean_delete;
        /* From the moment when finish started */
        bool doing_finish;
-        /* From the moment when reset IO recieved, until it is completed */
+        /* From the moment when reset IO received, until it is completed */
        bool doing_reset;
        /* From the moment when exp_bdev is registered */
        bool started;

@@ -1253,7 +1253,7 @@ raid_bdev_deconfigure(struct raid_bdev *raid_bdev, raid_bdev_destruct_cb cb_fn,
        raid_bdev->state = RAID_BDEV_STATE_OFFLINE;
        assert(raid_bdev->num_base_bdevs_discovered);
        TAILQ_INSERT_TAIL(&g_raid_bdev_offline_list, raid_bdev, state_link);
-        SPDK_DEBUGLOG(bdev_raid, "raid bdev state chaning from online to offline\n");
+        SPDK_DEBUGLOG(bdev_raid, "raid bdev state changing from online to offline\n");

        spdk_bdev_unregister(&raid_bdev->bdev, cb_fn, cb_arg);
 }

@@ -269,7 +269,7 @@ _zone_block_complete_unmap(struct spdk_bdev_io *bdev_io, bool success, void *cb_
        int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;

        /* Complete the original IO and then free the one that we created here
-        * as a result of issuing an IO via submit_reqeust.
+        * as a result of issuing an IO via submit_request.
         */
        spdk_bdev_io_complete(orig_io, status);
        spdk_bdev_free_io(bdev_io);

@@ -376,7 +376,7 @@ _zone_block_complete_write(struct spdk_bdev_io *bdev_io, bool success, void *cb_
        }

        /* Complete the original IO and then free the one that we created here
-        * as a result of issuing an IO via submit_reqeust.
+        * as a result of issuing an IO via submit_request.
         */
        spdk_bdev_io_complete(orig_io, status);
        spdk_bdev_free_io(bdev_io);

@@ -471,7 +471,7 @@ _zone_block_complete_read(struct spdk_bdev_io *bdev_io, bool success, void *cb_a
        int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;

        /* Complete the original IO and then free the one that we created here
-        * as a result of issuing an IO via submit_reqeust.
+        * as a result of issuing an IO via submit_request.
         */
        spdk_bdev_io_complete(orig_io, status);
        spdk_bdev_free_io(bdev_io);

@@ -119,7 +119,7 @@ _init_core(uint32_t lcore_id)

        rc = rte_power_get_capabilities(lcore_id, &caps);
        if (rc != 0) {
-                SPDK_ERRLOG("Failed retrievie capabilities of core%d\n", lcore_id);
+                SPDK_ERRLOG("Failed retrieve capabilities of core%d\n", lcore_id);
                return rc;
        }

@@ -152,7 +152,7 @@ _init(void)
                return rc;
        }

-        /* When initalization of a core failed, deinitalize prior cores. */
+        /* When initialization of a core failed, deinitialize prior cores. */
        SPDK_ENV_FOREACH_CORE(j) {
                if (j >= i) {
                        break;

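For context, a hedged sketch of the rollback the fixed comment describes: if initializing core i fails, the cores initialized before it are torn down again. `_init_core()` is the function shown in the previous hunk; `_deinit_core()` is a stand-in for the module's per-core teardown hook.

#include "spdk/env.h"

static int _init_core(uint32_t lcore_id);       /* shown in the hunk above */
static void _deinit_core(uint32_t lcore_id);    /* hypothetical per-core teardown */

/* Illustration only: initialize every core, rolling back prior cores on failure. */
static int
example_init_all_cores(void)
{
        uint32_t i, j;
        int rc = 0;

        SPDK_ENV_FOREACH_CORE(i) {
                rc = _init_core(i);
                if (rc != 0) {
                        break;
                }
        }
        if (rc == 0) {
                return 0;
        }

        /* When initialization of a core failed, deinitialize prior cores. */
        SPDK_ENV_FOREACH_CORE(j) {
                if (j >= i) {
                        break;
                }
                _deinit_core(j);
        }
        return rc;
}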