spelling: lib

Part of #2256

* accessible
* activation
* additional
* allocate
* association
* attempt
* barrier
* broadcast
* buffer
* calculate
* cases
* channel
* children
* command
* completion
* connect
* copied
* currently
* descriptor
* destroy
* detachment
* doesn't
* enqueueing
* exceeds
* execution
* extended
* fallback
* finalize
* first
* handling
* hugepages
* ignored
* implementation
* in_capsule
* initialization
* initialized
* initializing
* initiator
* negotiated
* notification
* occurred
* original
* outstanding
* partially
* partition
* processing
* receive
* received
* receiving
* redirected
* regions
* request
* requested
* response
* retrieved
* running
* satisfied
* should
* snapshot
* status
* succeeds
* successfully
* supplied
* those
* transferred
* translate
* triggering
* unregister
* unsupported
* urlsafe
* virtqueue
* volumes
* workaround
* zeroed

Change-Id: I569218754bd9d332ba517d4a61ad23d29eedfd0c
Signed-off-by: Josh Soref <jsoref@gmail.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10405
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Authored by Josh Soref on 2021-11-24 20:40:58 -05:00; committed by Tomasz Zawadzki
parent 56f8181ad5
commit cc6920a476
55 changed files with 144 additions and 143 deletions

@ -45,8 +45,8 @@
/* Accelerator Engine Framework: The following provides a top level
* generic API for the accelerator functions defined here. Modules,
* such as the one in /module/accel/ioat, supply the implemention
* with the exception of the pure software implemention contained
* such as the one in /module/accel/ioat, supply the implementation
* with the exception of the pure software implementation contained
* later in this file.
*/
@ -895,7 +895,7 @@ spdk_accel_batch_cancel(struct spdk_io_channel *ch, struct spdk_accel_batch *bat
struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
struct spdk_accel_task *accel_task;
/* Cancel anything currently oustanding for this batch. */
/* Cancel anything currently outstanding for this batch. */
while ((batch = TAILQ_FIRST(&accel_ch->batches))) {
TAILQ_REMOVE(&accel_ch->batches, batch, link);
while ((accel_task = TAILQ_FIRST(&batch->hw_tasks))) {

@ -2208,7 +2208,7 @@ blob_persist_start(struct spdk_blob_persist_ctx *ctx)
assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages);
ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1;
} else {
/* No change in size occured */
/* No change in size occurred */
blob_persist_generate_new_md(ctx);
return;
}
@ -3591,7 +3591,7 @@ bs_delete_corrupted_blob(void *cb_arg, int bserrno)
}
/* Snapshot and clone have the same copy of cluster map and extent pages
* at this point. Let's clear both for snpashot now,
* at this point. Let's clear both for snapshot now,
* so that it won't be cleared for clone later when we remove snapshot.
* Also set thin provision to pass data corruption check */
for (i = 0; i < ctx->blob->active.num_clusters; i++) {
@ -3646,11 +3646,11 @@ bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno)
}
if (blob->parent_id == ctx->blob->id) {
/* Power failure occured before updating clone (snapshot delete case)
/* Power failure occurred before updating clone (snapshot delete case)
* or after updating clone (creating snapshot case) - keep snapshot */
spdk_blob_close(blob, bs_update_corrupted_blob, ctx);
} else {
/* Power failure occured after updating clone (snapshot delete case)
/* Power failure occurred after updating clone (snapshot delete case)
* or before updating clone (creating snapshot case) - remove snapshot */
spdk_blob_close(blob, bs_delete_corrupted_blob, ctx);
}
@ -5727,7 +5727,7 @@ bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
bs_snapshot_swap_cluster_maps(newblob, origblob);
/* Newblob md sync failed. Valid clusters are only present in origblob.
* Since I/O is frozen on origblob, not changes to zeroed out cluster map should have occured.
* Since I/O is frozen on origblob, not changes to zeroed out cluster map should have occurred.
* Newblob needs to be reverted to thin_provisioned state at creation to properly close. */
blob_set_thin_provision(newblob);
assert(spdk_mem_all_zero(newblob->active.clusters,
@ -6628,7 +6628,7 @@ delete_snapshot_update_extent_pages(void *cb_arg, int bserrno)
continue;
}
/* Clone and snapshot both contain partialy filled matching extent pages.
/* Clone and snapshot both contain partially filled matching extent pages.
* Update the clone extent page in place with cluster map containing the mix of both. */
ctx->next_extent_page = i + 1;

@ -170,7 +170,7 @@ struct spdk_blob {
TAILQ_HEAD(, spdk_blob_persist_ctx) pending_persists;
TAILQ_HEAD(, spdk_blob_persist_ctx) persists_to_complete;
/* Number of data clusters retrived from extent table,
/* Number of data clusters retrieved from extent table,
* that many have to be read from extent pages. */
uint64_t remaining_clusters_in_et;
};

@ -203,7 +203,7 @@ parse_subsystem_event(const char *buf, struct spdk_pci_event *event)
* VFIO hotplug interface is "pci.c:pci_device_rte_dev_event".
* VFIO informs the userspace hotplug through vfio req notifier interrupt.
* The app needs to free the device userspace driver resource first then
* the OS remove the device VFIO driver and boardcast the VFIO uevent.
* the OS remove the device VFIO driver and broadcast the VFIO uevent.
*/
return 0;
}

@ -42,7 +42,7 @@
* It depends on memory usage of OCF which
* in itself depends on the workload
* It is a big number because OCF uses allocators
* for every request it sends and recieves
* for every request it sends and receives
*/
#define ENV_ALLOCATOR_NBUFS 32767
#define GET_ELEMENTS_COUNT(_limit) (_limit < 0 ? ENV_ALLOCATOR_NBUFS : _limit)
@ -160,7 +160,7 @@ static void __attribute__((destructor)) deinit_execution_context(void)
free(exec_context_mutex);
}
/* get_execuction_context must assure that after the call finishes, the caller
/* get_execution_context must assure that after the call finishes, the caller
* will not get preempted from current execution context. For userspace env
* we simulate this behavior by acquiring per execution context mutex. As a
* result the caller might actually get preempted, but no other thread will

@ -145,7 +145,7 @@ static inline void *env_vmalloc(size_t size)
static inline void *env_vzalloc(size_t size)
{
/* TODO: raw_ram init can request huge amount of memory to store
* hashtable in it. need to ensure that allocation succedds */
* hashtable in it. need to ensure that allocation succeeds */
return spdk_zmalloc(size, 0, NULL, SPDK_ENV_LCORE_ID_ANY,
SPDK_MALLOC_DMA);
}

@ -543,7 +543,7 @@ spdk_app_start(struct spdk_app_opts *opts_user, spdk_msg_fn start_fn,
SPDK_NOTICELOG("Total cores available: %d\n", spdk_env_get_core_count());
if ((rc = spdk_reactors_init()) != 0) {
SPDK_ERRLOG("Reactor Initilization failed: rc = %d\n", rc);
SPDK_ERRLOG("Reactor Initialization failed: rc = %d\n", rc);
return 1;
}

@ -459,7 +459,7 @@ spdk_reactor_set_interrupt_mode(uint32_t lcore, bool new_in_interrupt,
assert(ev);
spdk_event_call(ev);
} else {
/* For race caces, when setting the reactor to interrupt mode, first set the
/* For race cases, when setting the reactor to interrupt mode, first set the
* corresponding bit of the notify_cpuset of each reactor and then change the mode.
*/
spdk_for_each_reactor(_reactor_set_notify_cpuset, target, NULL, _reactor_set_notify_cpuset_cpl);

@ -1144,7 +1144,7 @@ ftl_read_next_logical_addr(struct ftl_io *io, struct ftl_addr *addr)
SPDK_DEBUGLOG(ftl_core, "Read addr:%lx, lba:%lu\n",
addr->offset, ftl_io_current_lba(io));
/* If the address is invalid, skip it (the buffer should already be zero'ed) */
/* If the address is invalid, skip it (the buffer should already be zeroed) */
if (ftl_addr_invalid(*addr)) {
return -EFAULT;
}

@ -1322,7 +1322,7 @@ ftl_dev_init_base_bdev(struct spdk_ftl_dev *dev, const char *bdev_name)
}
if (!spdk_bdev_is_zoned(bdev)) {
SPDK_ERRLOG("Bdev dosen't support zone capabilities: %s\n",
SPDK_ERRLOG("Bdev doesn't support zone capabilities: %s\n",
spdk_bdev_get_name(bdev));
return -1;
}
@ -1358,7 +1358,7 @@ ftl_dev_init_base_bdev(struct spdk_ftl_dev *dev, const char *bdev_name)
if (ftl_is_append_supported(dev) &&
!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZONE_APPEND)) {
SPDK_ERRLOG("Bdev dosen't support append: %s\n",
SPDK_ERRLOG("Bdev doesn't support append: %s\n",
spdk_bdev_get_name(bdev));
return -1;
}

@ -127,7 +127,7 @@ struct ftl_io_init_opts {
struct ftl_io_channel;
struct ftl_wbuf_entry {
/* IO channel that owns the write bufer entry */
/* IO channel that owns the write buffer entry */
struct ftl_io_channel *ioch;
/* Data payload (single block) */
void *payload;

@ -762,7 +762,7 @@ spdk_idxd_batch_submit(struct spdk_idxd_io_channel *chan, struct idxd_batch *bat
return rc;
}
/* TODO: pre-tranlate these when allocated for max batch size. */
/* TODO: pre-translate these when allocated for max batch size. */
rc = _vtophys(batch->user_desc, &desc_addr, batch->index * sizeof(struct idxd_hw_desc));
if (rc) {
TAILQ_INSERT_TAIL(&chan->ops_pool, op, link);

@ -103,7 +103,7 @@ struct spdk_idxd_io_channel {
*/
void *desc_base;
TAILQ_HEAD(, idxd_ops) ops_pool;
/* Current list of oustanding operations to poll. */
/* Current list of outstanding operations to poll. */
TAILQ_HEAD(op_head, idxd_ops) ops_outstanding;
void *ops_base;
@ -159,7 +159,7 @@ struct spdk_idxd_impl {
void (*destruct)(struct spdk_idxd_device *idxd);
void (*dump_sw_error)(struct spdk_idxd_device *idxd, void *portal);
char *(*portal_get_addr)(struct spdk_idxd_device *idxd);
/* It is a workround for simulator */
/* It is a workaround for simulator */
bool (*nop_check)(struct spdk_idxd_device *idxd);
STAILQ_ENTRY(spdk_idxd_impl) link;

@ -320,7 +320,7 @@ _kernel_idxd_probe(void *cb_ctx, spdk_idxd_attach_cb attach_cb, int dev_id)
*/
rc = config_wqs(kernel_idxd, dev_id, 0);
if (rc) {
SPDK_ERRLOG("Failed to probe requsted wqs on kernel device context=%p\n", ctx);
SPDK_ERRLOG("Failed to probe requested wqs on kernel device context=%p\n", ctx);
return -ENODEV;
}

@ -64,7 +64,7 @@
* << MORE "subsystems" ARRAY ENTRIES >>
* ]
*
* << ANYTHING ELSE IS IGNORRED IN ROOT OBJECT>>
* << ANYTHING ELSE IS IGNORED IN ROOT OBJECT>>
* }
*
*/

@ -230,7 +230,7 @@ iscsi_init_grp_add_netmask(struct spdk_iscsi_init_grp *ig, char *mask)
imask = calloc(1, sizeof(*imask));
if (imask == NULL) {
SPDK_ERRLOG("malloc() failed for inititator mask str\n");
SPDK_ERRLOG("malloc() failed for initiator mask str\n");
return -ENOMEM;
}

@ -1448,7 +1448,7 @@ iscsi_op_login_check_target(struct spdk_iscsi_conn *conn,
return SPDK_ISCSI_LOGIN_ERROR_RESPONSE;
}
if (iscsi_tgt_node_is_redirected(conn, *target, buf, MAX_TMPBUF)) {
SPDK_INFOLOG(iscsi, "target %s is redirectd\n", target_name);
SPDK_INFOLOG(iscsi, "target %s is redirected\n", target_name);
rsp_pdu->data_segment_len = iscsi_append_text("TargetAddress",
buf,
rsp_pdu->data,

@ -494,7 +494,7 @@ iscsi_get_max_immediate_data_size(void)
* account for a header digest, data digest and additional header
* segments (AHS). These are not normally used but they do not
* take up much space and we need to make sure the worst-case scenario
* can be satisified by the size returned here.
* can be satisfied by the size returned here.
*/
return g_iscsi.FirstBurstLength +
ISCSI_DIGEST_LEN + /* data digest */

@ -367,7 +367,7 @@ iscsi_send_tgt_portals(struct spdk_iscsi_conn *conn,
host, p->port, pg->tag);
memset(tmp_buf, 0, sizeof(tmp_buf));
/* Caculate the whole string size */
/* Calculate the whole string size */
len = snprintf(NULL, 0, "TargetAddress=%s:%s,%d", host, p->port, pg->tag);
assert(len < MAX_TMPBUF);
@ -431,7 +431,7 @@ iscsi_send_tgts(struct spdk_iscsi_conn *conn, const char *iiqn,
len = snprintf(NULL, 0, "TargetName=%s", target->name);
assert(len < MAX_TMPBUF);
/* String contents are not copyied */
/* String contents are not copied */
if (previous_completed_size < len) {
/* Copy the string into the temporary buffer */
snprintf(tmp_buf, len + 1, "TargetName=%s", target->name);
@ -451,7 +451,7 @@ iscsi_send_tgts(struct spdk_iscsi_conn *conn, const char *iiqn,
}
pthread_mutex_unlock(&g_iscsi.mutex);
/* Only set it when it is not succesufully completed */
/* Only set it when it is not successfully completed */
if (no_buf_space) {
conn->send_tgt_completed_size += total;
} else {

@ -125,7 +125,7 @@ jsonrpc_client_recv(struct spdk_jsonrpc_client *client)
rc = recv(client->sockfd, client->recv_buf + client->recv_offset,
client->recv_buf_size - client->recv_offset - 1, 0);
if (rc < 0) {
/* For EINTR we pretend that nothing was reveived. */
/* For EINTR we pretend that nothing was received. */
if (errno == EINTR) {
return 0;
} else {
@ -308,7 +308,7 @@ spdk_jsonrpc_client_connect(const char *addr, int addr_family)
rc = getaddrinfo(host, port, &hints, &res);
if (rc != 0) {
SPDK_ERRLOG("Unable to look up RPC connnect address '%s' (%d): %s\n", addr, rc, gai_strerror(rc));
SPDK_ERRLOG("Unable to look up RPC connect address '%s' (%d): %s\n", addr, rc, gai_strerror(rc));
rc = -EINVAL;
goto err;
}

@ -652,7 +652,7 @@ lvs_rename_cb(void *cb_arg, int lvolerrno)
if (req->lvserrno != 0) {
SPDK_ERRLOG("Lvol store rename operation failed\n");
/* Lvs renaming failed, so we should 'clear' new_name.
* Otherwise it could cause a failure on the next attepmt to change the name to 'new_name' */
* Otherwise it could cause a failure on the next attempt to change the name to 'new_name' */
snprintf(req->lvol_store->new_name,
sizeof(req->lvol_store->new_name),
"%s", req->lvol_store->name);

@ -1037,7 +1037,7 @@ nbd_enable_kernel(void *arg)
NBD_BUSY_POLLING_INTERVAL_US);
return SPDK_POLLER_BUSY;
} else if (ctx->nbd->retry_count-- > 0) {
/* Repeatedly unregiter and register retry poller to avoid scan-build error */
/* Repeatedly unregister and register retry poller to avoid scan-build error */
spdk_poller_unregister(&ctx->nbd->retry_poller);
ctx->nbd->retry_poller = SPDK_POLLER_REGISTER(nbd_enable_kernel, ctx,
NBD_BUSY_POLLING_INTERVAL_US);

@ -170,7 +170,7 @@ spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr,
return -EINVAL;
}
/* Use a context header to poll detachement for multiple controllers.
/* Use a context header to poll detachment for multiple controllers.
* Allocate an new one if not allocated yet, or use the passed one otherwise.
*/
detach_ctx = *_detach_ctx;

@ -91,7 +91,7 @@ spdk_nvme_ctrlr_cmd_io_raw_with_md(struct spdk_nvme_ctrlr *ctrlr,
payload = NVME_PAYLOAD_CONTIG(buf, md_buf);
/* Caculate metadata length */
/* Calculate metadata length */
if (md_buf) {
struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, cmd->nsid);

@ -235,7 +235,7 @@ struct nvme_payload {
spdk_nvme_req_next_sge_cb next_sge_fn;
/**
* Exended IO options passed by the user
* Extended IO options passed by the user
*/
struct spdk_nvme_ns_cmd_ext_io_opts *opts;
/**
@ -780,7 +780,7 @@ enum nvme_ctrlr_state {
NVME_CTRLR_STATE_READY,
/**
* Controller inilialization has an error.
* Controller initialization has an error.
*/
NVME_CTRLR_STATE_ERROR
};

@ -66,7 +66,7 @@ int nvme_io_msg_send(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_io_
* This call process requests from the ring, send IO to an allocated qpair or
* admin commands in its context. This call is non-blocking and intended to be
* polled by SPDK thread to provide safe environment for NVMe request
* completition sent by external module to controller.
* completion sent by external module to controller.
*
* The caller must ensure that each controller is polled by only one thread at
* a time.

@ -538,7 +538,7 @@ opal_response_get_u16(const struct spdk_opal_resp_parsed *resp, int index)
{
uint64_t i = opal_response_get_u64(resp, index);
if (i > 0xffffull) {
SPDK_ERRLOG("parse reponse u16 failed. Overflow\n");
SPDK_ERRLOG("parse response u16 failed. Overflow\n");
return 0;
}
return (uint16_t) i;
@ -549,7 +549,7 @@ opal_response_get_u8(const struct spdk_opal_resp_parsed *resp, int index)
{
uint64_t i = opal_response_get_u64(resp, index);
if (i > 0xffull) {
SPDK_ERRLOG("parse reponse u8 failed. Overflow\n");
SPDK_ERRLOG("parse response u8 failed. Overflow\n");
return 0;
}
return (uint8_t) i;
@ -1207,7 +1207,7 @@ opal_activate(struct spdk_opal_dev *dev, struct opal_session *sess)
return err;
}
/* TODO: Single User Mode for activatation */
/* TODO: Single User Mode for activation */
ret = opal_cmd_finalize(sess, sess->hsn, sess->tsn, true);
if (ret) {

@ -1015,7 +1015,7 @@ nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *re
if (spdk_unlikely(!STAILQ_EMPTY(&qpair->queued_req) && req->num_children == 0)) {
/*
* Requests that have no children should be sent to the transport after all
* currently queued requests. Requests with chilren will be split and go back
* currently queued requests. Requests with children will be split and go back
* through this path. We need to make an exception for the fabrics commands
* while the qpair is connecting to be able to send the connect command
* asynchronously.

@ -1953,7 +1953,7 @@ static struct spdk_nvme_ctrlr *nvme_rdma_ctrlr_construct(const struct spdk_nvme_
STAILQ_INIT(&rctrlr->free_cm_events);
rctrlr->cm_events = nvme_rdma_calloc(NVME_RDMA_NUM_CM_EVENTS, sizeof(*rctrlr->cm_events));
if (rctrlr->cm_events == NULL) {
SPDK_ERRLOG("unable to allocat buffers to hold CM events.\n");
SPDK_ERRLOG("unable to allocate buffers to hold CM events.\n");
goto destruct_ctrlr;
}

@ -572,7 +572,7 @@ nvme_tcp_req_init(struct nvme_tcp_qpair *tqpair, struct nvme_request *req,
struct spdk_nvme_ctrlr *ctrlr = tqpair->qpair.ctrlr;
int rc = 0;
enum spdk_nvme_data_transfer xfer;
uint32_t max_incapsule_data_size;
uint32_t max_in_capsule_data_size;
tcp_req->req = req;
req->cmd.cid = tcp_req->cid;
@ -601,12 +601,12 @@ nvme_tcp_req_init(struct nvme_tcp_qpair *tqpair, struct nvme_request *req,
xfer = spdk_nvme_opc_get_data_transfer(req->cmd.opc);
}
if (xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
max_incapsule_data_size = ctrlr->ioccsz_bytes;
max_in_capsule_data_size = ctrlr->ioccsz_bytes;
if ((req->cmd.opc == SPDK_NVME_OPC_FABRIC) || nvme_qpair_is_admin_queue(&tqpair->qpair)) {
max_incapsule_data_size = SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE;
max_in_capsule_data_size = SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE;
}
if (req->payload_size <= max_incapsule_data_size) {
if (req->payload_size <= max_in_capsule_data_size) {
req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
req->cmd.dptr.sgl1.address = 0;
@ -898,7 +898,7 @@ nvme_tcp_pdu_ch_handle(struct nvme_tcp_qpair *tqpair)
}
} else {
if (spdk_unlikely(!nvme_tcp_qpair_recv_state_valid(tqpair))) {
SPDK_ERRLOG("The TCP/IP tqpair connection is not negotitated\n");
SPDK_ERRLOG("The TCP/IP tqpair connection is not negotiated\n");
fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
goto err;
}
@ -1172,7 +1172,7 @@ nvme_tcp_send_icreq_complete(void *cb_arg)
tqpair->flags.icreq_send_ack = true;
if (tqpair->state == NVME_TCP_QPAIR_STATE_INITIALIZING) {
SPDK_DEBUGLOG(nvme, "tqpair %p %u, finilize icresp\n", tqpair, tqpair->qpair.id);
SPDK_DEBUGLOG(nvme, "tqpair %p %u, finalize icresp\n", tqpair, tqpair->qpair.id);
tqpair->state = NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND;
}
}
@ -1297,7 +1297,7 @@ nvme_tcp_c2h_term_req_hdr_handle(struct nvme_tcp_qpair *tqpair,
enum spdk_nvme_tcp_term_req_fes fes;
if (c2h_term_req->fes > SPDK_NVME_TCP_TERM_REQ_FES_INVALID_DATA_UNSUPPORTED_PARAMETER) {
SPDK_ERRLOG("Fatal Error Stauts(FES) is unknown for c2h_term_req pdu=%p\n", pdu);
SPDK_ERRLOG("Fatal Error Status(FES) is unknown for c2h_term_req pdu=%p\n", pdu);
fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
error_offset = offsetof(struct spdk_nvme_tcp_term_req_hdr, fes);
goto end;
@ -2127,7 +2127,7 @@ static struct spdk_nvme_ctrlr *nvme_tcp_ctrlr_construct(const struct spdk_nvme_t
static uint32_t
nvme_tcp_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
/* TCP transport doens't limit maximum IO transfer size. */
/* TCP transport doesn't limit maximum IO transfer size. */
return UINT32_MAX;
}

@ -1584,7 +1584,7 @@ nvmf_ctrlr_get_features_reservation_notification_mask(struct spdk_nvmf_request *
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
struct spdk_nvmf_ns *ns;
SPDK_DEBUGLOG(nvmf, "get Features - Reservation Notificaton Mask\n");
SPDK_DEBUGLOG(nvmf, "get Features - Reservation Notification Mask\n");
if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) {
SPDK_ERRLOG("get Features - Invalid Namespace ID\n");
@ -1612,7 +1612,7 @@ nvmf_ctrlr_set_features_reservation_notification_mask(struct spdk_nvmf_request *
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
struct spdk_nvmf_ns *ns;
SPDK_DEBUGLOG(nvmf, "Set Features - Reservation Notificaton Mask\n");
SPDK_DEBUGLOG(nvmf, "Set Features - Reservation Notification Mask\n");
if (cmd->nsid == SPDK_NVME_GLOBAL_NS_TAG) {
for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
@ -1960,7 +1960,7 @@ nvmf_get_firmware_slot_log_page(struct iovec *iovs, int iovcnt, uint64_t offset,
* Asynchronous Event Mask Bit
*/
enum spdk_nvme_async_event_mask_bit {
/* Mask Namespace Change Notificaton */
/* Mask Namespace Change Notification */
SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGE_MASK_BIT = 0,
/* Mask Asymmetric Namespace Access Change Notification */
SPDK_NVME_ASYNC_EVENT_ANA_CHANGE_MASK_BIT = 1,
@ -3159,7 +3159,7 @@ nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
if (g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].hdlr && cmd->opc != SPDK_NVME_OPC_ABORT) {
rc = g_nvmf_custom_admin_cmd_hdlrs[cmd->opc].hdlr(req);
if (rc >= SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
/* The handler took care of this commmand */
/* The handler took care of this command */
return rc;
}
}

@ -1758,7 +1758,7 @@ nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) {
fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req);
if (!nvmf_fc_request_execute(fc_req)) {
/* Succesfuly posted, Delete from pending. */
/* Successfully posted, Delete from pending. */
nvmf_fc_request_remove_from_pending(fc_req);
}
@ -1833,7 +1833,7 @@ nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
fc_conn->rsp_count++;
if (nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count,
fc_req->transfered_len)) {
fc_req->transferred_len)) {
/* Fill ERSP Len */
to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) /
sizeof(uint32_t)));
@ -1844,7 +1844,7 @@ nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
fc_conn->rsn++;
/* Fill transfer length */
to_be32(&fc_req->ersp.transferred_data_len, fc_req->transfered_len);
to_be32(&fc_req->ersp.transferred_data_len, fc_req->transferred_len);
SPDK_DEBUGLOG(nvmf_fc, "Posting ERSP.\n");
rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp,
@ -3440,7 +3440,7 @@ out:
} else {
SPDK_DEBUGLOG(nvmf_fc_adm_api,
"NPort %d delete done succesfully, fc port:%d. "
"NPort %d delete done successfully, fc port:%d. "
"rport_cnt:%d\n",
args->nport_handle, args->port_handle, rport_cnt);
}

@ -185,19 +185,19 @@ nvmf_fc_ls_format_rjt(void *buf, uint16_t buflen, uint8_t ls_cmd,
return sizeof(struct spdk_nvmf_fc_ls_rjt);
}
/* ************************************************** */
/* Allocators/Deallocators (assocations, connections, */
/* poller API data) */
/* *************************************************** */
/* Allocators/Deallocators (associations, connections, */
/* poller API data) */
static inline void
nvmf_fc_ls_free_association(struct spdk_nvmf_fc_association *assoc)
{
/* free assocation's send disconnect buffer */
/* free association's send disconnect buffer */
if (assoc->snd_disconn_bufs) {
nvmf_fc_free_srsr_bufs(assoc->snd_disconn_bufs);
}
/* free assocation's connections */
/* free association's connections */
free(assoc->conns_buf);
/* free the association */
@ -366,9 +366,9 @@ nvmf_fc_ls_new_connection(struct spdk_nvmf_fc_association *assoc, uint16_t qid,
return fc_conn;
}
/* End - Allocators/Deallocators (assocations, connections, */
/* poller API data) */
/* ******************************************************** */
/* End - Allocators/Deallocators (associations, connections, */
/* poller API data) */
/* ********************************************************* */
static inline struct spdk_nvmf_fc_association *
nvmf_fc_ls_find_assoc(struct spdk_nvmf_fc_nport *tgtport, uint64_t assoc_id)
@ -883,7 +883,7 @@ nvmf_fc_ls_validate_host(struct spdk_nvmf_subsystem *subsystem, const char *host
}
/* **************************** */
/* LS Reqeust Handler Functions */
/* LS Request Handler Functions */
static void
nvmf_fc_ls_process_cass(uint32_t s_id,

@ -950,7 +950,7 @@ spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
}
}
/* We add the qpair to the group only it is succesfully added into the tgroup */
/* We add the qpair to the group only it is successfully added into the tgroup */
if (rc == 0) {
SPDK_DTRACE_PROBE2(nvmf_poll_group_add_qpair, qpair, spdk_thread_get_id(group->thread));
TAILQ_INSERT_TAIL(&group->qpairs, qpair, link);

@ -121,7 +121,7 @@ struct spdk_nvmf_fc_buffer_desc {
};
/*
* ABTS hadling context
* ABTS handling context
*/
struct spdk_nvmf_fc_abts_ctx {
bool handled;
@ -250,7 +250,7 @@ struct spdk_nvmf_fc_conn {
/* for association's connection list */
TAILQ_ENTRY(spdk_nvmf_fc_conn) assoc_link;
/* for assocations's available connection list */
/* for associations's available connection list */
TAILQ_ENTRY(spdk_nvmf_fc_conn) assoc_avail_link;
/* for hwqp's rport connection list link */
@ -375,7 +375,7 @@ struct spdk_nvmf_fc_request {
struct spdk_nvmf_fc_conn *fc_conn;
struct spdk_nvmf_fc_hwqp *hwqp;
int state;
uint32_t transfered_len;
uint32_t transferred_len;
bool is_aborted;
uint32_t magic;
uint32_t s_id;

@ -178,7 +178,7 @@ struct spdk_nvmf_ns {
struct spdk_bdev *bdev;
struct spdk_bdev_desc *desc;
struct spdk_nvmf_ns_opts opts;
/* reservation notificaton mask */
/* reservation notification mask */
uint32_t mask;
/* generation code */
uint32_t gen;
@ -212,7 +212,7 @@ struct spdk_nvmf_ctrlr_feat {
};
/*
* NVMf reservation notificaton log page.
* NVMf reservation notification log page.
*/
struct spdk_nvmf_reservation_log {
struct spdk_nvme_reservation_notification_log log;
@ -492,7 +492,7 @@ int nvmf_bdev_ctrlr_start_zcopy(struct spdk_bdev *bdev,
* Ends a zcopy operation
*
* \param req The NVMe-oF request
* \param commit Flag indicating whether the buffers shoule be committed
* \param commit Flag indicating whether the buffers should be committed
*
* \return 0 upon success
* \return <0 on error

@ -2956,7 +2956,7 @@ nvmf_process_cm_event(struct spdk_nvmf_transport *transport)
* which triggers RDMA_CM_EVENT_DEVICE_REMOVAL on all cma_ids.
* Once these events are sent to SPDK, we should release all IB resources and
* don't make attempts to call any ibv_query/modify/create functions. We can only call
* ibv_destory* functions to release user space memory allocated by IB. All kernel
* ibv_destroy* functions to release user space memory allocated by IB. All kernel
* resources are already cleaned. */
if (event->id->qp) {
/* If rdma_cm event has a valid `qp` pointer then the event refers to the

@ -2699,7 +2699,7 @@ nvmf_ns_reservation_acquire(struct spdk_nvmf_ns *ns,
/* do nothing */
update_sgroup = false;
} else if (ns->holder == NULL) {
/* fisrt time to acquire the reservation */
/* first time to acquire the reservation */
nvmf_ns_reservation_acquire_reservation(ns, key.crkey, rtype, reg);
} else {
SPDK_ERRLOG("Invalid rtype or current registrant is not holder\n");

@ -208,7 +208,7 @@ struct spdk_nvmf_tcp_req {
* twice at the same time, add a debug flag here for init/fini.
*/
bool pdu_in_use;
bool has_incapsule_data;
bool has_in_capsule_data;
/* transfer_tag */
uint16_t ttag;
@ -394,7 +394,7 @@ nvmf_tcp_req_get(struct spdk_nvmf_tcp_qpair *tqpair)
memset(&tcp_req->rsp, 0, sizeof(tcp_req->rsp));
tcp_req->h2c_offset = 0;
tcp_req->has_incapsule_data = false;
tcp_req->has_in_capsule_data = false;
tcp_req->req.dif_enabled = false;
TAILQ_REMOVE(&tqpair->tcp_req_free_queue, tcp_req, state_link);
@ -972,7 +972,7 @@ nvmf_tcp_qpair_init_mem_resource(struct spdk_nvmf_tcp_qpair *tqpair)
}
}
/* Add addtional 2 members, which will be used for mgmt_pdu and pdu_in_progress owned by the tqpair */
/* Add additional 2 members, which will be used for mgmt_pdu and pdu_in_progress owned by the tqpair */
tqpair->pdus = spdk_dma_zmalloc((tqpair->resource_count + 2) * sizeof(*tqpair->pdus), 0x1000, NULL);
if (!tqpair->pdus) {
SPDK_ERRLOG("Unable to allocate pdu pool on tqpair =%p.\n", tqpair);
@ -1542,7 +1542,7 @@ nvmf_tcp_h2c_data_hdr_handle(struct spdk_nvmf_tcp_transport *ttransport,
if ((h2c_data->datao + h2c_data->datal) > tcp_req->req.length) {
SPDK_DEBUGLOG(nvmf_tcp,
"tcp_req(%p), tqpair=%p, (datao=%u + datal=%u) execeeds requested length=%u\n",
"tcp_req(%p), tqpair=%p, (datao=%u + datal=%u) exceeds requested length=%u\n",
tcp_req, tqpair, h2c_data->datao, h2c_data->datal, tcp_req->req.length);
fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_OUT_OF_RANGE;
goto err;
@ -1683,7 +1683,7 @@ nvmf_tcp_h2c_data_payload_handle(struct spdk_nvmf_tcp_transport *ttransport,
* acknowledged before moving on. */
if (tcp_req->h2c_offset == tcp_req->req.length &&
tcp_req->state == TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER) {
/* After receving all the h2c data, we need to check whether there is
/* After receiving all the h2c data, we need to check whether there is
* transient transport error */
rsp = &tcp_req->req.rsp->nvme_cpl;
if (spdk_unlikely(rsp->status.sc == SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR)) {
@ -1946,7 +1946,7 @@ nvmf_tcp_pdu_ch_handle(struct spdk_nvmf_tcp_qpair *tqpair)
}
} else {
if (tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) {
SPDK_ERRLOG("The TCP/IP connection is not negotitated\n");
SPDK_ERRLOG("The TCP/IP connection is not negotiated\n");
fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
goto err;
}
@ -2228,7 +2228,7 @@ nvmf_tcp_req_parse_sgl(struct spdk_nvmf_tcp_req *tcp_req,
sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) {
uint64_t offset = sgl->address;
uint32_t max_len = transport->opts.in_capsule_data_size;
assert(tcp_req->has_incapsule_data);
assert(tcp_req->has_in_capsule_data);
SPDK_DEBUGLOG(nvmf_tcp, "In-capsule data: offset 0x%" PRIx64 ", length 0x%x\n",
offset, length);
@ -2474,8 +2474,8 @@ request_transfer_out(struct spdk_nvmf_request *req)
}
static void
nvmf_tcp_set_incapsule_data(struct spdk_nvmf_tcp_qpair *tqpair,
struct spdk_nvmf_tcp_req *tcp_req)
nvmf_tcp_set_in_capsule_data(struct spdk_nvmf_tcp_qpair *tqpair,
struct spdk_nvmf_tcp_req *tcp_req)
{
struct nvme_tcp_pdu *pdu;
uint32_t plen = 0;
@ -2488,7 +2488,7 @@ nvmf_tcp_set_incapsule_data(struct spdk_nvmf_tcp_qpair *tqpair,
}
if (pdu->hdr.common.plen != plen) {
tcp_req->has_incapsule_data = true;
tcp_req->has_in_capsule_data = true;
}
}
@ -2554,15 +2554,15 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
/* If no data to transfer, ready to execute. */
if (tcp_req->req.xfer == SPDK_NVME_DATA_NONE) {
/* Reset the tqpair receving pdu state */
/* Reset the tqpair receiving pdu state */
nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE);
break;
}
nvmf_tcp_set_incapsule_data(tqpair, tcp_req);
nvmf_tcp_set_in_capsule_data(tqpair, tcp_req);
if (!tcp_req->has_incapsule_data) {
if (!tcp_req->has_in_capsule_data) {
nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
}
@ -2574,7 +2574,7 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
assert(tcp_req->req.xfer != SPDK_NVME_DATA_NONE);
if (!tcp_req->has_incapsule_data && (&tcp_req->req != STAILQ_FIRST(&group->pending_buf_queue))) {
if (!tcp_req->has_in_capsule_data && (&tcp_req->req != STAILQ_FIRST(&group->pending_buf_queue))) {
SPDK_DEBUGLOG(nvmf_tcp,
"Not the first element to wait for the buf for tcp_req(%p) on tqpair=%p\n",
tcp_req, tqpair);
@ -2586,7 +2586,7 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
rc = nvmf_tcp_req_parse_sgl(tcp_req, transport, group);
if (rc < 0) {
STAILQ_REMOVE_HEAD(&group->pending_buf_queue, buf_link);
/* Reset the tqpair receving pdu state */
/* Reset the tqpair receiving pdu state */
nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE);
tcp_req->req.rsp->nvme_cpl.cid = tcp_req->req.cmd->nvme_cmd.cid;
@ -2680,8 +2680,9 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
spdk_trace_record(TRACE_TCP_REQUEST_STATE_COMPLETED, 0, 0, (uintptr_t)tcp_req, tqpair);
if (tcp_req->req.data_from_pool) {
spdk_nvmf_request_free_buffers(&tcp_req->req, group, transport);
} else if (spdk_unlikely(tcp_req->has_incapsule_data && (tcp_req->cmd.opc == SPDK_NVME_OPC_FABRIC ||
tqpair->qpair.qid == 0) && tcp_req->req.length > transport->opts.in_capsule_data_size)) {
} else if (spdk_unlikely(tcp_req->has_in_capsule_data &&
(tcp_req->cmd.opc == SPDK_NVME_OPC_FABRIC ||
tqpair->qpair.qid == 0) && tcp_req->req.length > transport->opts.in_capsule_data_size)) {
tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
assert(tgroup->control_msg_list);
SPDK_DEBUGLOG(nvmf_tcp, "Put buf to control msg list\n");

@ -895,7 +895,7 @@ post_completion(struct nvmf_vfio_user_ctrlr *ctrlr, struct nvme_q *cq,
/*
* this function now executes at SPDK thread context, we
* might be triggerring interrupts from vfio-user thread context so
* might be triggering interrupts from vfio-user thread context so
* check for race conditions.
*/
if (ctrlr_interrupt_enabled(ctrlr) && cq->ien) {

@ -407,7 +407,7 @@ _alloc_zero_buff(void)
{
int rc = 0;
/* The zero buffer is shared between all volumnes and just used
/* The zero buffer is shared between all volumes and just used
* for reads so allocate one global instance here if not already
* allocated when another vol init'd or loaded.
*/

@ -939,7 +939,7 @@ bdev_scsi_mode_sense_page(struct spdk_bdev *bdev,
/* Obsolete (Medium Types Supported) */
break;
case 0x0c:
/* Obsolete (Notch And Partitio) */
/* Obsolete (Notch And Partition) */
break;
case 0x0d:
/* Obsolete */
@ -1117,14 +1117,14 @@ bdev_scsi_mode_sense(struct spdk_bdev *bdev, int md,
hdr[0] = total - 1; /* Mode Data Length */
hdr[1] = 0; /* Medium Type */
hdr[2] = 0; /* Device-Specific Parameter */
hdr[3] = blen; /* Block Descripter Length */
hdr[3] = blen; /* Block Descriptor Length */
} else {
to_be16(&hdr[0], total - 2); /* Mode Data Length */
hdr[2] = 0; /* Medium Type */
hdr[3] = 0; /* Device-Specific Parameter */
hdr[4] = llbaa ? 0x1 : 0; /* Long/short LBA */
hdr[5] = 0; /* Reserved */
to_be16(&hdr[6], blen); /* Block Descripter Length */
to_be16(&hdr[6], blen); /* Block Descriptor Length */
}
bdesc = &data[hlen];

@ -203,7 +203,7 @@ scsi_pr_out_reserve(struct spdk_scsi_task *task,
/* TODO: don't support now */
if (spec_i_pt || all_tg_pt || aptpl) {
SPDK_ERRLOG("Unspported spec_i_pt/all_tg_pt fields "
SPDK_ERRLOG("Unsupported spec_i_pt/all_tg_pt fields "
"or invalid aptpl field\n");
spdk_scsi_task_set_status(task, SPDK_SCSI_STATUS_CHECK_CONDITION,
SPDK_SCSI_SENSE_ILLEGAL_REQUEST,

@ -104,7 +104,7 @@ struct spdk_poller {
};
enum spdk_thread_state {
/* The thread is pocessing poller and message by spdk_thread_poll(). */
/* The thread is processing poller and message by spdk_thread_poll(). */
SPDK_THREAD_STATE_RUNNING,
/* The thread is in the process of termination. It reaps unregistering
@ -1547,7 +1547,7 @@ poller_register(spdk_poller_fn fn,
spdk_poller_register_interrupt(poller, period_poller_set_interrupt_mode, NULL);
} else {
/* If the poller doesn't have a period, create interruptfd that's always
* busy automatically when runnning in interrupt mode.
* busy automatically when running in interrupt mode.
*/
rc = busy_poller_interrupt_init(poller);
if (rc > 0) {
@ -2652,7 +2652,7 @@ spdk_interrupt_mode_enable(void)
* g_spdk_msg_mempool will be valid if thread library is initialized.
*/
if (g_spdk_msg_mempool) {
SPDK_ERRLOG("Failed due to threading library is already initailzied.\n");
SPDK_ERRLOG("Failed due to threading library is already initialized.\n");
return -1;
}

@ -277,7 +277,7 @@ spdk_trace_parser::populate_events(spdk_trace_history *history, int num_entries)
/*
* We keep track of the highest first TSC out of all reactors.
* We will ignore any events that occured before this TSC on any
* We will ignore any events that occurred before this TSC on any
* other reactors. This will ensure we only print data for the
* subset of time where we have data across all reactors.
*/

@ -53,7 +53,7 @@ static const char base64_enc_table[] =
"abcdefghijklmnopqrstuvwxyz"
"0123456789+/";
static const char base64_urfsafe_enc_table[] =
static const char base64_urlsafe_enc_table[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789-_";
@ -156,7 +156,7 @@ spdk_base64_encode(char *dst, const void *src, size_t src_len)
int
spdk_base64_urlsafe_encode(char *dst, const void *src, size_t src_len)
{
return base64_encode(dst, base64_urfsafe_enc_table, src, src_len);
return base64_encode(dst, base64_urlsafe_enc_table, src, src_len);
}
#if defined(__aarch64__) && !defined(__ARM_FEATURE_SVE)

@ -47,7 +47,7 @@ struct vfio_user_request {
struct vfio_user_header hdr;
#define VFIO_USER_MAX_PAYLOAD_SIZE (4096)
uint8_t payload[VFIO_USER_MAX_PAYLOAD_SIZE];
int fds[VFIO_MAXIMUM_SPARSE_MMAP_REGISONS];
int fds[VFIO_MAXIMUM_SPARSE_MMAP_REGIONS];
int fd_num;
};
@ -75,7 +75,7 @@ vfio_user_write(int fd, void *buf, int len, int *fds, int num_fds)
struct msghdr msgh;
struct iovec iov;
size_t fd_size = num_fds * sizeof(int);
char control[CMSG_SPACE(VFIO_MAXIMUM_SPARSE_MMAP_REGISONS * sizeof(int))];
char control[CMSG_SPACE(VFIO_MAXIMUM_SPARSE_MMAP_REGIONS * sizeof(int))];
struct cmsghdr *cmsg;
memset(&msgh, 0, sizeof(msgh));
@ -87,7 +87,7 @@ vfio_user_write(int fd, void *buf, int len, int *fds, int num_fds)
msgh.msg_iov = &iov;
msgh.msg_iovlen = 1;
assert(num_fds <= VFIO_MAXIMUM_SPARSE_MMAP_REGISONS);
assert(num_fds <= VFIO_MAXIMUM_SPARSE_MMAP_REGIONS);
if (fds && num_fds) {
msgh.msg_control = control;
@ -119,7 +119,7 @@ read_fd_message(int sockfd, char *buf, int buflen, int *fds, int *fd_num)
{
struct iovec iov;
struct msghdr msgh;
char control[CMSG_SPACE(VFIO_MAXIMUM_SPARSE_MMAP_REGISONS * sizeof(int))];
char control[CMSG_SPACE(VFIO_MAXIMUM_SPARSE_MMAP_REGIONS * sizeof(int))];
struct cmsghdr *cmsg;
int got_fds = 0;
int ret;
@ -148,7 +148,7 @@ read_fd_message(int sockfd, char *buf, int buflen, int *fds, int *fd_num)
(cmsg->cmsg_type == SCM_RIGHTS)) {
got_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
*fd_num = got_fds;
assert(got_fds <= VFIO_MAXIMUM_SPARSE_MMAP_REGISONS);
assert(got_fds <= VFIO_MAXIMUM_SPARSE_MMAP_REGIONS);
memcpy(fds, CMSG_DATA(cmsg), got_fds * sizeof(int));
break;
}

@ -42,7 +42,7 @@
/* Maximum memory regions supported */
#define VFIO_MAXIMUM_MEMORY_REGIONS 128
/* Maximum sparse memory regions in one BAR region */
#define VFIO_MAXIMUM_SPARSE_MMAP_REGISONS 8
#define VFIO_MAXIMUM_SPARSE_MMAP_REGIONS 8
struct vfio_memory_region {
uint64_t iova;
@ -64,7 +64,7 @@ struct vfio_pci_region {
size_t size;
uint64_t flags;
uint32_t nr_mmaps;
struct vfio_sparse_mmaps mmaps[VFIO_MAXIMUM_SPARSE_MMAP_REGISONS];
struct vfio_sparse_mmaps mmaps[VFIO_MAXIMUM_SPARSE_MMAP_REGIONS];
};
struct vfio_device {

@ -297,7 +297,7 @@ vfio_device_map_bars_and_config_region(struct vfio_device *device)
uint32_t i;
int ret;
size_t len = 4096;
int fds[VFIO_MAXIMUM_SPARSE_MMAP_REGISONS];
int fds[VFIO_MAXIMUM_SPARSE_MMAP_REGIONS];
struct vfio_region_info *info;
uint8_t *buf;
@ -312,7 +312,7 @@ vfio_device_map_bars_and_config_region(struct vfio_device *device)
memset(fds, 0, sizeof(fds));
info->index = i;
ret = vfio_user_get_dev_region_info(device, info, len, fds, VFIO_MAXIMUM_SPARSE_MMAP_REGISONS);
ret = vfio_user_get_dev_region_info(device, info, len, fds, VFIO_MAXIMUM_SPARSE_MMAP_REGIONS);
if (ret) {
SPDK_ERRLOG("Device setup bar %d failed\n", ret);
free(buf);

@ -65,7 +65,7 @@ static sem_t g_dpdk_sem;
static int g_dpdk_response;
struct vhost_session_fn_ctx {
/** Device pointer obtained before enqueuing the event */
/** Device pointer obtained before enqueueing the event */
struct spdk_vhost_dev *vdev;
/** ID of the session to send event to. */

@ -168,7 +168,7 @@ invalid_blk_request(struct spdk_vhost_blk_task *task, uint8_t status)
/*
* Process task's descriptor chain and setup data related fields.
* Return
* total size of suplied buffers
* total size of supplied buffers
*
* FIXME: Make this function return to rd_cnt and wr_cnt
*/
@ -487,7 +487,7 @@ process_blk_request(struct spdk_vhost_blk_task *task,
type = req->type;
#ifdef VIRTIO_BLK_T_BARRIER
/* Don't care about barier for now (as QEMU's virtio-blk do). */
/* Don't care about barrier for now (as QEMU's virtio-blk do). */
type &= ~VIRTIO_BLK_T_BARRIER;
#endif
@ -562,7 +562,7 @@ process_blk_request(struct spdk_vhost_blk_task *task,
}
/* Unmap this range, SPDK doesn't support it, kernel will enable this flag by default
* without checking unmap feature is negociated or not, the flag isn't mandatory, so
* without checking unmap feature is negotiated or not, the flag isn't mandatory, so
* just print a warning.
*/
if (desc->flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
@ -1233,7 +1233,7 @@ alloc_task_pool(struct spdk_vhost_blk_session *bvsession)
task_cnt = vq->vring.size;
if (task_cnt > SPDK_VHOST_MAX_VQ_SIZE) {
/* sanity check */
SPDK_ERRLOG("%s: virtuque %"PRIu16" is too big. (size = %"PRIu32", max = %"PRIu32")\n",
SPDK_ERRLOG("%s: virtqueue %"PRIu16" is too big. (size = %"PRIu32", max = %"PRIu32")\n",
vsession->name, i, task_cnt, SPDK_VHOST_MAX_VQ_SIZE);
free_task_pool(bvsession);
return -1;

@ -184,7 +184,7 @@ struct spdk_vhost_dev {
const struct spdk_vhost_dev_backend *backend;
/* Saved orginal values used to setup coalescing to avoid integer
/* Saved original values used to setup coalescing to avoid integer
* rounding issues during save/load config.
*/
uint32_t coalescing_delay_us;
@ -251,7 +251,7 @@ uint16_t vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *vq, uint16_t *reqs
/**
* Get a virtio split descriptor at given index in given virtqueue.
* The descriptor will provide access to the entire descriptor
* chain. The subsequent descriptors are accesible via
* chain. The subsequent descriptors are accessible via
* \c spdk_vhost_vring_desc_get_next.
* \param vsession vhost session
* \param vq virtqueue
@ -272,7 +272,7 @@ int vhost_vq_get_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_vir
/**
* Get a virtio packed descriptor at given index in given virtqueue.
* The descriptor will provide access to the entire descriptor
* chain. The subsequent descriptors are accesible via
* chain. The subsequent descriptors are accessible via
* \c vhost_vring_packed_desc_get_next.
* \param vsession vhost session
* \param vq virtqueue

@ -1027,7 +1027,7 @@ vhost_scsi_session_add_tgt(struct spdk_vhost_dev *vdev,
rc = spdk_scsi_dev_allocate_io_channels(svsession->scsi_dev_state[scsi_tgt_num].dev);
if (rc != 0) {
SPDK_ERRLOG("%s: Couldn't allocate io channnel for SCSI target %u.\n",
SPDK_ERRLOG("%s: Couldn't allocate io channel for SCSI target %u.\n",
vsession->name, scsi_tgt_num);
/* unset the SCSI target so that all I/O to it will be rejected */
@ -1337,7 +1337,7 @@ alloc_task_pool(struct spdk_vhost_scsi_session *svsession)
task_cnt = vq->vring.size;
if (task_cnt > SPDK_VHOST_MAX_VQ_SIZE) {
/* sanity check */
SPDK_ERRLOG("%s: virtuque %"PRIu16" is too big. (size = %"PRIu32", max = %"PRIu32")\n",
SPDK_ERRLOG("%s: virtqueue %"PRIu16" is too big. (size = %"PRIu32", max = %"PRIu32")\n",
vsession->name, i, task_cnt, SPDK_VHOST_MAX_VQ_SIZE);
free_task_pool(svsession);
return -1;

@ -148,7 +148,7 @@ struct hugepage_file_info {
* We choose option 2.
*/
static int
get_hugepage_file_info(struct hugepage_file_info huges[], int max)
get_hugepage_file_info(struct hugepage_file_info hugepages[], int max)
{
int idx, rc;
FILE *f;
@ -210,15 +210,15 @@ get_hugepage_file_info(struct hugepage_file_info huges[], int max)
}
if (idx > 0 &&
strncmp(tmp, huges[idx - 1].path, PATH_MAX) == 0 &&
v_start == huges[idx - 1].addr + huges[idx - 1].size) {
huges[idx - 1].size += (v_end - v_start);
strncmp(tmp, hugepages[idx - 1].path, PATH_MAX) == 0 &&
v_start == hugepages[idx - 1].addr + hugepages[idx - 1].size) {
hugepages[idx - 1].size += (v_end - v_start);
continue;
}
huges[idx].addr = v_start;
huges[idx].size = v_end - v_start;
snprintf(huges[idx].path, PATH_MAX, "%s", tmp);
hugepages[idx].addr = v_start;
hugepages[idx].size = v_end - v_start;
snprintf(hugepages[idx].path, PATH_MAX, "%s", tmp);
idx++;
}
@ -232,9 +232,9 @@ static int
prepare_vhost_memory_user(struct vhost_user_msg *msg, int fds[])
{
int i, num;
struct hugepage_file_info huges[VHOST_USER_MEMORY_MAX_NREGIONS];
struct hugepage_file_info hugepages[VHOST_USER_MEMORY_MAX_NREGIONS];
num = get_hugepage_file_info(huges, VHOST_USER_MEMORY_MAX_NREGIONS);
num = get_hugepage_file_info(hugepages, VHOST_USER_MEMORY_MAX_NREGIONS);
if (num < 0) {
SPDK_ERRLOG("Failed to prepare memory for vhost-user\n");
return num;
@ -242,11 +242,11 @@ prepare_vhost_memory_user(struct vhost_user_msg *msg, int fds[])
for (i = 0; i < num; ++i) {
/* the memory regions are unaligned */
msg->payload.memory.regions[i].guest_phys_addr = huges[i].addr; /* use vaddr! */
msg->payload.memory.regions[i].userspace_addr = huges[i].addr;
msg->payload.memory.regions[i].memory_size = huges[i].size;
msg->payload.memory.regions[i].guest_phys_addr = hugepages[i].addr; /* use vaddr! */
msg->payload.memory.regions[i].userspace_addr = hugepages[i].addr;
msg->payload.memory.regions[i].memory_size = hugepages[i].size;
msg->payload.memory.regions[i].flags_padding = 0;
fds[i] = open(huges[i].path, O_RDWR);
fds[i] = open(hugepages[i].path, O_RDWR);
}
msg->payload.memory.nregions = num;

@ -694,7 +694,7 @@ vmd_create_new_bus(struct vmd_pci_bus *parent, struct vmd_pci_device *bridge, ui
/*
* Assigns a bus number from the list of available
* bus numbers. If the device is downstream of a hot plug port,
* assign the bus number from thiose assigned to the HP port. Otherwise,
* assign the bus number from those assigned to the HP port. Otherwise,
* assign the next bus number from the vmd bus number list.
*/
static uint8_t
@ -915,7 +915,7 @@ vmd_dev_init(struct vmd_pci_device *dev)
if (vmd_is_supported_device(dev)) {
spdk_pci_addr_fmt(bdf, sizeof(bdf), &dev->pci.addr);
SPDK_DEBUGLOG(vmd, "Initalizing NVMe device at %s\n", bdf);
SPDK_DEBUGLOG(vmd, "Initializing NVMe device at %s\n", bdf);
dev->pci.parent = dev->bus->vmd->pci;
spdk_pci_hook_device(spdk_pci_nvme_get_driver(), &dev->pci);
}
@ -934,7 +934,7 @@ vmd_dev_init(struct vmd_pci_device *dev)
* The bus number is scanned and if a device is found, based on the header_type, create
* either PciBridge(1) or PciDevice(0).
*
* If a PciBridge, assign bus numbers and rescan new bus. The currenty PciBridge being
* If a PciBridge, assign bus numbers and rescan new bus. The currently PciBridge being
* scanned becomes the passed in parent_bridge with the new bus number.
*
* The linked list becomes list of pciBridges with PciDevices attached.