fix various typos found by Lintian

Cc: stable@dpdk.org

Signed-off-by: Luca Boccassi <bluca@debian.org>
Luca Boccassi 2020-02-29 16:37:06 +00:00 committed by Thomas Monjalon
parent 68f7f31aaa
commit 611faa5f46
15 changed files with 18 additions and 18 deletions

@@ -95,7 +95,7 @@ static void cmd_help_brief_parsed(__rte_unused void *parsed_result,
 			" help ports : Configuring ports.\n"
 			" help registers : Reading and setting port registers.\n"
 			" help filters : Filters configuration help.\n"
-			" help traffic_management : Traffic Management commmands.\n"
+			" help traffic_management : Traffic Management commands.\n"
 			" help devices : Device related cmds.\n"
 			" help all : All of the above sections.\n\n"
 		);
@@ -5150,7 +5150,7 @@ cmd_gso_size_parsed(void *parsed_result,
 	if (test_done == 0) {
 		printf("Before setting GSO segsz, please first"
-			" stop fowarding\n");
+			" stop forwarding\n");
 		return;
 	}

@@ -1157,7 +1157,7 @@ test_refcnt_mbuf(void)
 		tref += refcnt_lcore[slave];
 	if (tref != refcnt_lcore[master])
-		rte_panic("refernced mbufs: %u, freed mbufs: %u\n",
+		rte_panic("referenced mbufs: %u, freed mbufs: %u\n",
			  tref, refcnt_lcore[master]);
 	rte_mempool_dump(stdout, refcnt_pool);

@@ -215,7 +215,7 @@ enum npc_kpu_lc_ltype {
 	NPC_LT_LC_FCOE,
 };
-/* Don't modify Ltypes upto SCTP, otherwise it will
+/* Don't modify Ltypes up to SCTP, otherwise it will
  * effect flow tag calculation and thus RSS.
  */
 enum npc_kpu_ld_ltype {
@@ -262,7 +262,7 @@ enum npc_kpu_lg_ltype {
 	NPC_LT_LG_TU_ETHER_IN_NSH,
 };
-/* Don't modify Ltypes upto SCTP, otherwise it will
+/* Don't modify Ltypes up to SCTP, otherwise it will
  * effect flow tag calculation and thus RSS.
  */
 enum npc_kpu_lh_ltype {

@@ -406,7 +406,7 @@ zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
 	qp->name = name;
-	/* Create completion queue upto max_inflight_ops */
+	/* Create completion queue up to max_inflight_ops */
 	qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
			max_inflight_ops, socket_id);
 	if (qp->processed_pkts == NULL)

@@ -391,7 +391,7 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
 	dev_info->max_event_priority_levels =
		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
 	dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
-	/* we only support dpio upto number of cores*/
+	/* we only support dpio up to number of cores */
 	if (dev_info->max_event_ports > rte_lcore_count())
		dev_info->max_event_ports = rte_lcore_count();
 	dev_info->max_event_port_dequeue_depth =

@@ -1752,7 +1752,7 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
 	}
 	if (hash_types) {
		PMD_DRV_LOG(ERR,
-			"Unknwon RSS config from firmware (%08x), RSS disabled",
+			"Unknown RSS config from firmware (%08x), RSS disabled",
			vnic->hash_type);
		return -ENOTSUP;
 	}

@@ -233,7 +233,7 @@ ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
 	if (val->index > 0x7)
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
-					  "port index upto 0x7 is supported");
+					  "port index up to 0x7 is supported");
 	CXGBE_FILL_FS(val->index, mask->index, iport);

@@ -1850,13 +1850,13 @@ struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
 	key_iova = (size_t)rte_malloc(NULL, 256, 64);
 	if (!key_iova) {
		DPAA2_PMD_ERR(
-			"Memory allocation failure for rule configration\n");
+			"Memory allocation failure for rule configuration\n");
		goto mem_failure;
 	}
 	mask_iova = (size_t)rte_malloc(NULL, 256, 64);
 	if (!mask_iova) {
		DPAA2_PMD_ERR(
-			"Memory allocation failure for rule configration\n");
+			"Memory allocation failure for rule configuration\n");
		goto mem_failure;
 	}

@@ -84,7 +84,7 @@ rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id,
		(2 * DIST_PARAM_IOVA_SIZE), RTE_CACHE_LINE_SIZE);
 	if (!flow) {
		DPAA2_PMD_ERR(
-			"Memory allocation failure for rule configration\n");
+			"Memory allocation failure for rule configuration\n");
		goto creation_error;
 	}
 	key_iova = (void *)((size_t)flow + sizeof(struct rte_flow));

@@ -869,7 +869,7 @@ static int hinic_func_to_func_init(struct hinic_hwdev *hwdev)
 	err = alloc_mbox_info(func_to_func->mbox_resp);
 	if (err) {
-		PMD_DRV_LOG(ERR, "Allocating memory for mailbox responsing failed");
+		PMD_DRV_LOG(ERR, "Allocating memory for mailbox responding failed");
		goto alloc_mbox_for_resp_err;
 	}

@@ -4850,7 +4850,7 @@ mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq,
 	/*
	 * Calculate the number of available resources - elts and WQEs.
	 * There are two possible different scenarios:
-	 * - no data inlining into WQEs, one WQEBB may contains upto
+	 * - no data inlining into WQEs, one WQEBB may contains up to
	 *   four packets, in this case elts become scarce resource
	 * - data inlining into WQEs, one packet may require multiple
	 *   WQEBBs, the WQEs become the limiting factor.

@@ -13,7 +13,7 @@
 #include "pfe_logs.h"
 #include "pfe_mod.h"
-#define PFE_MAX_MACS 1 /*we can support upto 4 MACs per IF*/
+#define PFE_MAX_MACS 1 /* we can support up to 4 MACs per IF */
 #define PFE_VDEV_GEM_ID_ARG "intf"
 struct pfe_vdev_init_params {

@@ -1100,7 +1100,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 	qede_reset_queue_stats(qdev, true);
 	/* Newer SR-IOV PF driver expects RX/TX queues to be started before
-	 * enabling RSS. Hence RSS configuration is deferred upto this point.
+	 * enabling RSS. Hence RSS configuration is deferred up to this point.
	 * Also, we would like to retain similar behavior in PF case, so we
	 * don't do PF/VF specific check here.
	 */

@@ -1084,7 +1084,7 @@ rte_eal_init(int argc, char **argv)
 #if defined(RTE_LIBRTE_KNI) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
 	} else if (rte_eal_check_module("rte_kni") == 1) {
		iova_mode = RTE_IOVA_PA;
-		RTE_LOG(DEBUG, EAL, "KNI is loaded, selecting IOVA as PA mode for better KNI perfomance.\n");
+		RTE_LOG(DEBUG, EAL, "KNI is loaded, selecting IOVA as PA mode for better KNI performance.\n");
 #endif
 	} else if (is_iommu_enabled()) {
		/* we have an IOMMU, pick IOVA as VA mode */

@@ -115,7 +115,7 @@ struct rte_ipsec_sa {
	 * sqn and replay window
	 * In case of SA handled by multiple threads *sqn* cacheline
	 * could be shared by multiple cores.
-	 * To minimise perfomance impact, we try to locate in a separate
+	 * To minimise performance impact, we try to locate in a separate
	 * place from other frequently accesed data.
	 */
 	union {