lib: fix typos

Signed-off-by: Pavel Shirshov <pavel.shirshov@gmail.com>
Pavel Shirshov 2017-11-10 00:24:23 -08:00 committed by Thomas Monjalon
parent 82bf1caf5f
commit e32cb57973
38 changed files with 53 additions and 52 deletions


@@ -442,7 +442,7 @@ rte_distributor_process_v1705(struct rte_distributor *d,
/*
* Uncommenting the next line will cause the find_match
- * function to be optimised out, making this function
+ * function to be optimized out, making this function
* do parallel (non-atomic) distribution
*/
/* matches[j] = 0; */
@@ -536,7 +536,7 @@ MAP_STATIC_SYMBOL(int rte_distributor_returned_pkts(struct rte_distributor *d,
/*
* Return the number of packets in-flight in a distributor, i.e. packets
- * being workered on or queued up in a backlog.
+ * being worked on or queued up in a backlog.
*/
static inline unsigned int
total_outstanding(const struct rte_distributor *d)
@@ -663,7 +663,7 @@ rte_distributor_create_v1705(const char *name,
#endif
/*
- * Set up the backog tags so they're pointing at the second cache
+ * Set up the backlog tags so they're pointing at the second cache
* line for performance during flow matching
*/
for (i = 0 ; i < num_workers ; i++)


@@ -71,7 +71,7 @@ struct rte_mbuf;
* @param alg_type
* Call the legacy API, or use the new burst API. legacy uses 32-bit
* flow ID, and works on a single packet at a time. Latest uses 15-
- * bit flow ID and works on up to 8 packets at a time to worers.
+ * bit flow ID and works on up to 8 packets at a time to workers.
* @return
* The newly created distributor instance
*/
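For illustration, a minimal sketch of selecting the burst path through alg_type (the distributor name and worker count below are made up; rte_distributor_create() and RTE_DIST_ALG_BURST are the 17.05+ public API this header declares):

```c
#include <rte_distributor.h>
#include <rte_lcore.h>

/* Sketch: create a burst-mode distributor for 4 workers. The name
 * "pkt_dist" and the worker count are illustrative only. */
static struct rte_distributor *
create_burst_distributor(void)
{
    return rte_distributor_create("pkt_dist", rte_socket_id(),
                                  4 /* num_workers */,
                                  RTE_DIST_ALG_BURST);
}
```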


@@ -90,7 +90,7 @@ union rte_distributor_buffer_v20 {
/*
* Transfer up to 8 mbufs at a time to/from workers, and
- * flow matching algorithm optimised for 8 flow IDs at a time
+ * flow matching algorithm optimized for 8 flow IDs at a time
*/
#define RTE_DIST_BURST_SIZE 8


@@ -345,7 +345,8 @@ rte_distributor_returned_pkts_v20(struct rte_distributor_v20 *d,
VERSION_SYMBOL(rte_distributor_returned_pkts, _v20, 2.0);
/* return the number of packets in-flight in a distributor, i.e. packets
- * being workered on or queued up in a backlog. */
+ * being worked on or queued up in a backlog.
+ */
static inline unsigned
total_outstanding(const struct rte_distributor_v20 *d)
{


@@ -249,7 +249,7 @@ static const struct logtype logtype_strings[] = {
{RTE_LOGTYPE_USER8, "user8"}
};
- /* Logging should be first initialzer (before drivers and bus) */
+ /* Logging should be first initializer (before drivers and bus) */
RTE_INIT_PRIO(rte_log_init, 101);
static void
rte_log_init(void)


@@ -225,7 +225,7 @@ rte_memcpy_func(void *dst, const void *src, size_t n)
* We split the remaining bytes (which will be less than 256) into
* 64byte (2^6) chunks.
* Using incrementing integers in the case labels of a switch statement
- * enourages the compiler to use a jump table. To get incrementing
+ * encourages the compiler to use a jump table. To get incrementing
* integers, we shift the 2 relevant bits to the LSB position to first
* get decrementing integers, and then subtract.
*/
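The trick described above can be shown with a small self-contained sketch (not the DPDK implementation; a plain memcpy() stands in for the vector copy helpers):

```c
#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Sketch: n is known to be less than 256, so n >> 6 is 3..0. Writing
 * the labels as the incrementing sequence 0,1,2,3 encourages a jump
 * table; "3 - (n >> 6)" converts the decrementing chunk count into
 * that incrementing index. */
static void
copy_below_256(uint8_t *dst, const uint8_t *src, size_t n)
{
    switch (3 - (n >> 6)) {
    case 0:                       /* 3 full 64-byte chunks remain */
        memcpy(dst, src, 64); dst += 64; src += 64;
        /* fall through */
    case 1:                       /* 2 full chunks remain */
        memcpy(dst, src, 64); dst += 64; src += 64;
        /* fall through */
    case 2:                       /* 1 full chunk remains */
        memcpy(dst, src, 64); dst += 64; src += 64;
        /* fall through */
    case 3:                       /* tail of n & 63 bytes */
        memcpy(dst, src, n & 63);
    }
}
```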


@@ -164,7 +164,7 @@ rte_memcpy_func(void *dst, const void *src, size_t n)
* We split the remaining bytes (which will be less than 256) into
* 64byte (2^6) chunks.
* Using incrementing integers in the case labels of a switch statement
- * enourages the compiler to use a jump table. To get incrementing
+ * encourages the compiler to use a jump table. To get incrementing
* integers, we shift the 2 relevant bits to the LSB position to first
* get decrementing integers, and then subtract.
*/


@@ -217,7 +217,7 @@ int rte_eal_primary_proc_alive(const char *config_file_path);
/**
* Usage function typedef used by the application usage function.
*
- * Use this function typedef to define and call rte_set_applcation_usage_hook()
+ * Use this function typedef to define and call rte_set_application_usage_hook()
* routine.
*/
typedef void (*rte_usage_hook_t)(const char * prgname);


@@ -218,7 +218,7 @@ int rte_log_cur_msg_logtype(void);
* The string identifying the log type.
* @return
* - >0: success, the returned value is the log type identifier.
- * - (-ENONEM): cannot allocate memory.
+ * - (-ENOMEM): cannot allocate memory.
*/
int rte_log_register(const char *name);
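A short usage sketch of the registration call documented above (the log type name and level are illustrative; rte_log_set_level() is assumed available alongside rte_log_register()):

```c
#include <rte_log.h>

static int my_logtype;  /* illustrative dynamic log type */

static void
init_my_logging(void)
{
    my_logtype = rte_log_register("user.mylib");  /* name is made up */
    if (my_logtype < 0)
        return;  /* e.g. -ENOMEM, as documented above */
    rte_log_set_level(my_logtype, RTE_LOG_DEBUG);
}
```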


@@ -252,7 +252,7 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
}
/*
- * joing two struct malloc_elem together. elem1 and elem2 must
+ * join two struct malloc_elem together. elem1 and elem2 must
* be contiguous in memory.
*/
static inline void


@@ -153,7 +153,7 @@ service_valid(uint32_t id)
service = &rte_services[id]; \
} while (0)
- /* returns 1 if statistics should be colleced for service
+ /* returns 1 if statistics should be collected for service
* Returns 0 if statistics should not be collected for service
*/
static inline int


@@ -344,7 +344,7 @@ void numa_error(char *where)
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
* virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
* in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
- * map continguous physical blocks in contiguous virtual blocks.
+ * map contiguous physical blocks in contiguous virtual blocks.
*/
static unsigned
map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,


@@ -113,7 +113,7 @@ static pthread_t msb_inc_thread_id;
/*
* This function runs on a specific thread to update a global variable
- * containing used to process MSB of the HPET (unfortunatelly, we need
+ * containing used to process MSB of the HPET (unfortunately, we need
* this because hpet is 32 bits by default under linux).
*/
static void


@@ -241,7 +241,7 @@ static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
else
phy->ops.get_cable_length = e1000_get_cable_length_m88;
phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
- /* Check if this PHY is confgured for media swap. */
+ /* Check if this PHY is configured for media swap. */
if (phy->id == M88E1112_E_PHY_ID) {
u16 data;


@@ -952,7 +952,7 @@ revert_groups(struct efd_offline_group_rules *previous_group,
* This operation was still successful, and entry contains a valid update
* RTE_EFD_UPDATE_FAILED
* Either the EFD failed to find a suitable perfect hash or the group was full
- * This is a fatal error, and the table is now in an indeterminite state
+ * This is a fatal error, and the table is now in an indeterminate state
* RTE_EFD_UPDATE_NO_CHANGE
* Operation resulted in no change to the table (same value already exists)
* 0
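A hedged sketch of dispatching on these return codes (assuming the public rte_efd_update() entry point; RTE_EFD_UPDATE_WARN_GROUP_FULL is assumed to be the code for the "still successful" case whose label is truncated above):

```c
#include <rte_efd.h>

/* Sketch: act on the documented rte_efd_update() results. */
static int
update_or_fail(struct rte_efd_table *table, unsigned int socket_id,
               const void *key, efd_value_t value)
{
    switch (rte_efd_update(table, socket_id, key, value)) {
    case 0:                               /* inserted/updated cleanly */
    case RTE_EFD_UPDATE_WARN_GROUP_FULL:  /* successful, with warning */
    case RTE_EFD_UPDATE_NO_CHANGE:        /* same value already there */
        return 0;
    case RTE_EFD_UPDATE_FAILED:           /* table is indeterminate */
    default:
        return -1;
    }
}
```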


@@ -1062,7 +1062,7 @@ struct rte_eth_rxq_info {
/**
* Ethernet device TX queue information structure.
- * Used to retieve information about configured queue.
+ * Used to retrieve information about configured queue.
*/
struct rte_eth_txq_info {
struct rte_eth_txconf conf; /**< queue config parameters. */


@@ -183,7 +183,7 @@ typedef int (*rte_tm_node_stats_update_t)(struct rte_eth_dev *dev,
typedef int (*rte_tm_node_wfq_weight_mode_update_t)(
struct rte_eth_dev *dev,
uint32_t node_id,
- int *wfq_weigth_mode,
+ int *wfq_weight_mode,
uint32_t n_sp_priorities,
struct rte_tm_error *error);


@@ -116,7 +116,7 @@ struct gro_tcp4_tbl {
* This function creates a TCP/IPv4 reassembly table.
*
* @param socket_id
- * socket index for allocating TCP/IPv4 reassemblt table
+ * socket index for allocating TCP/IPv4 reassemble table
* @param max_flow_num
* the maximum number of flows in the TCP/IPv4 GRO table
* @param max_item_per_flow


@@ -103,7 +103,7 @@ struct rte_gso_ctx {
* Before calling rte_gso_segment(), applications must set proper ol_flags
* for the packet. The GSO library uses the same macros as that of TSO.
* For example, set PKT_TX_TCP_SEG and PKT_TX_IPV4 in ol_flags to segment
- * a TCP/IPv4 packet. If rte_gso_segment() succceds, the PKT_TX_TCP_SEG
+ * a TCP/IPv4 packet. If rte_gso_segment() succeeds, the PKT_TX_TCP_SEG
* flag is removed for all GSO segments and the input packet.
*
* Each of the newly-created GSO segments is organized as a two-segment
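A hedged usage sketch of the flow described above (assuming the 17.11-era rte_gso_segment() signature and header type names; the header lengths are illustrative and gso_ctx setup is out of scope):

```c
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_gso.h>
#include <rte_mbuf.h>

/* Sketch: mark a TCP/IPv4 packet for GSO and segment it; gso_ctx is
 * assumed to be fully configured (direct/indirect pools, gso_size). */
static int
segment_tcp4(struct rte_mbuf *pkt, const struct rte_gso_ctx *gso_ctx,
             struct rte_mbuf **segs_out, uint16_t nb_segs_out)
{
    pkt->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4;
    pkt->l2_len = sizeof(struct ether_hdr);  /* illustrative: no VLAN */
    pkt->l3_len = sizeof(struct ipv4_hdr);   /* illustrative: no options */
    pkt->l4_len = sizeof(struct tcp_hdr);

    /* on success the PKT_TX_TCP_SEG flag is cleared on the outputs */
    return rte_gso_segment(pkt, gso_ctx, segs_out, nb_segs_out);
}
```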


@@ -160,7 +160,7 @@ ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
}
/*
- * errorneous packet: either exceeed max allowed number of fragments,
+ * erroneous packet: either exceed max allowed number of fragments,
* or duplicate first/last fragment encountered.
*/
if (idx >= sizeof (fp->frags) / sizeof (fp->frags[0])) {


@@ -70,7 +70,7 @@ struct ip_frag {
struct rte_mbuf *mb; /**< fragment mbuf */
};
- /** @internal <src addr, dst_addr, id> to uniquely indetify fragmented datagram. */
+ /** @internal <src addr, dst_addr, id> to uniquely identify fragmented datagram. */
struct ip_frag_key {
uint64_t src_dst[4]; /**< src address, first 8 bytes used for IPv4 */
uint32_t id; /**< dst address */
@@ -118,7 +118,7 @@ struct rte_ip_frag_tbl {
uint32_t entry_mask; /**< hash value mask. */
uint32_t max_entries; /**< max entries allowed. */
uint32_t use_entries; /**< entries in use. */
- uint32_t bucket_entries; /**< hash assocaitivity. */
+ uint32_t bucket_entries; /**< hash associativity. */
uint32_t nb_entries; /**< total size of the table. */
uint32_t nb_buckets; /**< num of associativity lines. */
struct ip_frag_pkt *last; /**< last used entry. */
@@ -303,7 +303,7 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
* @param ip_hdr
* Pointer to the IPV4 header inside the fragment.
* @return
- * Pointer to mbuf for reassebled packet, or NULL if:
+ * Pointer to mbuf for reassembled packet, or NULL if:
* - an error occurred.
* - not all fragments of the packet are collected yet.
*/
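A minimal sketch of consuming that return value (assuming this documents rte_ipv4_frag_reassemble_packet(); the TSC timestamp source is an illustrative choice):

```c
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_cycles.h>

/* Sketch: feed one received fragment into the table; tbl and dr are
 * assumed to have been created/initialized elsewhere. */
static struct rte_mbuf *
handle_fragment(struct rte_ip_frag_tbl *tbl,
                struct rte_ip_frag_death_row *dr,
                struct rte_mbuf *mb, struct ipv4_hdr *ip_hdr)
{
    /* NULL covers both "error" and "not all fragments seen yet" */
    return rte_ipv4_frag_reassemble_packet(tbl, dr, mb, rte_rdtsc(),
                                           ip_hdr);
}
```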


@@ -93,7 +93,7 @@ ipv4_frag_reassemble(struct ip_frag_pkt *fp)
/* update mbuf fields for reassembled packet. */
m->ol_flags |= PKT_TX_IP_CKSUM;
- /* update ipv4 header for the reassmebled packet */
+ /* update ipv4 header for the reassembled packet */
ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
@@ -117,7 +117,7 @@ ipv4_frag_reassemble(struct ip_frag_pkt *fp)
* @param ip_hdr
* Pointer to the IPV4 header inside the fragment.
* @return
- * Pointer to mbuf for reassebled packet, or NULL if:
+ * Pointer to mbuf for reassembled packet, or NULL if:
* - an error occurred.
* - not all fragments of the packet are collected yet.
*/


@@ -313,7 +313,7 @@ rte_jobstats_set_max(struct rte_jobstats *job, uint64_t period);
*
* @param job
* Job object.
- * @param update_pedriod_cb
+ * @param update_period_cb
* Callback to set. If NULL restore default update function.
*/
void


@@ -340,7 +340,7 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
/* Get an available slot from the pool */
slot = kni_memzone_pool_alloc();
if (!slot) {
- RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unusued ones.\n",
+ RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unused ones.\n",
kni_memzone_pool.max_ifaces);
return NULL;
}
@@ -659,7 +659,7 @@ kni_allocate_mbufs(struct rte_kni *kni)
phys[i] = va2pa(pkts[i]);
}
- /* No pkt mbuf alocated */
+ /* No pkt mbuf allocated */
if (i <= 0)
return;


@@ -228,7 +228,7 @@ const char *rte_kni_get_name(const struct rte_kni *kni);
* @param kni
* pointer to struct rte_kni.
* @param ops
- * ponter to struct rte_kni_ops.
+ * pointer to struct rte_kni_ops.
*
* @return
* On success: 0


@@ -73,7 +73,7 @@ kni_fifo_put(struct rte_kni_fifo *fifo, void **data, unsigned num)
}
/**
- * Get up to num elements from the fifo. Return the number actully read
+ * Get up to num elements from the fifo. Return the number actually read
*/
static inline unsigned
kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)


@@ -850,10 +850,10 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
} while (0)
/**
- * Allocate an unitialized mbuf from mempool *mp*.
+ * Allocate an uninitialized mbuf from mempool *mp*.
*
* This function can be used by PMDs (especially in RX functions) to
- * allocate an unitialized mbuf. The driver is responsible of
+ * allocate an uninitialized mbuf. The driver is responsible of
* initializing all the required fields. See rte_pktmbuf_reset().
* For standard needs, prefer rte_pktmbuf_alloc().
*
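A minimal sketch of the raw-allocation path this comment describes (assuming it documents rte_mbuf_raw_alloc(); the reset-by-hand step mirrors what rte_pktmbuf_alloc() would do internally):

```c
#include <rte_mbuf.h>

/* Sketch: PMD-style allocation of an uninitialized mbuf, followed by
 * the manual reset the comment above makes the driver responsible for. */
static struct rte_mbuf *
alloc_for_rx(struct rte_mempool *mp)
{
    struct rte_mbuf *m = rte_mbuf_raw_alloc(mp);

    if (m != NULL)
        rte_pktmbuf_reset(m);
    return m;
    /* for ordinary needs this is just: return rte_pktmbuf_alloc(mp); */
}
```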
@@ -1778,7 +1778,7 @@ const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
* @param len
* The amount of bytes to read.
* @param buf
- * The buffer where data is copied if it is not contigous in mbuf
+ * The buffer where data is copied if it is not contiguous in mbuf
* data. Its length should be at least equal to the len parameter.
* @return
* The pointer to the data, either in the mbuf if it is contiguous,
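A short usage sketch via the public wrapper (rte_pktmbuf_read() is assumed to be the non-underscore entry point for this helper):

```c
#include <rte_mbuf.h>

/* Sketch: read 4 bytes at offset off; buf[] is only written to when
 * the requested range is not contiguous in the mbuf data. */
static const void *
peek4(const struct rte_mbuf *m, uint32_t off, uint8_t buf[4])
{
    return rte_pktmbuf_read(m, off, 4, buf);
}
```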


@@ -64,7 +64,7 @@ struct crc_pmull_ctx crc16_ccitt_pmull __rte_aligned(16);
* FOLD = XOR(T1, T2, DATA)
*
* @param data_block 16 byte data block
- * @param precomp precomputed rk1 constanst
+ * @param precomp precomputed rk1 constant
* @param fold running 16 byte folded data
*
* @return New 16 byte folded data


@@ -66,7 +66,7 @@ struct crc_pclmulqdq_ctx crc16_ccitt_pclmulqdq __rte_aligned(16);
* @param data_block
* 16 byte data block
* @param precomp
- * Precomputed rk1 constanst
+ * Precomputed rk1 constant
* @param fold
* Current16 byte folded data
*


@@ -237,7 +237,7 @@ rte_raw_cksum(const void *buf, size_t len)
* @param off
* The offset in bytes to start the checksum.
* @param len
- * The length in bytes of the data to ckecksum.
+ * The length in bytes of the data to checksum.
* @param cksum
* A pointer to the checksum, filled on success.
* @return


@@ -227,7 +227,7 @@ pdump_tx(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
}
static int
- pdump_regitser_rx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
+ pdump_register_rx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
struct rte_ring *ring, struct rte_mempool *mp,
uint16_t operation)
{
@@ -281,7 +281,7 @@ pdump_regitser_rx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
}
static int
- pdump_regitser_tx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
+ pdump_register_tx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
struct rte_ring *ring, struct rte_mempool *mp,
uint16_t operation)
{
@@ -402,7 +402,7 @@ set_pdump_rxtx_cbs(struct pdump_request *p)
/* register RX callback */
if (flags & RTE_PDUMP_FLAG_RX) {
end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_rx_q : queue + 1;
- ret = pdump_regitser_rx_callbacks(end_q, port, queue, ring, mp,
+ ret = pdump_register_rx_callbacks(end_q, port, queue, ring, mp,
operation);
if (ret < 0)
return ret;
@@ -411,7 +411,7 @@ set_pdump_rxtx_cbs(struct pdump_request *p)
/* register TX callback */
if (flags & RTE_PDUMP_FLAG_TX) {
end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_tx_q : queue + 1;
- ret = pdump_regitser_tx_callbacks(end_q, port, queue, ring, mp,
+ ret = pdump_register_tx_callbacks(end_q, port, queue, ring, mp,
operation);
if (ret < 0)
return ret;


@@ -483,7 +483,7 @@ int rte_pipeline_table_entry_delete(struct rte_pipeline *p,
* @param keys
* Array containing table entry keys
* @param entries
- * Array containung new contents for every table entry identified by key
+ * Array containing new contents for every table entry identified by key
* @param n_keys
* Number of keys to add
* @param key_found


@@ -267,7 +267,7 @@ power_get_available_freqs(struct rte_power_info *pi)
}
ret = 0;
- POWER_DEBUG_TRACE("%d frequencie(s) of lcore %u are available\n",
+ POWER_DEBUG_TRACE("%d frequency(s) of lcore %u are available\n",
count, pi->lcore_id);
out:
fclose(f);
@@ -359,7 +359,7 @@ rte_power_acpi_cpufreq_init(unsigned lcore_id)
}
RTE_LOG(INFO, POWER, "Initialized successfully for lcore %u "
- "power manamgement\n", lcore_id);
+ "power management\n", lcore_id);
rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_USED);
return 0;


@@ -180,7 +180,7 @@ int rte_power_acpi_cpufreq_freq_max(unsigned lcore_id);
*
* @return
* - 1 on success with frequency changed.
- * - 0 on success without frequency chnaged.
+ * - 0 on success without frequency changed.
* - Negative on error.
*/
int rte_power_acpi_cpufreq_freq_min(unsigned lcore_id);


@@ -147,9 +147,9 @@ rte_reorder_free(struct rte_reorder_buffer *b);
* -1 on error
* On error case, rte_errno will be set appropriately:
* - ENOSPC - Cannot move existing mbufs from reorder buffer to accommodate
- * ealry mbuf, but it can be accommodated by performing drain and then insert.
+ * early mbuf, but it can be accommodated by performing drain and then insert.
* - ERANGE - Too early or late mbuf which is vastly out of range of expected
- * window should be ingnored without any handling.
+ * window should be ignored without any handling.
*/
int
rte_reorder_insert(struct rte_reorder_buffer *b, struct rte_mbuf *mbuf);
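A hedged sketch of the drain-then-insert recovery the ENOSPC text suggests (the drain batch size and free-instead-of-transmit handling are illustrative):

```c
#include <rte_reorder.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

#define DRAIN_BURST 32  /* illustrative drain batch size */

/* Sketch: insert an mbuf; on ENOSPC drain the buffer and retry once,
 * on ERANGE drop the mbuf as the documentation above advises. */
static void
insert_or_handle(struct rte_reorder_buffer *b, struct rte_mbuf *m)
{
    struct rte_mbuf *drained[DRAIN_BURST];
    unsigned int i, n;

    if (rte_reorder_insert(b, m) == 0)
        return;

    if (rte_errno == ENOSPC) {
        /* make room: emit what is already in order, then retry */
        n = rte_reorder_drain(b, drained, DRAIN_BURST);
        for (i = 0; i < n; i++)
            rte_pktmbuf_free(drained[i]);   /* stand-in for transmit */
        if (rte_reorder_insert(b, m) != 0)
            rte_pktmbuf_free(m);
    } else if (rte_errno == ERANGE) {
        rte_pktmbuf_free(m);    /* vastly out of window: ignore */
    }
}
```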


@@ -139,7 +139,7 @@ rte_red_config_init(struct rte_red_config *red_cfg,
/**
* @brief Generate random number for RED
*
- * Implemenetation based on:
+ * Implementation based on:
* http://software.intel.com/en-us/articles/fast-random-number-generator-on-the-intel-pentiumr-4-processor/
*
* 10 bit shift has been found through empirical tests (was 16).
@@ -200,7 +200,7 @@ __rte_red_calc_qempty_factor(uint8_t wq_log2, uint16_t m)
* Now using basic math we compute 2^n:
* 2^(f+n) = 2^f * 2^n
* 2^f - we use lookup table
- * 2^n - can be replaced with bit shift right oeprations
+ * 2^n - can be replaced with bit shift right operations
*/
f = (n >> 6) & 0xf;
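The 2^f-from-a-table, 2^n-from-a-shift decomposition can be illustrated with a self-contained sketch (not the rte_red code; this variant uses 4 fractional bits and Q16 table values):

```c
#include <stdint.h>

/* Sketch: approximate 2^(-x/16) in Q16 fixed point. The fractional
 * part f indexes a 16-entry table of round(65536 * 2^(-i/16)); the
 * integer part n becomes a plain right shift. Assumes x >> 4 < 16. */
static uint32_t
inv_pow2_q16(uint32_t x)
{
    static const uint32_t pow2_frac[16] = {
        65536, 62757, 60097, 57549, 55109, 52773, 50535, 48393,
        46341, 44376, 42495, 40693, 38968, 37316, 35734, 34219,
    };
    uint32_t f = x & 0xf;   /* fractional part: table lookup (2^f) */
    uint32_t n = x >> 4;    /* integer part: right shift (2^n)     */

    return pow2_frac[f] >> n;
}
```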


@@ -1020,7 +1020,7 @@ rte_sched_subport_read_stats(struct rte_sched_port *port,
memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));
- /* Subport TC ovesubscription status */
+ /* Subport TC oversubscription status */
*tc_ov = s->tc_ov;
return 0;


@@ -195,7 +195,7 @@ timer_set_running_state(struct rte_timer *tim)
/*
* Return a skiplist level for a new entry.
- * This probabalistically gives a level with p=1/4 that an entry at level n
+ * This probabilistically gives a level with p=1/4 that an entry at level n
* will also appear at level n+1.
*/
static uint32_t
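The p=1/4 level draw can be sketched in a few lines (not the rte_timer implementation, which has its own randomness source and depth bound; MAX_DEPTH is an assumed cap):

```c
#include <stdint.h>
#include <stdlib.h>

#define MAX_DEPTH 10  /* assumed skiplist depth bound */

/* Sketch: each extra level is kept with probability 1/4, since two
 * independent random bits are both zero one time in four. */
static uint32_t
skiplist_random_level(void)
{
    uint32_t level = 0;

    while (level < MAX_DEPTH - 1 && (rand() & 3) == 0)
        level++;
    return level;
}
```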