doc: fix spelling reported by aspell in comments

Fix spelling errors in the doxygen docs.

Signed-off-by: John McNamara <john.mcnamara@intel.com>
Commit 8bd5f07c7a (parent d629b7b5fe)
Authored by John McNamara on 2019-04-26 16:14:22 +01:00, committed by Thomas Monjalon
57 changed files with 112 additions and 112 deletions

@@ -8,7 +8,7 @@
 * @brief
 * This file documents the external interfaces that the QAT FW running
 * on the QAT Acceleration Engine provides to clients wanting to
-* accelerate crypto assymetric applications
+* accelerate crypto asymmetric applications
 */
 #ifndef _ICP_QAT_FW_MMP_IDS_

@@ -10,7 +10,7 @@
 * @brief
 * This file documents the external interfaces that the QAT FW running
 * on the QAT Acceleration Engine provides to clients wanting to
-* accelerate crypto assymetric applications
+* accelerate crypto asymmetric applications
 */
 #ifndef _ICP_QAT_FW_PKE_H_

@@ -24,7 +24,7 @@
 * packets.
 *
 * A opdl_ring can be used as the basis for pipeline based applications. Instead
-* of each stage in a pipeline dequeueing from a ring, processing and enqueueing
+* of each stage in a pipeline dequeuing from a ring, processing and enqueuing
 * to another ring, it can process entries in-place on the ring. If stages do
 * not depend on each other, they can run in parallel.
 *
@@ -152,7 +152,7 @@ opdl_ring_get_name(const struct opdl_ring *t);
 * Enabling this may have a negative impact on performance if only one thread
 * will be processing this stage.
 * @param is_input
-* Indication to nitialise the stage with all slots available or none
+* Indication to initialise the stage with all slots available or none
 *
 * @return
 * A pointer to the new stage, or NULL on error.
@@ -589,7 +589,7 @@ opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe);
 * Compare the event descriptor with original version in the ring.
 * if key field event descriptor is changed by application, then
 * update the slot in the ring otherwise do nothing with it.
-* the key field is flow_id, prioirty, mbuf, impl_opaque
+* the key field is flow_id, priority, mbuf, impl_opaque
 *
 * @param s
 * The opdl_stage.
@@ -600,7 +600,7 @@ opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe);
 * @param atomic
 * queue type associate with the stage.
 * @return
-* if the evevnt key field is changed compare with previous record.
+* if the event key field is changed compare with previous record.
 */
 bool

@@ -270,7 +270,7 @@ struct rte_pmd_i40e_pkt_template_action {
 struct rte_pmd_i40e_pkt_template_input {
 /** the pctype used for raw packet template */
 uint16_t pctype;
-/** the buffer conatining raw packet template */
+/** the buffer containing raw packet template */
 void *packet;
 /** the length of buffer with raw packet template */
 uint32_t length;
@@ -314,7 +314,7 @@ struct rte_pmd_i40e_inset {
 * @param conf
 * Specifies configuration parameters of raw packet template filter.
 * @param add
-* Speicifes an action to be taken - add or remove raw packet template filter.
+* Specifies an action to be taken - add or remove raw packet template filter.
 * @return
 * - (0) if successful.
 * - (-ENODEV) if *port* invalid.

@@ -770,7 +770,7 @@ nfp_net_start(struct rte_eth_dev *dev)
 return -EIO;
 /*
-* Allocating rte mbuffs for configured rx queues.
+* Allocating rte mbufs for configured rx queues.
 * This requires queues being enabled before
 */
 if (nfp_net_rx_freelist_setup(dev) < 0) {
@@ -1551,7 +1551,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 if (rxq == NULL)
 return -ENOMEM;
-/* Hw queues mapping based on firmware confifguration */
+/* Hw queues mapping based on firmware configuration */
 rxq->qidx = queue_idx;
 rxq->fl_qcidx = queue_idx * hw->stride_rx;
 rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
@@ -1583,7 +1583,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 socket_id);
 if (tz == NULL) {
-PMD_DRV_LOG(ERR, "Error allocatig rx dma");
+PMD_DRV_LOG(ERR, "Error allocating rx dma");
 nfp_net_rx_queue_release(rxq);
 return -ENOMEM;
 }
@@ -1970,7 +1970,7 @@ nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
 /*
 * RX path design:
 *
-* There are some decissions to take:
+* There are some decisions to take:
 * 1) How to check DD RX descriptors bit
 * 2) How and when to allocate new mbufs
 *
@@ -2040,7 +2040,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 rte_rmb();
 /*
-* We got a packet. Let's alloc a new mbuff for refilling the
+* We got a packet. Let's alloc a new mbuf for refilling the
 * free descriptor ring as soon as possible
 */
 new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
@@ -2055,8 +2055,8 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 nb_hold++;
 /*
-* Grab the mbuff and refill the descriptor with the
-* previously allocated mbuff
+* Grab the mbuf and refill the descriptor with the
+* previously allocated mbuf
 */
 mb = rxb->mbuf;
 rxb->mbuf = new_mb;
@@ -2088,7 +2088,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 return -EINVAL;
 }
-/* Filling the received mbuff with packet info */
+/* Filling the received mbuf with packet info */
 if (hw->rx_offset)
 mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
 else
@@ -2113,7 +2113,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
 }
-/* Adding the mbuff to the mbuff array passed by the app */
+/* Adding the mbuf to the mbuf array passed by the app */
 rx_pkts[avail++] = mb;
 /* Now resetting and updating the descriptor */

@@ -34,7 +34,7 @@
 *
 * @file dpdk/pmd/nfp_net_pmd.h
 *
-* Netronome NFP_NET PDM driver
+* Netronome NFP_NET PMD driver
 */
 #ifndef _NFP_NET_PMD_H_
@@ -240,7 +240,7 @@ struct nfp_net_txq {
 uint32_t tx_free_thresh;
 /*
-* For each descriptor keep a reference to the mbuff and
+* For each descriptor keep a reference to the mbuf and
 * DMA address used until completion is signalled.
 */
 struct {

@@ -34,7 +34,7 @@ enum {
 };
 /**
-* If user has configued a Virtual Queue mode, but for some particular VQ
+* If user has configured a Virtual Queue mode, but for some particular VQ
 * user needs an exclusive H/W queue associated (for better performance
 * on that particular VQ), then user can pass this flag while creating the
 * Virtual Queue. A H/W queue will be allocated corresponding to
@@ -307,7 +307,7 @@ rte_qdma_vq_stats(uint16_t vq_id,
 * VQ's at runtime.
 *
 * @param vq_id
-* Virtual Queue ID which needs to be deinialized.
+* Virtual Queue ID which needs to be uninitialized.
 *
 * @returns
 * - 0: Success.
@@ -322,7 +322,7 @@ rte_qdma_vq_destroy(uint16_t vq_id);
 * VQ's at runtime.
 *
 * @param vq_id
-* RBP based Virtual Queue ID which needs to be deinialized.
+* RBP based Virtual Queue ID which needs to be uninitialized.
 *
 * @returns
 * - 0: Success.

@@ -206,12 +206,12 @@ void lthread_run(void);
 * Creates an lthread and places it in the ready queue on a particular
 * lcore.
 *
-* If no scheduler exists yet on the curret lcore then one is created.
+* If no scheduler exists yet on the current lcore then one is created.
 *
 * @param new_lt
 * Pointer to an lthread pointer that will be initialized
 * @param lcore
-* the lcore the thread should be started on or the current clore
+* the lcore the thread should be started on or the current lcore
 * -1 the current lcore
 * 0 - LTHREAD_MAX_LCORES any other lcore
 * @param lthread_func
@@ -469,7 +469,7 @@ void
 /**
 * Set lthread TLS
 *
-* This function is modelled on pthread_set_sepcific()
+* This function is modelled on pthread_set_specific()
 * It associates a thread-specific value with a key obtained via a previous
 * call to lthread_key_create().
 * Different threads may bind different values to the same key. These values
@@ -749,7 +749,7 @@ int lthread_cond_wait(struct lthread_cond *c, uint64_t reserved);
 * Signal a condition variable
 *
 * The function unblocks one thread waiting for the condition variable cond.
-* If no threads are waiting on cond, the rte_lthead_cond_signal() function
+* If no threads are waiting on cond, the rte_lthread_cond_signal() function
 * has no effect.
 *
 * @param cond
@@ -765,7 +765,7 @@ int lthread_cond_signal(struct lthread_cond *c);
 * Broadcast a condition variable
 *
 * The function unblocks all threads waiting for the condition variable cond.
-* If no threads are waiting on cond, the rte_lthead_cond_broadcast()
+* If no threads are waiting on cond, the rte_lthread_cond_broadcast()
 * function has no effect.
 *
 * @param cond
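
As context for the condition-variable fixes above, the wait/signal pairing looks roughly like this; a minimal sketch assuming a condition variable already created via lthread_cond_init(), with return codes ignored:

    #include <lthread_api.h>

    struct lthread_cond *cond; /* assumed initialised with lthread_cond_init() */

    /* Waiting side: block until another lthread signals the condition. */
    static void waiter(void *arg)
    {
        lthread_cond_wait(cond, 0); /* second argument is reserved */
        /* ... condition was signalled, continue ... */
    }

    /* Signalling side: wake one waiter; no effect if none are waiting. */
    static void signaller(void *arg)
    {
        lthread_cond_signal(cond);
    }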

@@ -17,7 +17,7 @@ extern "C" {
 /*
-* Takes 2 SIMD registers containing N transitions eachi (tr0, tr1).
+* Takes 2 SIMD registers containing N transitions each (tr0, tr1).
 * Shuffles it into different representation:
 * lo - contains low 32 bits of given N transitions.
 * hi - contains high 32 bits of given N transitions.
@@ -66,7 +66,7 @@ extern "C" {
 \
 dfa_ofs = _##P##_sub_epi32(t, r); \
 \
-/* QUAD/SINGLE caluclations. */ \
+/* QUAD/SINGLE calculations. */ \
 t = _##P##_cmpgt_epi8(in, tr_hi); \
 t = _##P##_sign_epi8(t, t); \
 t = _##P##_maddubs_epi16(t, t); \

@@ -43,7 +43,7 @@ extern "C" {
 #define RTE_BBDEV_MAX_DEVS 128 /**< Max number of devices */
 #endif
-/** Flags indiciate current state of BBDEV device */
+/** Flags indicate current state of BBDEV device */
 enum rte_bbdev_state {
 RTE_BBDEV_UNUSED,
 RTE_BBDEV_INITIALIZED
@@ -161,7 +161,7 @@ rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
 /**
 * Start a device.
-* This is the last step needed before enqueueing operations is possible.
+* This is the last step needed before enqueuing operations is possible.
 *
 * @param dev_id
 * The identifier of the device.

@@ -120,7 +120,7 @@ rte_bpf_destroy(struct rte_bpf *bpf);
 * Create a new eBPF execution context and load given BPF code into it.
 *
 * @param prm
-* Parameters used to create and initialise the BPF exeution context.
+* Parameters used to create and initialise the BPF execution context.
 * @return
 * BPF handle that is used in future BPF operations,
 * or NULL on error, with error code set in rte_errno.
@@ -136,7 +136,7 @@ rte_bpf_load(const struct rte_bpf_prm *prm);
 * file into it.
 *
 * @param prm
-* Parameters used to create and initialise the BPF exeution context.
+* Parameters used to create and initialise the BPF execution context.
 * @param fname
 * Pathname for a ELF file.
 * @param sname
@@ -183,7 +183,7 @@ rte_bpf_exec_burst(const struct rte_bpf *bpf, void *ctx[], uint64_t rc[],
 uint32_t num);
 /**
-* Provide information about natively compield code for given BPF handle.
+* Provide information about natively compiled code for given BPF handle.
 *
 * @param bpf
 * handle for the BPF code.
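
The "execution context" these hunks describe is created by the load calls; a minimal sketch of rte_bpf_elf_load(), under the assumption that prm has been filled with a valid prog_arg (and any xsym table) before the call, with placeholder file and section names:

    #include <stdio.h>
    #include <rte_bpf.h>
    #include <rte_errno.h>

    static void load_example(void)
    {
        struct rte_bpf_prm prm = { 0 }; /* must describe prog_arg/xsym in real use */
        struct rte_bpf *bpf = rte_bpf_elf_load(&prm, "/path/to/filter.o", "filter");

        if (bpf == NULL)
            printf("BPF load failed: %d\n", rte_errno); /* rte_errno set on error */
        else
            rte_bpf_destroy(bpf);
    }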

@@ -73,7 +73,7 @@ rte_bpf_eth_tx_unload(uint16_t port, uint16_t queue);
 * @param sname
 * Name of the executable section within the file to load.
 * @param prm
-* Parameters used to create and initialise the BPF exeution context.
+* Parameters used to create and initialise the BPF execution context.
 * @param flags
 * Flags that define expected behavior of the loaded filter
 * (i.e. jited/non-jited version to use).
@@ -98,7 +98,7 @@ rte_bpf_eth_rx_elf_load(uint16_t port, uint16_t queue,
 * @param sname
 * Name of the executable section within the file to load.
 * @param prm
-* Parameters used to create and initialise the BPF exeution context.
+* Parameters used to create and initialise the BPF execution context.
 * @param flags
 * Flags that define expected expected behavior of the loaded filter
 * (i.e. jited/non-jited version to use).

@@ -114,15 +114,15 @@ enum rte_crypto_rsa_padding_type {
 /**< RSA no padding scheme */
 RTE_CRYPTO_RSA_PKCS1_V1_5_BT0,
 /**< RSA PKCS#1 V1.5 Block Type 0 padding scheme
-* as descibed in rfc2313
+* as described in rfc2313
 */
 RTE_CRYPTO_RSA_PKCS1_V1_5_BT1,
 /**< RSA PKCS#1 V1.5 Block Type 01 padding scheme
-* as descibed in rfc2313
+* as described in rfc2313
 */
 RTE_CRYPTO_RSA_PKCS1_V1_5_BT2,
 /**< RSA PKCS#1 V1.5 Block Type 02 padding scheme
-* as descibed in rfc2313
+* as described in rfc2313
 */
 RTE_CRYPTO_RSA_PADDING_OAEP,
 /**< RSA PKCS#1 OAEP padding scheme */
@@ -229,7 +229,7 @@ struct rte_crypto_rsa_xform {
 /**
 * Asymmetric Modular exponentiation transform data
 *
-* Structure describing modular exponentation xform param
+* Structure describing modular exponentiation xform param
 *
 */
 struct rte_crypto_modex_xform {
@@ -282,7 +282,7 @@ struct rte_crypto_dh_xform {
 rte_crypto_param p;
 /**< p : Prime modulus data
-* DH prime modulous data in octet-string network byte order format.
+* DH prime modulus data in octet-string network byte order format.
 *
 */
@@ -348,7 +348,7 @@ struct rte_crypto_mod_op_param {
 * data in octet-string network byte order format.
 *
 * This field shall be big enough to hold the result of Modular
-* Exponentiation or Modular Multplicative Inverse
+* Exponentiation or Modular Multiplicative Inverse
 * (bigger or equal to length of modulus)
 */
 };

@@ -1141,7 +1141,7 @@ rte_cryptodev_asym_session_clear(uint8_t dev_id,
 * the user data size.
 *
 * @return
-* Size of the symmetric eader session.
+* Size of the symmetric header session.
 */
 unsigned int
 rte_cryptodev_sym_get_header_session_size(void);

@@ -41,7 +41,7 @@ extern "C" {
 /**
 * Maximum number of workers allowed.
-* Be aware of increasing the limit, becaus it is limited by how we track
+* Be aware of increasing the limit, because it is limited by how we track
 * in-flight tags. See in_flight_bitmask and rte_distributor_process
 */
 #define RTE_DISTRIB_MAX_WORKERS 64

@@ -173,7 +173,7 @@ rte_delay_us_sleep(unsigned int us);
 *
 * @param userfunc
 * User function which replaces rte_delay_us. rte_delay_us_block restores
-* buildin block delay function.
+* builtin block delay function.
 */
 void rte_delay_us_callback_register(void(*userfunc)(unsigned int));
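
A small sketch of the registration described above; the usleep()-based callback is illustrative only (DPDK already ships rte_delay_us_sleep() for this purpose), and re-registering rte_delay_us_block restores the builtin behaviour as the comment says:

    #include <unistd.h>
    #include <rte_cycles.h>

    static void sleep_delay_us(unsigned int us)
    {
        usleep(us); /* yield to the OS instead of busy-waiting */
    }

    static void use_sleeping_delays(void)
    {
        rte_delay_us_callback_register(sleep_delay_us);     /* replace rte_delay_us */
        rte_delay_us_callback_register(rte_delay_us_block); /* restore builtin */
    }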

@@ -15,7 +15,7 @@
 *
 * A device class defines the type of function a device
 * will be used for e.g.: Ethernet adapter (eth),
-* cryptographic coprocessor (crypto), etc.
+* cryptographic co-processor (crypto), etc.
 */
 #ifdef __cplusplus

@@ -359,7 +359,7 @@ rte_is_power_of_2(uint32_t n)
 * Aligns input parameter to the next power of 2
 *
 * @param x
-* The integer value to algin
+* The integer value to align
 *
 * @return
 * Input parameter aligned to the next power of 2
@@ -377,7 +377,7 @@ rte_align32pow2(uint32_t x)
 * Aligns input parameter to the previous power of 2
 *
 * @param x
-* The integer value to algin
+* The integer value to align
 *
 * @return
 * Input parameter aligned to the previous power of 2
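
To make the two doc comments concrete, a quick sketch (rte_align32prevpow2 is assumed to be the "previous power of 2" counterpart documented in this same header):

    #include <rte_common.h>

    uint32_t up = rte_align32pow2(33);       /* next power of 2 -> 64 */
    uint32_t down = rte_align32prevpow2(33); /* previous power of 2 -> 32 */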

@@ -284,7 +284,7 @@ rte_mp_action_unregister(const char *name);
 *
 * Send a message to the peer process.
 *
-* This function will send a message which will be responsed by the action
+* This function will send a message which will be responded by the action
 * identified by name in the peer process.
 *
 * @param msg
@@ -421,7 +421,7 @@ rte_set_application_usage_hook(rte_usage_hook_t usage_func);
 #define RTE_EAL_TAILQ_RWLOCK (&rte_eal_get_configuration()->mem_config->qlock)
 /**
-* macro to get the multiple lock of mempool shared by mutiple-instance
+* macro to get the multiple lock of mempool shared by multiple-instance
 */
 #define RTE_EAL_MEMPOOL_RWLOCK (&rte_eal_get_configuration()->mem_config->mplock)

@@ -37,7 +37,7 @@ struct rte_logs {
 struct rte_log_dynamic_type *dynamic_types;
 };
-/** Global log informations */
+/** Global log information */
 extern struct rte_logs rte_logs;
 /* SDK log type */

@@ -337,7 +337,7 @@ int32_t rte_service_set_stats_enable(uint32_t id, int32_t enable);
 int32_t rte_service_lcore_list(uint32_t array[], uint32_t n);
 /**
-* Get the numer of services running on the supplied lcore.
+* Get the number of services running on the supplied lcore.
 *
 * @param lcore Id of the service core.
 * @retval >=0 Number of services registered to this core.
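
A minimal sketch tying the two calls in this hunk together, assuming rte_service_lcore_count_services() is the per-lcore counter this fixed comment documents:

    #include <stdio.h>
    #include <rte_lcore.h>
    #include <rte_service.h>

    static void dump_service_cores(void)
    {
        uint32_t ids[RTE_MAX_LCORE];
        int32_t i, n = rte_service_lcore_list(ids, RTE_MAX_LCORE);

        for (i = 0; i < n; i++)
            printf("service lcore %u runs %d services\n",
                   ids[i], rte_service_lcore_count_services(ids[i]));
    }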

@@ -53,7 +53,7 @@ struct rte_tailq_elem {
 };
 /**
-* Return the first tailq entry casted to the right struct.
+* Return the first tailq entry cast to the right struct.
 */
 #define RTE_TAILQ_CAST(tailq_entry, struct_name) \
 (struct struct_name *)&(tailq_entry)->tailq_head
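
Typical use of this macro, sketched from the pattern used by EAL-registered tailqs (rte_mempool_tailq and rte_mempool_list are assumed to be declared via EAL_REGISTER_TAILQ, as in librte_mempool):

    struct rte_mempool_list *list;

    list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);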

@@ -43,7 +43,7 @@ extern "C" {
 #include <stdbool.h>
 /**
-* Struct describing a Universal Unique Identifer
+* Struct describing a Universal Unique Identifier
 */
 typedef unsigned char rte_uuid_t[16];
@@ -105,7 +105,7 @@ int rte_uuid_compare(const rte_uuid_t a, const rte_uuid_t b);
 * @param uu
 * Destination UUID
 * @return
-* Returns 0 on succes, and -1 if string is not a valid UUID.
+* Returns 0 on success, and -1 if string is not a valid UUID.
 */
 int rte_uuid_parse(const char *in, rte_uuid_t uu);
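
A minimal sketch exercising the declaration above (the UUID string is an arbitrary example):

    #include <stdio.h>
    #include <rte_uuid.h>

    static void parse_example(void)
    {
        rte_uuid_t uu;

        if (rte_uuid_parse("6b8b4567-327b-23c6-643c-986966334873", uu) == 0)
            printf("parsed into a 16-byte binary UUID\n");
        else
            printf("not a valid UUID string\n"); /* returns -1 */
    }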

@@ -180,7 +180,7 @@ int rte_vfio_noiommu_is_enabled(void);
 * an error on BSD.
 *
 * @param vfio_group_fd
-* VFIO Grouup FD.
+* VFIO Group FD.
 *
 * @return
 * 0 on success.

@@ -1443,7 +1443,7 @@ eal_legacy_hugepage_init(void)
 if (mcfg->dma_maskbits &&
 rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
 RTE_LOG(ERR, EAL,
-"%s(): couldnt allocate memory due to IOVA exceeding limits of current DMA mask.\n",
+"%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask.\n",
 __func__);
 if (rte_eal_iova_mode() == RTE_IOVA_VA &&
 rte_eal_using_phys_addrs())

@@ -191,7 +191,7 @@ rte_efd_find_existing(const char *name);
 * This operation was still successful, and entry contains a valid update
 * RTE_EFD_UPDATE_FAILED
 * Either the EFD failed to find a suitable perfect hash or the group was full
-* This is a fatal error, and the table is now in an indeterminite state
+* This is a fatal error, and the table is now in an indeterminate state
 * RTE_EFD_UPDATE_NO_CHANGE
 * Operation resulted in no change to the table (same value already exists)
 * 0 - success

@@ -540,7 +540,7 @@ struct rte_eth_fdir_masks {
 uint16_t vlan_tci_mask; /**< Bit mask for vlan_tci in big endian */
 /** Bit mask for ipv4 flow in big endian. */
 struct rte_eth_ipv4_flow ipv4_mask;
-/** Bit maks for ipv6 flow in big endian. */
+/** Bit mask for ipv6 flow in big endian. */
 struct rte_eth_ipv6_flow ipv6_mask;
 /** Bit mask for L4 source port in big endian. */
 uint16_t src_port_mask;

@@ -448,7 +448,7 @@ struct rte_eth_rss_conf {
 /*
 * A packet can be identified by hardware as different flow types. Different
-* NIC hardwares may support different flow types.
+* NIC hardware may support different flow types.
 * Basically, the NIC hardware identifies the flow type as deep protocol as
 * possible, and exclusively. For example, if a packet is identified as
 * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
@@ -483,7 +483,7 @@ struct rte_eth_rss_conf {
 /*
 * The RSS offload types are defined based on flow types.
-* Different NIC hardwares may support different RSS offload
+* Different NIC hardware may support different RSS offload
 * types. The supported flow types or RSS offload types can be queried by
 * rte_eth_dev_info_get().
 */
@@ -2220,7 +2220,7 @@ rte_eth_xstats_get_names_by_id(uint16_t port_id,
 * A pointer to an ids array passed by application. This tells which
 * statistics values function should retrieve. This parameter
 * can be set to NULL if size is 0. In this case function will retrieve
-* all avalible statistics.
+* all available statistics.
 * @param values
 * A pointer to a table to be filled with device statistics values.
 * @param size
@@ -2690,7 +2690,7 @@ rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
 /**
 * Request the driver to free mbufs currently cached by the driver. The
 * driver will only free the mbuf if it is no longer in use. It is the
-* application's responsibity to ensure rte_eth_tx_buffer_flush(..) is
+* application's responsibility to ensure rte_eth_tx_buffer_flush(..) is
 * called if needed.
 *
 * @param port_id

@@ -105,7 +105,7 @@ typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,
 typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
 struct rte_eth_dev_info *dev_info);
-/**< @internal Get specific informations of an Ethernet device. */
+/**< @internal Get specific information of an Ethernet device. */
 typedef const uint32_t *(*eth_dev_supported_ptypes_get_t)(struct rte_eth_dev *dev);
 /**< @internal Get supported ptypes of an Ethernet device. */
@@ -367,7 +367,7 @@ typedef int (*eth_tm_ops_get_t)(struct rte_eth_dev *dev, void *ops);
 /**< @internal Get Traffic Management (TM) operations on an Ethernet device */
 typedef int (*eth_mtr_ops_get_t)(struct rte_eth_dev *dev, void *ops);
-/**< @internal Get Trafffic Metering and Policing (MTR) operations */
+/**< @internal Get Traffic Metering and Policing (MTR) operations */
 typedef int (*eth_get_dcb_info)(struct rte_eth_dev *dev,
 struct rte_eth_dcb_info *dcb_info);

@@ -317,7 +317,7 @@ typedef int (*ethdev_uninit_t)(struct rte_eth_dev *ethdev);
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
-* PMD helper function for cleaing up the resources of a ethdev port on it's
+* PMD helper function for cleaning up the resources of a ethdev port on it's
 * destruction.
 *
 * @param ethdev

@@ -369,7 +369,7 @@ struct rte_tm_capabilities {
 uint32_t sched_wfq_weight_max;
 /** WRED packet mode support. When non-zero, this parameter indicates
-* that there is atleast one leaf node that supports the WRED packet
+* that there is at least one leaf node that supports the WRED packet
 * mode, which might not be true for all the leaf nodes. In packet
 * mode, the WRED thresholds specify the queue length in packets, as
 * opposed to bytes.
@@ -377,7 +377,7 @@ struct rte_tm_capabilities {
 int cman_wred_packet_mode_supported;
 /** WRED byte mode support. When non-zero, this parameter indicates that
-* there is atleast one leaf node that supports the WRED byte mode,
+* there is at least one leaf node that supports the WRED byte mode,
 * which might not be true for all the leaf nodes. In byte mode, the
 * WRED thresholds specify the queue length in bytes, as opposed to
 * packets.
@@ -636,7 +636,7 @@ struct rte_tm_level_capabilities {
 uint32_t shaper_shared_n_max;
 /** WRED packet mode support. When non-zero, this
-* parameter indicates that there is atleast one leaf
+* parameter indicates that there is at least one leaf
 * node on this level that supports the WRED packet
 * mode, which might not be true for all the leaf
 * nodes. In packet mode, the WRED thresholds specify
@@ -645,7 +645,7 @@ struct rte_tm_level_capabilities {
 int cman_wred_packet_mode_supported;
 /** WRED byte mode support. When non-zero, this
-* parameter indicates that there is atleast one leaf
+* parameter indicates that there is at least one leaf
 * node on this level that supports the WRED byte mode,
 * which might not be true for all the leaf nodes. In
 * byte mode, the WRED thresholds specify the queue

@@ -139,7 +139,7 @@
 * - rte_event_crypto_adapter_stats_get()
 * - rte_event_crypto_adapter_stats_reset()
-* The applicaton creates an instance using rte_event_crypto_adapter_create()
+* The application creates an instance using rte_event_crypto_adapter_create()
 * or rte_event_crypto_adapter_create_ext().
 *
 * Cryptodev queue pair addition/deletion is done using the

@@ -66,9 +66,9 @@
 * For SW based packet transfers, i.e., when the
 * RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT is not set in the adapter's
 * capabilities flags for a particular ethernet device, the service function
-* temporarily enqueues mbufs to an event buffer before batch enqueueing these
+* temporarily enqueues mbufs to an event buffer before batch enqueuing these
 * to the event device. If the buffer fills up, the service function stops
-* dequeueing packets from the ethernet device. The application may want to
+* dequeuing packets from the ethernet device. The application may want to
 * monitor the buffer fill level and instruct the service function to
 * selectively buffer packets. The application may also use some other
 * criteria to decide which packets should enter the event device even when

@@ -1155,7 +1155,7 @@ rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
 */
 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8
-/**< Flag indicates HW/SW suports a mechanism to store and retrieve
+/**< Flag indicates HW/SW supports a mechanism to store and retrieve
 * the private data information along with the crypto session.
 */
@@ -1731,7 +1731,7 @@ rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
 * @see rte_event_port_unlink() to issue unlink requests.
 *
 * @param dev_id
-* The indentifier of the device.
+* The identifier of the device.
 *
 * @param port_id
 * Event port identifier to select port to check for unlinks in progress.

@@ -873,7 +873,7 @@ typedef int (*eventdev_eth_tx_adapter_free_t)(uint8_t id,
 * Ethernet device pointer
 *
 * @param tx_queue_id
-* Transmt queue index
+* Transmit queue index
 *
 * @return
 * - 0: Success.

@@ -208,7 +208,7 @@ rte_flow_classify_validate(struct rte_flow_classifier *cls,
 struct rte_flow_error *error);
 /**
-* Add a flow classify rule to the flow_classifer table.
+* Add a flow classify rule to the flow_classifier table.
 *
 * @param[in] cls
 * Flow classifier handle
@@ -235,7 +235,7 @@ rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
 struct rte_flow_error *error);
 /**
-* Delete a flow classify rule from the flow_classifer table.
+* Delete a flow classify rule from the flow_classifier table.
 *
 * @param[in] cls
 * Flow classifier handle

@@ -39,7 +39,7 @@ extern "C" {
 /** Flag to support reader writer concurrency */
 #define RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY 0x04
-/** Flag to indicate the extendabe bucket table feature should be used */
+/** Flag to indicate the extendable bucket table feature should be used */
 #define RTE_HASH_EXTRA_FLAGS_EXT_TABLE 0x08
 /** Flag to disable freeing of key index on hash delete.
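
The extendable-bucket flag above is passed through rte_hash_parameters at creation time; a minimal sketch (the table name, sizes and the rte_jhash choice are illustrative):

    #include <rte_hash.h>
    #include <rte_jhash.h>

    static struct rte_hash *make_flow_table(void)
    {
        struct rte_hash_parameters params = {
            .name = "flow_table",
            .entries = 1024,
            .key_len = sizeof(uint32_t),
            .hash_func = rte_jhash,
            .socket_id = 0,
            .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
        };

        return rte_hash_create(&params); /* NULL on failure */
    }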

@@ -274,7 +274,7 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
 /**
 * This function implements reassembly of fragmented IPv4 packets.
-* Incoming mbufs should have its l2_len/l3_len fields setup correclty.
+* Incoming mbufs should have its l2_len/l3_len fields setup correctly.
 *
 * @param tbl
 * Table where to lookup/add the fragmented packet.

@@ -95,7 +95,7 @@ gen_iv(uint64_t iv[IPSEC_MAX_IV_QWORD], rte_be64_t sqn)
 /*
 * Helper routine to copy IV
-* Righ now we support only algorithms with IV length equals 0/8/16 bytes.
+* Right now we support only algorithms with IV length equals 0/8/16 bytes.
 */
 static inline void
 copy_iv(uint64_t dst[IPSEC_MAX_IV_QWORD],

@@ -7,7 +7,7 @@
 /**
 * @file misc.h
-* Contains miscelaneous functions/structures/macros used internally
+* Contains miscellaneous functions/structures/macros used internally
 * by ipsec library.
 */

@@ -10,7 +10,7 @@
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * RTE IPsec support.
-* It is not recommended to include this file direclty,
+* It is not recommended to include this file directly,
 * include <rte_ipsec.h> instead.
 * Contains helper functions to process completed crypto-ops
 * and group related packets by sessions they belong to.

@@ -134,7 +134,7 @@ rte_ipsec_sa_type(const struct rte_ipsec_sa *sa);
 /**
 * Calculate required SA size based on provided input parameters.
 * @param prm
-* Parameters that wil be used to initialise SA object.
+* Parameters that will be used to initialise SA object.
 * @return
 * - Actual size required for SA with given parameters.
 * - -EINVAL if the parameters are invalid.

@@ -24,7 +24,7 @@ extern "C" {
 * Note: This function pointer is for future flow based latency stats
 * implementation.
 *
-* Function type used for identifting flow types of a Rx packet.
+* Function type used for identifying flow types of a Rx packet.
 *
 * The callback function is called on Rx for each packet.
 * This function is used for flow based latency calculations.

@@ -442,7 +442,7 @@ rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
 * @param hop
 * Next hop of the most specific rule found for IP (valid on lookup hit only).
 * This is an 4 elements array of two byte values.
-* If the lookup was succesfull for the given IP, then least significant byte
+* If the lookup was successful for the given IP, then least significant byte
 * of the corresponding element is the actual next hop and the most
 * significant byte is zero.
 * If the lookup for the given IP failed, then corresponding element would

@@ -210,7 +210,7 @@
 /**
 * Outer UDP checksum offload flag. This flag is used for enabling
 * outer UDP checksum in PMD. To use outer UDP checksum, the user needs to
-* 1) Enable the following in mbuff,
+* 1) Enable the following in mbuf,
 * a) Fill outer_l2_len and outer_l3_len in mbuf.
 * b) Set the PKT_TX_OUTER_UDP_CKSUM flag.
 * c) Set the PKT_TX_OUTER_IPV4 or PKT_TX_OUTER_IPV6 flag.
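
The three steps above map onto an mbuf roughly as follows; a sketch for an IPv4 outer header using the 19.05-era struct names, not the full Tx path:

    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_mbuf.h>

    static void request_outer_udp_cksum(struct rte_mbuf *m)
    {
        m->outer_l2_len = sizeof(struct ether_hdr); /* step a */
        m->outer_l3_len = sizeof(struct ipv4_hdr);
        m->ol_flags |= PKT_TX_OUTER_UDP_CKSUM;      /* step b */
        m->ol_flags |= PKT_TX_OUTER_IPV4;           /* step c */
    }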
@@ -485,7 +485,7 @@ struct rte_mbuf_sched {
 }; /**< Hierarchical scheduler */
 /**
-* enum for the tx_offload bit-fields lenghts and offsets.
+* enum for the tx_offload bit-fields lengths and offsets.
 * defines the layout of rte_mbuf tx_offload field.
 */
 enum {
@@ -1423,7 +1423,7 @@ static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
 * The given mbuf must have only one segment.
 *
 * @param m
-* The packet mbuf to be resetted.
+* The packet mbuf to be reset.
 */
 #define MBUF_INVALID_PORT UINT16_MAX
@@ -1596,7 +1596,7 @@ rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
 * ``rte_pktmbuf_detach()``.
 *
 * Memory for shared data must be provided and user must initialize all of
-* the content properly, escpecially free callback and refcnt. The pointer
+* the content properly, especially free callback and refcnt. The pointer
 * of shared data will be stored in m->shinfo.
 * ``rte_pktmbuf_ext_shinfo_init_helper`` can help to simply spare a few
 * bytes at the end of buffer for the shared data, store free callback and

@@ -426,7 +426,7 @@ extern "C" {
 */
 #define RTE_PTYPE_TUNNEL_ESP 0x00009000
 /**
-* L2TP (Layer 2 Tunneling Protocol) tunnleing packet type.
+* L2TP (Layer 2 Tunneling Protocol) tunneling packet type.
 *
 * Packet format:
 * <'ether type'=0x0800

@@ -427,7 +427,7 @@ typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
-* Dequeue a number of contiquous object blocks from the external pool.
+* Dequeue a number of contiguous object blocks from the external pool.
 */
 typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
 void **first_obj_table, unsigned int n);
@@ -1363,7 +1363,7 @@ __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 &cache->objs[cache->len], req);
 if (unlikely(ret < 0)) {
 /*
-* In the offchance that we are buffer constrained,
+* In the off chance that we are buffer constrained,
 * where we are not able to allocate cache + n, go to
 * the ring directly. If that fails, we are truly out of
 * buffers.

@@ -532,7 +532,7 @@ struct rte_table_action_encap_config {
 };
 };
-/** QinQ_PPPoE encap paramaeters. */
+/** QinQ_PPPoE encap parameters. */
 struct rte_table_encap_ether_qinq_pppoe {
 /** Only valid when *type* is set to QinQ. */

@@ -59,7 +59,7 @@ struct freq_threshold {
 uint32_t cur_train_iter;
 };
-/* Each Worder Thread Empty Poll Stats */
+/* Each Worker Thread Empty Poll Stats */
 struct priority_worker {
 /* Current dequeue and throughput counts */

@@ -25,7 +25,7 @@ extern "C" {
 #include <rte_memory.h>
 #include <rte_errno.h>
-/* Rawdevice object - essentially a void to be typecasted by implementation */
+/* Rawdevice object - essentially a void to be typecast by implementation */
 typedef void *rte_rawdev_obj_t;
 /**
@@ -244,7 +244,7 @@ rte_rawdev_close(uint16_t dev_id);
 * @param dev_id
 * Raw device identifiers
 * @return
-* 0 for sucessful reset,
+* 0 for successful reset,
 * !0 for failure in resetting
 */
 int
@@ -373,7 +373,7 @@ rte_rawdev_set_attr(uint16_t dev_id,
 * @param dev_id
 * The identifier of the device to configure.
 * @param buffers
-* Collection of buffers for enqueueing
+* Collection of buffers for enqueuing
 * @param count
 * Count of buffers to enqueue
 * @param context

@@ -282,7 +282,7 @@ typedef uint16_t (*rawdev_queue_count_t)(struct rte_rawdev *dev);
 * an opaque object representing context of the call; for example, an
 * application can pass information about the queues on which enqueue needs
 * to be done. Or, the enqueue operation might be passed reference to an
-* object containing a callback (agreed upon between applicatio and driver).
+* object containing a callback (agreed upon between application and driver).
 *
 * @return
 * >=0 Count of buffers successfully enqueued (0: no buffers enqueued)
@@ -463,7 +463,7 @@ typedef int (*rawdev_firmware_version_get_t)(struct rte_rawdev *dev,
 rte_rawdev_obj_t version_info);
 /**
-* Load firwmare from a buffer (DMA'able)
+* Load firmware from a buffer (DMA'able)
 *
 * @param dev
 * Raw device pointer
@@ -480,7 +480,7 @@ typedef int (*rawdev_firmware_load_t)(struct rte_rawdev *dev,
 rte_rawdev_obj_t firmware_buf);
 /**
-* Unload firwmare
+* Unload firmware
 *
 * @param dev
 * Raw device pointer
@@ -548,7 +548,7 @@ struct rte_rawdev_ops {
 /**< Reset the statistics values in xstats. */
 rawdev_xstats_reset_t xstats_reset;
-/**< Obtainer firmware status */
+/**< Obtain firmware status */
 rawdev_firmware_status_get_t firmware_status_get;
 /**< Obtain firmware version information */
 rawdev_firmware_version_get_t firmware_version_get;

@@ -70,7 +70,7 @@ rte_reorder_init(struct rte_reorder_buffer *b, unsigned int bufsize,
 * and return a pointer to it.
 *
 * @param name
-* Name of the reorder buffer instacne as passed to rte_reorder_create()
+* Name of the reorder buffer instance as passed to rte_reorder_create()
 * @return
 * Pointer to reorder buffer instance or NULL if object not found with rte_errno
 * set appropriately. Possible rte_errno values include:

@@ -302,7 +302,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
 * (powerpc/arm).
 * There are 2 choices for the users
 * 1.use rmb() memory barrier
-* 2.use one-direcion load_acquire/store_release barrier,defined by
+* 2.use one-direction load_acquire/store_release barrier,defined by
 * CONFIG_RTE_USE_C11_MEM_MODEL=y
 * It depends on performance test results.
 * By default, move common functions to rte_ring_generic.h

@@ -33,7 +33,7 @@ extern "C" {
 * classes of the same subport;
 * - When any subport traffic class is oversubscribed
 * (configuration time event), the usage of subport member
-* pipes with high demand for thattraffic class pipes is
+* pipes with high demand for that traffic class pipes is
 * truncated to a dynamically adjusted value with no
 * impact to low demand pipes;
 * 3. Pipe:

@@ -536,7 +536,7 @@ struct rte_security_capability {
 enum rte_security_pdcp_domain domain;
 /**< PDCP mode of operation: Control or data */
 uint32_t capa_flags;
-/**< Capabilitity flags, see RTE_SECURITY_PDCP_* */
+/**< Capability flags, see RTE_SECURITY_PDCP_* */
 } pdcp;
 /**< PDCP capability */
 };
@@ -568,7 +568,7 @@ struct rte_security_capability {
 #define RTE_SECURITY_TX_HW_TRAILER_OFFLOAD 0x00000002
 /**< HW constructs trailer of packets
 * Transmitted packets will have the trailer added to them
-* by hardawre. The next protocol field will be based on
+* by hardware. The next protocol field will be based on
 * the mbuf->inner_esp_next_proto field.
 */
 #define RTE_SECURITY_RX_HW_TRAILER_OFFLOAD 0x00010000

@@ -29,7 +29,7 @@ extern "C" {
 * be picked and dropped, the most likely candidate for drop, i.e. the
 * current LRU key, is always picked. The LRU logic requires maintaining
 * specific data structures per each bucket. Use-cases: flow cache, etc.
-* b. Extendible bucket (ext): The bucket is extended with space for 4 more
+* b. Extendable bucket (ext): The bucket is extended with space for 4 more
 * keys. This is done by allocating additional memory at table init time,
 * which is used to create a pool of free keys (the size of this pool is
 * configurable and always a multiple of 4). On key add operation, the
@@ -41,7 +41,7 @@ extern "C" {
 * current bucket is in extended state and a match is not found in the
 * first group of 4 keys, the search continues beyond the first group of
 * 4 keys, potentially until all keys in this bucket are examined. The
-* extendible bucket logic requires maintaining specific data structures
+* extendable bucket logic requires maintaining specific data structures
 * per table and per each bucket. Use-cases: flow table, etc.
 * 2. Key size:
 * a. Configurable key size
@@ -86,7 +86,7 @@ struct rte_table_hash_params {
 uint64_t seed;
 };
-/** Extendible bucket hash table operations */
+/** Extendable bucket hash table operations */
 extern struct rte_table_ops rte_table_hash_ext_ops;
 extern struct rte_table_ops rte_table_hash_key8_ext_ops;
 extern struct rte_table_ops rte_table_hash_key16_ext_ops;

@@ -542,7 +542,7 @@ int rte_vhost_get_ifname(int vid, char *buf, size_t len);
 * virtio queue index
 *
 * @return
-* num of avail entires left
+* num of avail entries left
 */
 uint16_t rte_vhost_avail_entries(int vid, uint16_t queue_id);
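
A trivial use of the call above to poll how many avail-ring entries the guest has posted (the vid and queue index values are placeholders):

    #include <rte_vhost.h>

    static int guest_has_work(int vid)
    {
        return rte_vhost_avail_entries(vid, 0) > 0; /* virtqueue 0 */
    }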
@@ -590,7 +590,7 @@ uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 /**
 * Get guest mem table: a list of memory regions.
 *
-* An rte_vhost_vhost_memory object will be allocated internaly, to hold the
+* An rte_vhost_vhost_memory object will be allocated internally, to hold the
 * guest memory regions. Application should free it at destroy_device()
 * callback.
 *
*