fix spelling in comments and doxygen
Fix spelling errors, found using codespell, in comments (including doxygen).

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Chenbo Xia <chenbo.xia@intel.com>
commit 4a6672c2d3
parent 1306a73b19
@@ -3653,7 +3653,7 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
         return nb_item;
 
     /*
-     * Then, check that all values in the list are differents.
+     * Then, check that all values in the list are different.
     * No optimization here...
     */
    for (i = 0; i < nb_item; i++) {
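An aside on the hunk above: the "no optimization here" duplicate check that this comment introduces is a plain quadratic scan over the parsed values. A minimal standalone sketch of the idea, with a hypothetical helper name rather than the actual testpmd code:

    #include <stdio.h>

    /* Return 0 when all nb_item entries are pairwise different,
     * -1 on the first duplicate: O(n^2), as the comment admits. */
    static int
    check_all_different(const unsigned int *items, unsigned int nb_item)
    {
        unsigned int i, j;

        for (i = 0; i < nb_item; i++)
            for (j = i + 1; j < nb_item; j++)
                if (items[i] == items[j]) {
                    fprintf(stderr, "duplicate value %u\n", items[i]);
                    return -1;
                }
        return 0;
    }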
@@ -2965,7 +2965,7 @@ port_rss_reta_info(portid_t port_id,
 }
 
 /*
- * Displays the RSS hash functions of a port, and, optionaly, the RSS hash
+ * Displays the RSS hash functions of a port, and, optionally, the RSS hash
  * key of the port.
  */
 void
@@ -5250,7 +5250,7 @@ mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
 {
     port->mc_addr_nb--;
     if (addr_idx == port->mc_addr_nb) {
-        /* No need to recompact the set of multicast addressses. */
+        /* No need to recompact the set of multicast addresses. */
         if (port->mc_addr_nb == 0) {
             /* free the pool of multicast addresses. */
             free(port->mc_addr_pool);
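The "recompact" wording in this hunk refers to closing the hole left in the address array when a non-final entry is removed; upstream does this with a memmove over the tail. A hedged sketch of that idea (standalone helper, field names only mirror the hunk):

    #include <stdint.h>
    #include <string.h>
    #include <rte_ether.h>

    /* Remove pool[addr_idx]; when it is not the last entry, slide the
     * remaining addresses down one slot to keep the array dense. */
    static void
    mc_pool_remove_sketch(struct rte_ether_addr *pool, uint32_t *nb_addr,
                  uint32_t addr_idx)
    {
        (*nb_addr)--;
        if (addr_idx == *nb_addr)
            return; /* removed the last entry: nothing to recompact */
        memmove(&pool[addr_idx], &pool[addr_idx + 1],
            sizeof(pool[0]) * (*nb_addr - addr_idx));
    }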
@@ -54,7 +54,7 @@ arp_op_name(uint16_t arp_op)
     default:
         break;
     }
-    return "Unkwown ARP op";
+    return "Unknown ARP op";
 }
 
 static const char *
@@ -88,7 +88,7 @@
  *
  * - Invoke ``test_atomic_exchange`` on each lcore. Before doing
  *   anything else, the cores wait for a synchronization event.
- *   Each core then does the follwoing for N iterations:
+ *   Each core then does the following for N iterations:
  *
  *   Generate a new token with a data integrity check
  *   Exchange the new token for previously generated token
@@ -66,7 +66,7 @@ struct plock_test {
 struct lcore_plock_test {
     struct plock_test *pt[2];   /* shared, lock-protected data */
     uint64_t sum[2];            /* local copy of the shared data */
-    uint64_t iter;              /* number of iterations to perfom */
+    uint64_t iter;              /* number of iterations to perform */
     uint32_t lc;                /* given lcore id */
 };
 
@@ -2407,7 +2407,7 @@ static const struct ebpf_insn test_call5_prog[] = {
     },
 };
 
-/* String comparision impelementation, return 0 if equal else difference */
+/* String comparison implementation, return 0 if equal else difference */
 static uint32_t
 dummy_func5(const char *s1, const char *s2)
 {
@@ -2033,7 +2033,7 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
     test_priv_data.all_decomp_data = &all_decomp_data;
     test_priv_data.decomp_produced_data_size = &decomp_produced_data_size;
 
-    test_priv_data.num_priv_xforms = 0; /* it's used for deompression only */
+    test_priv_data.num_priv_xforms = 0; /* it's used for decompression only */
 
     capa = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
     if (capa == NULL) {
@@ -253,7 +253,7 @@ hash_create_free(__rte_unused void *arg)
         rte_atomic32_inc(&obj_count);
     }
 
-    /* create mutiple times simultaneously */
+    /* create multiple times simultaneously */
     for (i = 0; i < MAX_ITER_MULTI; i++) {
         snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
         hash_params.name = hash_name;
@@ -321,7 +321,7 @@ fbk_create_free(__rte_unused void *arg)
         rte_atomic32_inc(&obj_count);
     }
 
-    /* create mutiple fbk tables simultaneously */
+    /* create multiple fbk tables simultaneously */
     for (i = 0; i < MAX_ITER_MULTI; i++) {
         snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
         fbk_params.name = fbk_name;
@@ -387,7 +387,7 @@ lpm_create_free(__rte_unused void *arg)
         rte_atomic32_inc(&obj_count);
     }
 
-    /* create mutiple fbk tables simultaneously */
+    /* create multiple fbk tables simultaneously */
     for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
         snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
         lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
@@ -653,7 +653,7 @@ create_crypto_session(struct ipsec_unitest_params *ut,
     if (s == NULL)
         return -ENOMEM;
 
-    /* initiliaze SA crypto session for device */
+    /* initialize SA crypto session for device */
     rc = rte_cryptodev_sym_session_init(dev_id, s,
             ut->crypto_xforms, qp->mp_session_private);
     if (rc == 0) {
@@ -3040,7 +3040,7 @@ test_balance_tx_burst_slave_tx_fail(void)
     first_tx_fail_idx = TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 -
             TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT;
 
-    /* copy mbuf referneces for expected transmission failures */
+    /* copy mbuf references for expected transmission failures */
     for (i = 0; i < TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT; i++)
         expected_fail_pkts[i] = pkts_burst_1[i + first_tx_fail_idx];
 
@@ -179,7 +179,7 @@ test3(void)
     status = rte_lpm_add(NULL, ip, depth, next_hop);
     TEST_LPM_ASSERT(status < 0);
 
-    /*Create vaild lpm to use in rest of test. */
+    /*Create valid lpm to use in rest of test. */
     lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
     TEST_LPM_ASSERT(lpm != NULL);
 
@@ -217,7 +217,7 @@ test4(void)
     status = rte_lpm_delete(NULL, ip, depth);
     TEST_LPM_ASSERT(status < 0);
 
-    /*Create vaild lpm to use in rest of test. */
+    /*Create valid lpm to use in rest of test. */
     lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
     TEST_LPM_ASSERT(lpm != NULL);
 
@@ -255,7 +255,7 @@ test5(void)
     status = rte_lpm_lookup(NULL, ip, &next_hop_return);
     TEST_LPM_ASSERT(status < 0);
 
-    /*Create vaild lpm to use in rest of test. */
+    /*Create valid lpm to use in rest of test. */
     lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
     TEST_LPM_ASSERT(lpm != NULL);
 
@@ -261,7 +261,7 @@ test4(void)
     status = rte_lpm6_add(NULL, ip, depth, next_hop);
     TEST_LPM_ASSERT(status < 0);
 
-    /*Create vaild lpm to use in rest of test. */
+    /*Create valid lpm to use in rest of test. */
     lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
     TEST_LPM_ASSERT(lpm != NULL);
 
@@ -299,7 +299,7 @@ test5(void)
     status = rte_lpm6_delete(NULL, ip, depth);
     TEST_LPM_ASSERT(status < 0);
 
-    /*Create vaild lpm to use in rest of test. */
+    /*Create valid lpm to use in rest of test. */
     lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
     TEST_LPM_ASSERT(lpm != NULL);
 
@@ -337,7 +337,7 @@ test6(void)
     status = rte_lpm6_lookup(NULL, ip, &next_hop_return);
     TEST_LPM_ASSERT(status < 0);
 
-    /*Create vaild lpm to use in rest of test. */
+    /*Create valid lpm to use in rest of test. */
     lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
     TEST_LPM_ASSERT(lpm != NULL);
 
@@ -375,7 +375,7 @@ test7(void)
     status = rte_lpm6_lookup_bulk_func(NULL, ip, next_hop_return, 10);
     TEST_LPM_ASSERT(status < 0);
 
-    /*Create vaild lpm to use in rest of test. */
+    /*Create valid lpm to use in rest of test. */
     lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
     TEST_LPM_ASSERT(lpm != NULL);
 
@@ -413,7 +413,7 @@ test8(void)
     status = rte_lpm6_delete_bulk_func(NULL, ip, depth, 10);
     TEST_LPM_ASSERT(status < 0);
 
-    /*Create vaild lpm to use in rest of test. */
+    /*Create valid lpm to use in rest of test. */
     lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
     TEST_LPM_ASSERT(lpm != NULL);
 
@@ -433,7 +433,7 @@ test8(void)
 /*
  * Call add, lookup and delete for a single rule with depth < 24.
  * Check all the combinations for the first three bytes that result in a hit.
- * Delete the rule and check that the same test returs a miss.
+ * Delete the rule and check that the same test returns a miss.
  */
 int32_t
 test9(void)
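The add/lookup/delete cycle these LPM tests exercise looks roughly like the following standalone sketch, assuming a DPDK release with 32-bit LPM next hops and the RTE_IPV4 helper (16.04/19.08 or later); error handling is trimmed:

    #include <rte_ip.h>
    #include <rte_lpm.h>
    #include <rte_memory.h>

    static int
    lpm_roundtrip(void)
    {
        struct rte_lpm_config config = {
            .max_rules = 256,
            .number_tbl8s = 256,
            .flags = 0,
        };
        struct rte_lpm *lpm;
        uint32_t nh = 0;
        uint32_t ip = RTE_IPV4(10, 0, 0, 0);
        int status;

        lpm = rte_lpm_create("sketch", SOCKET_ID_ANY, &config);
        if (lpm == NULL)
            return -1;

        status = rte_lpm_add(lpm, ip, 24, 100);     /* install 10.0.0.0/24 */
        status |= rte_lpm_lookup(lpm, ip | 5, &nh); /* hit: nh == 100 */
        status |= rte_lpm_delete(lpm, ip, 24);
        /* the same lookup now misses and returns -ENOENT */

        rte_lpm_free(lpm);
        return status;
    }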
@@ -1738,7 +1738,7 @@ test27(void)
  * Call add, lookup and delete for a single rule with maximum 21bit next_hop
  * size.
  * Check that next_hop returned from lookup is equal to provisioned value.
- * Delete the rule and check that the same test returs a miss.
+ * Delete the rule and check that the same test returns a miss.
  */
 int32_t
 test28(void)
@@ -603,7 +603,7 @@ test_realloc_numa(void)
         }
     }
 
-    /* Print warnign if only a single socket, but don't fail the test */
+    /* Print warning if only a single socket, but don't fail the test */
     if (socket_count < 2)
         printf("WARNING: realloc_socket test needs memory on multiple sockets!\n");
 
@@ -971,7 +971,7 @@ test_alloc_socket(void)
         }
     }
 
-    /* Print warnign if only a single socket, but don't fail the test */
+    /* Print warning if only a single socket, but don't fail the test */
     if (socket_count < 2) {
         printf("WARNING: alloc_socket test needs memory on multiple sockets!\n");
     }
@@ -1148,7 +1148,7 @@ test_refcnt_mbuf(void)
 
     rte_eal_mp_wait_lcore();
 
-    /* check that we porcessed all references */
+    /* check that we processed all references */
     tref = 0;
     main_lcore = rte_get_main_lcore();
 
@@ -994,7 +994,7 @@ test_mempool(void)
     if (test_mempool_basic_ex(mp_nocache) < 0)
         GOTO_ERR(ret, err);
 
-    /* mempool operation test based on single producer and single comsumer */
+    /* mempool operation test based on single producer and single consumer */
     if (test_mempool_sp_sc() < 0)
         GOTO_ERR(ret, err);
 
@@ -88,7 +88,7 @@ static uint32_t synchro;
 static unsigned n_get_bulk;
 static unsigned n_put_bulk;
 
-/* number of objects retrived from mempool before putting them back */
+/* number of objects retrieved from mempool before putting them back */
 static unsigned n_keep;
 
 /* number of enqueues / dequeues */
@@ -444,7 +444,7 @@ tm_test_srtcm_color_aware_check(void)
      * if using blind check
     */
 
-    /* previouly have a green, test points should keep unchanged */
+    /* previously have a green, test points should keep unchanged */
     in[0] = in[1] = in[2] = in[3] = RTE_COLOR_GREEN;
     out[0] = RTE_COLOR_GREEN;
     out[1] = RTE_COLOR_YELLOW;
@@ -551,7 +551,7 @@ tm_test_trtcm_color_aware_check(void)
      * if using blind check
     */
 
-    /* previouly have a green, test points should keep unchanged */
+    /* previously have a green, test points should keep unchanged */
     in[0] = in[1] = in[2] = in[3] = RTE_COLOR_GREEN;
     out[0] = RTE_COLOR_GREEN;
     out[1] = RTE_COLOR_YELLOW;
@@ -648,7 +648,7 @@ tm_test_trtcm_rfc4115_color_aware_check(void)
      * if using blind check
     */
 
-    /* previouly have a green, test points should keep unchanged */
+    /* previously have a green, test points should keep unchanged */
     in[0] = in[1] = in[2] = in[3] = RTE_COLOR_GREEN;
     out[0] = RTE_COLOR_GREEN;
     out[1] = RTE_COLOR_YELLOW;
@@ -752,7 +752,7 @@ test_pmd_perf(void)
             "rte_eth_dev_start: err=%d, port=%d\n",
             ret, portid);
 
-    /* always eanble promiscuous */
+    /* always enable promiscuous */
     ret = rte_eth_promiscuous_enable(portid);
     if (ret != 0)
         rte_exit(EXIT_FAILURE,
@@ -432,7 +432,7 @@ timer_basic_cb(struct rte_timer *tim, void *arg)
         return;
     }
 
-    /* Explicitelly stop timer 0. Once stop() called, we can even
+    /* Explicitly stop timer 0. Once stop() called, we can even
      * erase the content of the structure: it is not referenced
      * anymore by any code (in case of dynamic structure, it can
      * be freed) */
@@ -45,7 +45,7 @@ struct rte_acl_bitset {
  * Each transition is 64 bit value with the following format:
  * | node_type_specific : 32 | node_type : 3 | node_addr : 29 |
  * For all node types except RTE_ACL_NODE_MATCH, node_addr is an index
- * to the start of the node in the transtions array.
+ * to the start of the node in the transitions array.
  * Few different node types are used:
  * RTE_ACL_NODE_MATCH:
  * node_addr value is and index into an array that contains the return value
@@ -66,7 +66,7 @@ struct rte_acl_bitset {
  * RTE_ACL_NODE_SINGLE:
  * always transitions to the same node regardless of the input value.
 * RTE_ACL_NODE_DFA:
- * that node consits of up to 256 transitions.
+ * that node consists of up to 256 transitions.
  * In attempt to conserve space all transitions are divided into 4 consecutive
  * groups, by 64 transitions per group:
  * group64[i] contains transitions[i * 64, .. i * 64 + 63].
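Given the 64-bit transition layout documented in these two hunks (high 32 bits type-specific, then 3 bits of node_type, then 29 bits of node_addr), a transition word unpacks as below. The masks follow the comment's layout only; they are illustrative, not the actual rte_acl macros:

    #include <stdint.h>

    /* | node_type_specific : 32 | node_type : 3 | node_addr : 29 | */
    static inline uint32_t
    tr_node_addr(uint64_t tr)
    {
        return (uint32_t)tr & 0x1fffffffu;      /* low 29 bits */
    }

    static inline uint32_t
    tr_node_type(uint64_t tr)
    {
        return ((uint32_t)tr >> 29) & 0x7u;     /* next 3 bits */
    }

    static inline uint32_t
    tr_type_specific(uint64_t tr)
    {
        return (uint32_t)(tr >> 32);            /* high 32 bits */
    }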
@@ -1494,7 +1494,7 @@ acl_set_data_indexes(struct rte_acl_ctx *ctx)
 /*
  * Internal routine, performs 'build' phase of trie generation:
  * - setups build context.
- * - analizes given set of rules.
+ * - analyzes given set of rules.
  * - builds internal tree(s).
  */
 static int
@@ -125,7 +125,7 @@ acl_process_matches_avx2x8(const struct rte_acl_ctx *ctx,
     /* For each transition: put low 32 into tr_lo and high 32 into tr_hi */
     ACL_TR_HILO(mm256, __m256, t0, t1, lo, hi);
 
-    /* Keep transitions wth NOMATCH intact. */
+    /* Keep transitions with NOMATCH intact. */
     *tr_lo = _mm256_blendv_epi8(*tr_lo, lo, matches);
     *tr_hi = _mm256_blendv_epi8(*tr_hi, hi, matches);
 }
@@ -138,7 +138,7 @@ rte_bbdev_data_alloc(void)
 }
 
 /*
- * Find data alocated for the device or if not found return first unused bbdev
+ * Find data allocated for the device or if not found return first unused bbdev
  * data. If all structures are in use and none is used by the device return
  * NULL.
  */
@@ -1245,7 +1245,7 @@ emit_epilog(struct bpf_jit_state *st)
     uint32_t i;
     int32_t spil, ofs;
 
-    /* if we allready have an epilog generate a jump to it */
+    /* if we already have an epilog generate a jump to it */
     if (st->exit.num++ != 0) {
         emit_abs_jmp(st, st->exit.off);
         return;
@@ -80,7 +80,7 @@ resolve_xsym(const char *sn, size_t ofs, struct ebpf_insn *ins, size_t ins_sz,
     if (type == RTE_BPF_XTYPE_FUNC) {
 
         /* we don't support multiple functions per BPF module,
-         * so treat EBPF_PSEUDO_CALL to extrernal function
+         * so treat EBPF_PSEUDO_CALL to external function
         * as an ordinary EBPF_CALL.
         */
        if (ins[idx].src_reg == EBPF_PSEUDO_CALL) {
@@ -166,7 +166,7 @@ bpf_eth_cbh_add(struct bpf_eth_cbh *cbh, uint16_t port, uint16_t queue)
 }
 
 /*
- * BPF packet processing routinies.
+ * BPF packet processing routines.
 */
 
 static inline uint32_t
@@ -1730,7 +1730,7 @@ static const struct bpf_ins_check ins_chk[UINT8_MAX + 1] = {
 
 /*
  * make sure that instruction syntax is valid,
- * and it fields don't violate partciular instrcution type restrictions.
+ * and its fields don't violate particular instruction type restrictions.
 */
 static const char *
 check_syntax(const struct ebpf_insn *ins)
@@ -1961,7 +1961,7 @@ log_loop(const struct bpf_verifier *bvf)
 * First pass goes though all instructions in the set, checks that each
 * instruction is a valid one (correct syntax, valid field values, etc.)
 * and constructs control flow graph (CFG).
- * Then deapth-first search is performed over the constructed graph.
+ * Then depth-first search is performed over the constructed graph.
 * Programs with unreachable instructions and/or loops will be rejected.
 */
 static int
@@ -1988,7 +1988,7 @@ validate(struct bpf_verifier *bvf)
 
     /*
     * construct CFG, jcc nodes have to outgoing edges,
-     * 'exit' nodes - none, all others nodes have exaclty one
+     * 'exit' nodes - none, all other nodes have exactly one
     * outgoing edge.
     */
    switch (ins->code) {
@@ -2258,7 +2258,7 @@ evaluate(struct bpf_verifier *bvf)
     idx = get_node_idx(bvf, node);
     op = ins[idx].code;
 
-    /* for jcc node make a copy of evaluatoion state */
+    /* for jcc node make a copy of evaluation state */
     if (node->nb_edge > 1)
         rc |= save_eval_state(bvf, node);
 
@@ -15,7 +15,7 @@
 
 /*
  * Provides backwards compatibility when updating exported functions.
- * When a symol is exported from a library to provide an API, it also provides a
+ * When a symbol is exported from a library to provide an API, it also provides a
  * calling convention (ABI) that is embodied in its name, return type,
  * arguments, etc. On occasion that function may need to change to accommodate
  * new functionality, behavior, etc. When that occurs, it is desirable to
@@ -30,7 +30,7 @@ extern "C" {
 * with the given regular expression pattern.
 *
 * @param pattern
- *  regular expression notation decribing the pattern to match
+ *  regular expression notation describing the pattern to match
 *
 * @param string
 *   source string to searcg for the pattern
@@ -525,7 +525,7 @@ event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
 
     RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);
 
-    /* Determine the largest contigous run we can attempt to enqueue to the
+    /* Determine the largest contiguous run we can attempt to enqueue to the
     * event device.
     */
    if (head_idx > tail_idx)
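The "largest contiguous run" in this hunk is the usual circular-buffer flush computation: if head has not wrapped past tail, everything in [tail, head) can be enqueued at once; otherwise only the stretch up to the end of the buffer can. A minimal sketch, where the buffer size and index names merely mirror the hunk and the helper itself is illustrative:

    #include <stdint.h>

    #define EVENT_BUFFER_SZ 4096

    /* Number of events that one enqueue call can take, starting at
     * tail_idx; assumes the buffer is non-empty. */
    static inline uint16_t
    contiguous_run(uint16_t head_idx, uint16_t tail_idx)
    {
        if (head_idx > tail_idx)
            return head_idx - tail_idx;     /* no wrap: [tail, head) */
        return EVENT_BUFFER_SZ - tail_idx;  /* wrapped: [tail, end) */
    }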
@@ -28,7 +28,7 @@ EAL_REGISTER_TAILQ(rte_thash_tailq)
 
 /**
  * Table of some irreducible polinomials over GF(2).
- * For lfsr they are reperesented in BE bit order, and
+ * For lfsr they are represented in BE bit order, and
  * x^0 is masked out.
  * For example, poly x^5 + x^2 + 1 will be represented
  * as (101001b & 11111b) = 01001b = 0x9
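The comment's worked example can be reproduced mechanically: reverse the coefficient bits of the degree-n polynomial into BE bit order, then mask to n bits, which drops the x^0 term. A small self-contained sketch (the helper name is illustrative, not the rte_thash internals):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    poly_to_lfsr_repr(uint32_t coeff_bits, unsigned int degree)
    {
        uint32_t rev = 0;
        unsigned int i;

        /* reverse the degree+1 coefficient bits into BE bit order */
        for (i = 0; i <= degree; i++)
            if (coeff_bits & (UINT32_C(1) << i))
                rev |= UINT32_C(1) << (degree - i);
        /* mask to 'degree' bits: drops the (reversed) x^0 term */
        return rev & ((UINT32_C(1) << degree) - 1);
    }

    int
    main(void)
    {
        /* x^5 + x^2 + 1 -> coefficient bits 5, 2, 0 -> 0x25 */
        printf("0x%x\n", poly_to_lfsr_repr(0x25, 5)); /* prints 0x9 */
        return 0;
    }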
@@ -80,7 +80,7 @@ ipv4_frag_reassemble(struct ip_frag_pkt *fp)
 
 /*
  * Process new mbuf with fragment of IPV4 packet.
- * Incoming mbuf should have it's l2_len/l3_len fields setuped correclty.
+ * Incoming mbuf should have it's l2_len/l3_len fields setup correctly.
  * @param tbl
  *   Table where to lookup/add the fragmented packet.
  * @param mb
@@ -475,7 +475,7 @@ trs_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,
 
 /*
  * packet checks for tunnel mode:
- * - same as for trasnport mode
+ * - same as for transport mode
  * - esp tail next proto contains expected for that SA value
 */
 static inline int32_t
@@ -561,7 +561,7 @@ trs_process_step3(struct rte_mbuf *mb)
 static inline void
 tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
 {
-    /* reset mbuf metatdata: L2/L3 len, packet type */
+    /* reset mbuf metadata: L2/L3 len, packet type */
    mb->packet_type = RTE_PTYPE_UNKNOWN;
    mb->tx_offload = (mb->tx_offload & txof_msk) | txof_val;
 
@@ -614,7 +614,7 @@ cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
 
 /*
  * process outbound packets for SA with ESN support,
- * for algorithms that require SQN.hibits to be implictly included
+ * for algorithms that require SQN.hibits to be implicitly included
  * into digest computation.
  * In that case we have to move ICV bytes back to their proper place.
 */
@@ -62,7 +62,7 @@ EAL_REGISTER_TAILQ(rte_ipsec_sad_tailq)
 * Inserts a rule into an appropriate hash table,
 * updates the value for a given SPI in SPI_ONLY hash table
 * reflecting presence of more specific rule type in two LSBs.
- * Updates a counter that reflects the number of rules whith the same SPI.
+ * Updates a counter that reflects the number of rules with the same SPI.
 */
 static inline int
 add_specific(struct rte_ipsec_sad *sad, const void *key,
@@ -136,7 +136,7 @@ ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)
     /*
     * RFC 4303 recommends 64 as minimum window size.
     * there is no point to use ESN mode without SQN window,
-     * so make sure we have at least 64 window when ESN is enalbed.
+     * so make sure we have at least 64 window when ESN is enabled.
     */
    wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) ==
        RTE_IPSEC_SATP_ESN_DISABLE) ?
@@ -122,7 +122,7 @@ struct rte_ipsec_sa {
     * In case of SA handled by multiple threads *sqn* cacheline
     * could be shared by multiple cores.
     * To minimise performance impact, we try to locate in a separate
-     * place from other frequently accesed data.
+     * place from other frequently accessed data.
     */
    union {
        uint64_t outb;
@@ -142,9 +142,9 @@ struct rte_gtp_psc_type1_hdr {
 /** GTP header length */
 #define RTE_ETHER_GTP_HLEN \
     (sizeof(struct rte_udp_hdr) + sizeof(struct rte_gtp_hdr))
-/* GTP next protocal type */
-#define RTE_GTP_TYPE_IPV4 0x40 /**< GTP next protocal type IPv4 */
-#define RTE_GTP_TYPE_IPV6 0x60 /**< GTP next protocal type IPv6 */
+/* GTP next protocol type */
+#define RTE_GTP_TYPE_IPV4 0x40 /**< GTP next protocol type IPv4 */
+#define RTE_GTP_TYPE_IPV6 0x60 /**< GTP next protocol type IPv6 */
 /* GTP destination port number */
 #define RTE_GTPC_UDP_PORT 2123 /**< GTP-C UDP destination port */
 #define RTE_GTPU_UDP_PORT 2152 /**< GTP-U UDP destination port */
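For context, these constants encode the first byte of the payload that follows the GTP-U header, whose top nibble is the inner IP version. A sketch of how a classifier might use them; the helpers are illustrative, only the RTE_GTP_* macros come from rte_gtp.h:

    #include <stdint.h>
    #include <rte_gtp.h>

    /* 'inner' points at the first byte after the GTP-U header */
    static inline int
    gtp_inner_is_ipv4(const uint8_t *inner)
    {
        return (inner[0] & 0xf0) == RTE_GTP_TYPE_IPV4; /* 0x4x: IPv4 */
    }

    static inline int
    gtp_inner_is_ipv6(const uint8_t *inner)
    {
        return (inner[0] & 0xf0) == RTE_GTP_TYPE_IPV6; /* 0x6x: IPv6 */
    }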
@@ -67,7 +67,7 @@ struct ethdev_rx_node_main *ethdev_rx_get_node_data_get(void);
 *
 * Get the Ethernet Rx node.
 *
- * @retrun
+ * @return
 *   Pointer to the Ethernet Rx node.
 */
 struct rte_node_register *ethdev_rx_node_get(void);
@@ -46,7 +46,7 @@ struct ethdev_tx_node_main *ethdev_tx_node_data_get(void);
 *
 * Get the Ethernet Tx node.
 *
- * @retrun
+ * @return
 *   Pointer to the Ethernet Tx node.
 */
 struct rte_node_register *ethdev_tx_node_get(void);
@@ -49,7 +49,7 @@ struct ip4_rewrite_node_main {
 *
 * Get the ipv4 rewrite node.
 *
- * @retrun
+ * @return
 *   Pointer to the ipv4 rewrite node.
 */
 struct rte_node_register *ip4_rewrite_node_get(void);
@@ -647,7 +647,7 @@ struct rte_swx_pipeline_selector_params {
     const char *group_id_field_name;
 
     /** The set of fields used to select (through a hashing scheme) the
-     * member within the current group. Inputs into the seletion operation.
+     * member within the current group. Inputs into the selection operation.
     * Restriction: All the selector fields must be part of the same struct,
     * i.e. part of the same header or part of the meta-data structure.
     */
@@ -159,7 +159,7 @@ power_get_available_freqs(struct acpi_power_info *pi)
         goto out;
     }
 
-    /* Store the available frequncies into power context */
+    /* Store the available frequencies into power context */
     for (i = 0, pi->nb_freqs = 0; i < count; i++) {
         POWER_DEBUG_TRACE("Lcore %u frequency[%d]: %s\n", pi->lcore_id,
                 i, freqs[i]);
@@ -362,7 +362,7 @@ rte_rcu_qsbr_thread_offline(struct rte_rcu_qsbr *v, unsigned int thread_id)
 
     /* The reader can go offline only after the load of the
     * data structure is completed. i.e. any load of the
-     * data strcture can not move after this store.
+     * data structure can not move after this store.
     */
 
    __atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
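The guarantee this comment describes maps onto release semantics: a store with __ATOMIC_RELEASE keeps every earlier load and store in program order from moving past it. A minimal standalone sketch of the pattern, with names that only stand in for the rte_rcu internals:

    #include <stdint.h>

    static uint64_t reader_cnt; /* stands in for v->qsbr_cnt[thread_id].cnt */

    static inline void
    reader_go_offline(const uint64_t *shared)
    {
        uint64_t last = *shared;    /* final read of the protected data */
        (void)last;
        /* release store: the read above cannot be reordered after it,
         * so the writer observes "offline" only once all reads are done */
        __atomic_store_n(&reader_cnt, 0, __ATOMIC_RELEASE);
    }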
@@ -199,7 +199,7 @@ rte_rib6_lookup_exact(struct rte_rib6 *rib,
 }
 
 /*
- * Traverses on subtree and retreeves more specific routes
+ * Traverses on subtree and retrieves more specific routes
 * for a given in args ip/depth prefix
 * last = NULL means the first invocation
 */
@@ -590,7 +590,7 @@ rte_sched_subport_config_qsize(struct rte_sched_subport *subport)
 
     subport->qsize_add[0] = 0;
 
-    /* Strict prority traffic class */
+    /* Strict priority traffic class */
     for (i = 1; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
         subport->qsize_add[i] = subport->qsize_add[i-1] + subport->qsize[i-1];
 
@@ -796,7 +796,7 @@ rte_vhost_get_vhost_ring_inflight(int vid, uint16_t vring_idx,
 /**
 * Set split inflight descriptor.
 *
- * This function save descriptors that has been comsumed in available
+ * This function save descriptors that has been consumed in available
 * ring
 *
 * @param vid
@@ -815,7 +815,7 @@ rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
 /**
 * Set packed inflight descriptor and get corresponding inflight entry
 *
- * This function save descriptors that has been comsumed
+ * This function save descriptors that has been consumed
 *
 * @param vid
 *   vhost device ID