fix spelling in comments and strings
The tool comes from https://github.com/jsoref

Signed-off-by: Josh Soref <jsoref@gmail.com>
Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
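---
As a rough illustration, a comparable typo sweep can be reproduced with
codespell (a different tool than the one linked above; the flags and paths
below are only an example, not what was actually run):

    # example only: scan typo-prone trees, skipping SVG images and linker maps
    codespell --quiet-level=3 --skip='*.svg,*.map' app drivers lib doc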
@@ -630,7 +630,7 @@ metrics_display(int port_id)
 
 	names = rte_malloc(NULL, sizeof(struct rte_metric_name) * len, 0);
 	if (names == NULL) {
-		printf("Cannot allocate memory for metrcis names\n");
+		printf("Cannot allocate memory for metrics names\n");
 		rte_free(metrics);
 		return;
 	}
@@ -1109,7 +1109,7 @@ show_tm(void)
 		caplevel.n_nodes_max,
 		caplevel.n_nodes_nonleaf_max,
 		caplevel.n_nodes_leaf_max);
-	printf("\t -- indetical: non leaf %u leaf %u\n",
+	printf("\t -- identical: non leaf %u leaf %u\n",
 		caplevel.non_leaf_nodes_identical,
 		caplevel.leaf_nodes_identical);
 
@@ -1263,7 +1263,7 @@ show_ring(char *name)
 		printf(" - Name (%s) on socket (%d)\n"
 			" - flags:\n"
 			"\t -- Single Producer Enqueue (%u)\n"
-			"\t -- Single Consmer Dequeue (%u)\n",
+			"\t -- Single Consumer Dequeue (%u)\n",
 			ptr->name,
 			ptr->memzone->socket_id,
 			ptr->flags & RING_F_SP_ENQ,
@@ -386,8 +386,8 @@ parse_cb_ipv4_trace(char *str, struct ipv4_5tuple *v)
 }
 
 /*
- * Parses IPV6 address, exepcts the following format:
- * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X - is a hexedecimal digit).
+ * Parse IPv6 address, expects the following format:
+ * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X is a hexadecimal digit).
  */
 static int
 parse_ipv6_addr(const char *in, const char **end, uint32_t v[IPV6_ADDR_U32],
@@ -994,7 +994,7 @@ print_usage(const char *prgname)
 		"should be either 1 or multiple of %zu, "
 		"but not greater then %u]\n"
 		"[--" OPT_MAX_SIZE
-		"=<size limit (in bytes) for runtime ACL strucutures> "
+		"=<size limit (in bytes) for runtime ACL structures> "
 		"leave 0 for default behaviour]\n"
 		"[--" OPT_ITER_NUM "=<number of iterations to perform>]\n"
 		"[--" OPT_VERBOSE "=<verbose level>]\n"
@@ -180,7 +180,7 @@ main_loop(struct cperf_cyclecount_ctx *ctx, enum rte_comp_xform_type type)
 
 	if (ops == NULL) {
 		RTE_LOG(ERR, USER1,
-			"Can't allocate memory for ops strucures\n");
+			"Can't allocate memory for ops structures\n");
 		return -1;
 	}
 
@@ -72,7 +72,7 @@ main_loop(struct cperf_benchmark_ctx *ctx, enum rte_comp_xform_type type)
 
 	if (ops == NULL) {
 		RTE_LOG(ERR, USER1,
-			"Can't allocate memory for ops strucures\n");
+			"Can't allocate memory for ops structures\n");
 		return -1;
 	}
 
@@ -75,7 +75,7 @@ main_loop(struct cperf_verify_ctx *ctx, enum rte_comp_xform_type type)
 
	if (ops == NULL) {
		RTE_LOG(ERR, USER1,
-			"Can't allocate memory for ops strucures\n");
+			"Can't allocate memory for ops structures\n");
		return -1;
	}
 
@@ -67,7 +67,7 @@ comp_perf_check_capabilities(struct comp_test_data *test_data, uint8_t cdev_id)
 
 	uint64_t comp_flags = cap->comp_feature_flags;
 
-	/* Huffman enconding */
+	/* Huffman encoding */
 	if (test_data->huffman_enc == RTE_COMP_HUFFMAN_FIXED &&
 			(comp_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0) {
 		RTE_LOG(ERR, USER1,
@@ -334,7 +334,7 @@ pmd_cyclecount_bench_burst_sz(
 	 * queue, so we never get any failed enqs unless the driver won't accept
 	 * the exact number of descriptors we requested, or the driver won't
 	 * wrap around the end of the TX ring. However, since we're only
-	 * dequeueing once we've filled up the queue, we have to benchmark it
+	 * dequeuing once we've filled up the queue, we have to benchmark it
 	 * piecemeal and then average out the results.
 	 */
 	cur_op = 0;
@@ -336,7 +336,7 @@ usage(char *program)
 		"\t--deq_tmo_nsec : global dequeue timeout\n"
 		"\t--prod_type_ethdev : use ethernet device as producer.\n"
 		"\t--prod_type_timerdev : use event timer device as producer.\n"
-		"\t expity_nsec would be the timeout\n"
+		"\t expiry_nsec would be the timeout\n"
 		"\t in ns.\n"
 		"\t--prod_type_timerdev_burst : use timer device as producer\n"
 		"\t burst mode.\n"
@@ -253,7 +253,7 @@ void
 order_opt_dump(struct evt_options *opt)
 {
 	evt_dump_producer_lcores(opt);
-	evt_dump("nb_wrker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
+	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
 	evt_dump_worker_lcores(opt);
 	evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
 }
@@ -624,7 +624,7 @@ print_usage(void)
 		"(if -f is not specified)>]\n"
 		"[-r <percentage ratio of random ip's to lookup"
 		"(if -t is not specified)>]\n"
-		"[-c <do comarison with LPM library>]\n"
+		"[-c <do comparison with LPM library>]\n"
 		"[-6 <do tests with ipv6 (default ipv4)>]\n"
 		"[-s <shuffle randomly generated routes>]\n"
 		"[-a <check nexthops for all ipv4 address space"
@@ -641,7 +641,7 @@ print_usage(void)
 		"[-g <number of tbl8's for dir24_8 or trie FIBs>]\n"
 		"[-w <path to the file to dump routing table>]\n"
 		"[-u <path to the file to dump ip's for lookup>]\n"
-		"[-v <type of loookup function:"
+		"[-v <type of lookup function:"
 		"\ts1, s2, s3 (3 types of scalar), v (vector) -"
 		" for DIR24_8 based FIB\n"
 		"\ts, v - for TRIE based ipv6 FIB>]\n",
@@ -28,7 +28,7 @@
 #define PORT_ID_DST 1
 #define TEID_VALUE 1
 
-/* Flow items/acctions max size */
+/* Flow items/actions max size */
 #define MAX_ITEMS_NUM 32
 #define MAX_ACTIONS_NUM 32
 #define MAX_ATTRS_NUM 16
@@ -1519,7 +1519,7 @@ dump_used_cpu_time(const char *item,
 	 * threads time.
 	 *
 	 * Throughput: total count of rte rules divided
-	 * over the average of the time cosumed by all
+	 * over the average of the time consumed by all
 	 * threads time.
 	 */
 	double insertion_latency_time;
@@ -561,7 +561,7 @@ static void cmd_help_long_parsed(void *parsed_result,
 			" Set the option to enable display of RX and TX bursts.\n"
 
 			"set port (port_id) vf (vf_id) rx|tx on|off\n"
-			" Enable/Disable a VF receive/tranmit from a port\n\n"
+			" Enable/Disable a VF receive/transmit from a port\n\n"
 
 			"set port (port_id) vf (vf_id) rxmode (AUPE|ROPE|BAM"
 			"|MPE) (on|off)\n"
@@ -2162,7 +2162,7 @@ static const struct token token_list[] = {
 	},
 	[COMMON_POLICY_ID] = {
 		.name = "{policy_id}",
-		.type = "POLCIY_ID",
+		.type = "POLICY_ID",
 		.help = "policy id",
 		.call = parse_int,
 		.comp = comp_none,
@@ -2370,7 +2370,7 @@ static const struct token token_list[] = {
 	},
 	[TUNNEL_DESTROY] = {
 		.name = "destroy",
-		.help = "destroy tunel",
+		.help = "destroy tunnel",
 		.next = NEXT(NEXT_ENTRY(TUNNEL_DESTROY_ID),
 			     NEXT_ENTRY(COMMON_PORT_ID)),
 		.args = ARGS(ARGS_ENTRY(struct buffer, port)),
@@ -2378,7 +2378,7 @@ static const struct token token_list[] = {
 	},
 	[TUNNEL_DESTROY_ID] = {
 		.name = "id",
-		.help = "tunnel identifier to testroy",
+		.help = "tunnel identifier to destroy",
 		.next = NEXT(NEXT_ENTRY(COMMON_UNSIGNED)),
 		.args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)),
 		.call = parse_tunnel,
@@ -69,7 +69,7 @@ print_err_msg(struct rte_tm_error *error)
 	[RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS]
 		= "num shared shapers field (node params)",
 	[RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE]
-		= "wfq weght mode field (node params)",
+		= "wfq weight mode field (node params)",
 	[RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES]
 		= "num strict priorities field (node params)",
 	[RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN]
@@ -479,7 +479,7 @@ static void cmd_show_port_tm_level_cap_parsed(void *parsed_result,
 cmdline_parse_inst_t cmd_show_port_tm_level_cap = {
 	.f = cmd_show_port_tm_level_cap_parsed,
 	.data = NULL,
-	.help_str = "Show Port TM Hierarhical level Capabilities",
+	.help_str = "Show port TM hierarchical level capabilities",
 	.tokens = {
 		(void *)&cmd_show_port_tm_level_cap_show,
 		(void *)&cmd_show_port_tm_level_cap_port,
@@ -796,7 +796,7 @@ pkt_copy_split(const struct rte_mbuf *pkt)
  *
  * The testpmd command line for this forward engine sets the flags
  * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
- * wether a checksum must be calculated in software or in hardware. The
+ * whether a checksum must be calculated in software or in hardware. The
  * IP, UDP, TCP and SCTP flags always concern the inner layer. The
  * OUTER_IP is only useful for tunnel packets.
  */
@@ -110,7 +110,7 @@ usage(char* progname)
 	       "If the drop-queue doesn't exist, the packet is dropped. "
 	       "By default drop-queue=127.\n");
 #ifdef RTE_LIB_LATENCYSTATS
-	printf(" --latencystats=N: enable latency and jitter statistcs "
+	printf(" --latencystats=N: enable latency and jitter statistics "
 	       "monitoring on forwarding lcore id N.\n");
 #endif
 	printf(" --disable-crc-strip: disable CRC stripping by hardware.\n");
@@ -449,7 +449,7 @@ uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
 uint8_t latencystats_enabled;
 
 /*
- * Lcore ID to serive latency statistics.
+ * Lcore ID to service latency statistics.
  */
 lcoreid_t latencystats_lcore_id = -1;
 
@@ -174,14 +174,14 @@ update_pkt_header(struct rte_mbuf *pkt, uint32_t total_pkt_len)
 			sizeof(struct rte_ether_hdr) +
 			sizeof(struct rte_ipv4_hdr) +
 			sizeof(struct rte_udp_hdr)));
-	/* updata udp pkt length */
+	/* update UDP packet length */
 	udp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_udp_hdr *,
 			sizeof(struct rte_ether_hdr) +
 			sizeof(struct rte_ipv4_hdr));
 	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
 	udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
 
-	/* updata ip pkt length and csum */
+	/* update IP packet length and checksum */
 	ip_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
 			sizeof(struct rte_ether_hdr));
 	ip_hdr->hdr_checksum = 0;
@@ -11,7 +11,7 @@
  * (https://en.wikipedia.org/wiki/Peterson%27s_algorithm)
  * for two execution units to make sure that rte_smp_mb() prevents
  * store-load reordering to happen.
- * Also when executed on a single lcore could be used as a approxiamate
+ * Also when executed on a single lcore could be used as a approximate
  * estimation of number of cycles particular implementation of rte_smp_mb()
  * will take.
  */
@@ -23,7 +23,7 @@
 /*
  * Basic functional tests for librte_bpf.
  * The main procedure - load eBPF program, execute it and
- * compare restuls with expected values.
+ * compare results with expected values.
  */
 
 struct dummy_offset {
@@ -2707,7 +2707,7 @@ test_ld_mbuf1_check(uint64_t rc, const void *arg)
 }
 
 /*
- * same as ld_mbuf1, but then trancate the mbuf by 1B,
+ * same as ld_mbuf1, but then truncate the mbuf by 1B,
  * so load of last 4B fail.
  */
 static void
@@ -1256,7 +1256,7 @@ test_deflate_comp_run(const struct interim_data_params *int_data,
 		/*
 		 * Store original operation index in private data,
 		 * since ordering does not have to be maintained,
-		 * when dequeueing from compressdev, so a comparison
+		 * when dequeuing from compressdev, so a comparison
 		 * at the end of the test can be done.
 		 */
 		priv_data = (struct priv_op_data *) (ops[i] + 1);
@@ -6870,7 +6870,7 @@ test_snow3g_decryption_with_digest_test_case_1(void)
 	}
 
 	/*
-	 * Function prepare data for hash veryfication test case.
+	 * Function prepare data for hash verification test case.
 	 * Digest is allocated in 4 last bytes in plaintext, pattern.
 	 */
 	snow3g_hash_test_vector_setup(&snow3g_test_case_7, &snow3g_hash_data);
@@ -346,7 +346,7 @@ test_fib_perf(void)
 	fib = rte_fib_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_FIB_ASSERT(fib != NULL);
 
-	/* Measue add. */
+	/* Measure add. */
 	begin = rte_rdtsc();
 
 	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
@@ -326,7 +326,7 @@ test_kni_register_handler_mp(void)
 
 	/* Check with the invalid parameters */
 	if (rte_kni_register_handlers(kni, NULL) == 0) {
-		printf("Unexpectedly register successuflly "
+		printf("Unexpectedly register successfully "
 					"with NULL ops pointer\n");
 		exit(-1);
 	}
@@ -475,7 +475,7 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
 
 	/**
 	 * Check multiple processes support on
-	 * registerring/unregisterring handlers.
+	 * registering/unregistering handlers.
 	 */
 	if (test_kni_register_handler_mp() < 0) {
 		printf("fail to check multiple process support\n");
@@ -11,7 +11,7 @@
 
 #include "test.h"
 
-/* incrementd in handler, to check it is properly called once per
+/* incremented in handler, to check it is properly called once per
  * key/value association */
 static unsigned count;
 
@@ -107,14 +107,14 @@ static int test_valid_kvargs(void)
 		goto fail;
 	}
 	count = 0;
-	/* call check_handler() for all entries with key="unexistant_key" */
-	if (rte_kvargs_process(kvlist, "unexistant_key", check_handler, NULL) < 0) {
+	/* call check_handler() for all entries with key="nonexistent_key" */
+	if (rte_kvargs_process(kvlist, "nonexistent_key", check_handler, NULL) < 0) {
 		printf("rte_kvargs_process() error\n");
 		rte_kvargs_free(kvlist);
 		goto fail;
 	}
 	if (count != 0) {
-		printf("invalid count value %d after rte_kvargs_process(unexistant_key)\n",
+		printf("invalid count value %d after rte_kvargs_process(nonexistent_key)\n",
 			count);
 		rte_kvargs_free(kvlist);
 		goto fail;
@@ -135,10 +135,10 @@ static int test_valid_kvargs(void)
 		rte_kvargs_free(kvlist);
 		goto fail;
 	}
-	/* count all entries with key="unexistant_key" */
-	count = rte_kvargs_count(kvlist, "unexistant_key");
+	/* count all entries with key="nonexistent_key" */
+	count = rte_kvargs_count(kvlist, "nonexistent_key");
 	if (count != 0) {
-		printf("invalid count value %d after rte_kvargs_count(unexistant_key)\n",
+		printf("invalid count value %d after rte_kvargs_count(nonexistent_key)\n",
 			count);
 		rte_kvargs_free(kvlist);
 		goto fail;
@@ -156,7 +156,7 @@ static int test_valid_kvargs(void)
 	/* call check_handler() on all entries with key="check", it
 	 * should fail as the value is not recognized by the handler */
 	if (rte_kvargs_process(kvlist, "check", check_handler, NULL) == 0) {
-		printf("rte_kvargs_process() is success bu should not\n");
+		printf("rte_kvargs_process() is success but should not\n");
 		rte_kvargs_free(kvlist);
 		goto fail;
 	}
@@ -22,7 +22,7 @@ struct ips_tbl_entry {
  * in previous test_lpm6_routes.h . Because this table has only 1000
  * lines, keeping it doesn't make LPM6 test case so large and also
  * make the algorithm to generate rule table unnecessary and the
- * algorithm to genertate test input IPv6 and associated expected
+ * algorithm to generate test input IPv6 and associated expected
  * next_hop much simple.
  */
 
@@ -459,7 +459,7 @@ static int test_member_multimatch(void)
 			MAX_MATCH, set_ids_cache);
 	/*
 	 * For cache mode, keys overwrite when signature same.
-	 * the mutimatch should work like single match.
+	 * the multimatch should work like single match.
 	 */
 	TEST_ASSERT(ret_ht == M_MATCH_CNT && ret_vbf == M_MATCH_CNT &&
 			ret_cache == 1,
@@ -304,7 +304,7 @@ static int test_mempool_single_consumer(void)
 }
 
 /*
- * test function for mempool test based on singple consumer and single producer,
+ * test function for mempool test based on single consumer and single producer,
  * can run on one lcore only
  */
 static int
@@ -322,7 +322,7 @@ my_mp_init(struct rte_mempool *mp, __rte_unused void *arg)
 }
 
 /*
- * it tests the mempool operations based on singple producer and single consumer
+ * it tests the mempool operations based on single producer and single consumer
  */
 static int
 test_mempool_sp_sc(void)
@@ -543,7 +543,7 @@ test_memzone_reserve_max(void)
 	}
 
 	if (mz->len != maxlen) {
-		printf("Memzone reserve with 0 size did not return bigest block\n");
+		printf("Memzone reserve with 0 size did not return biggest block\n");
 		printf("Expected size = %zu, actual size = %zu\n",
 			maxlen, mz->len);
 		rte_dump_physmem_layout(stdout);
@@ -606,7 +606,7 @@ test_memzone_reserve_max_aligned(void)
 
 	if (mz->len < minlen || mz->len > maxlen) {
 		printf("Memzone reserve with 0 size and alignment %u did not return"
-				" bigest block\n", align);
+				" biggest block\n", align);
 		printf("Expected size = %zu-%zu, actual size = %zu\n",
 			minlen, maxlen, mz->len);
 		rte_dump_physmem_layout(stdout);
@@ -1054,7 +1054,7 @@ test_memzone_basic(void)
 	if (mz != memzone1)
 		return -1;
 
-	printf("test duplcate zone name\n");
+	printf("test duplicate zone name\n");
 	mz = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone1"), 100,
 			SOCKET_ID_ANY, 0);
 	if (mz != NULL)
@@ -109,7 +109,7 @@ test_setup(void)
 		return -1;
 	}
 
-	/* Make a pool for cloned packeets */
+	/* Make a pool for cloned packets */
 	mp = rte_pktmbuf_pool_create_by_ops("pcapng_test_pool", NUM_PACKETS,
 			0, 0,
 			rte_pcapng_mbuf_size(pkt_len),
@@ -659,7 +659,7 @@ test_power_cpufreq(void)
 	/* test of exit power management for an invalid lcore */
 	ret = rte_power_exit(TEST_POWER_LCORE_INVALID);
 	if (ret == 0) {
-		printf("Unpectedly exit power management successfully for "
+		printf("Unexpectedly exit power management successfully for "
 				"lcore %u\n", TEST_POWER_LCORE_INVALID);
 		rte_power_unset_env();
 		return -1;
@@ -408,7 +408,7 @@ test_rcu_qsbr_synchronize_reader(void *arg)
 
 /*
  * rte_rcu_qsbr_synchronize: Wait till all the reader threads have entered
- * the queiscent state.
+ * the quiescent state.
  */
 static int
 test_rcu_qsbr_synchronize(void)
@@ -443,7 +443,7 @@ test_rcu_qsbr_synchronize(void)
 	rte_rcu_qsbr_synchronize(t[0], RTE_MAX_LCORE - 1);
 	rte_rcu_qsbr_thread_offline(t[0], RTE_MAX_LCORE - 1);
 
-	/* Test if the API returns after unregisterng all the threads */
+	/* Test if the API returns after unregistering all the threads */
 	for (i = 0; i < RTE_MAX_LCORE; i++)
 		rte_rcu_qsbr_thread_unregister(t[0], i);
 	rte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);
@@ -1566,10 +1566,10 @@ static void ovfl_check_avg(uint32_t avg)
 }
 
 static struct test_config ovfl_test1_config = {
-	.ifname = "queue avergage overflow test interface",
+	.ifname = "queue average overflow test interface",
 	.msg = "overflow test 1 : use one RED configuration,\n"
 	" increase average queue size to target level,\n"
-	" check maximum number of bits requirte_red to represent avg_s\n\n",
+	" check maximum number of bits required to represent avg_s\n\n",
 	.htxt = "avg queue size "
 	"wq_log2 "
 	"fraction bits "
@@ -1757,12 +1757,12 @@ test_invalid_parameters(void)
 		printf("%i: rte_red_config_init should have failed!\n", __LINE__);
 		return -1;
 	}
-	/* min_treshold == max_treshold */
+	/* min_threshold == max_threshold */
 	if (rte_red_config_init(&config, 0, 1, 1, 0) == 0) {
 		printf("%i: rte_red_config_init should have failed!\n", __LINE__);
 		return -1;
 	}
-	/* min_treshold > max_treshold */
+	/* min_threshold > max_threshold */
 	if (rte_red_config_init(&config, 0, 2, 1, 0) == 0) {
 		printf("%i: rte_red_config_init should have failed!\n", __LINE__);
 		return -1;
@@ -237,7 +237,7 @@
  * increases .called counter. Function returns value stored in .ret field
  * of the structure.
  * In case of some parameters in some functions the expected value is unknown
- * and cannot be detrmined prior to call. Such parameters are stored
+ * and cannot be determined prior to call. Such parameters are stored
  * in structure and can be compared or analyzed later in test case code.
  *
  * Below structures and functions follow the rules just described.
@@ -364,7 +364,7 @@ setup_pipeline(int test_type)
 		.action = RTE_PIPELINE_ACTION_PORT,
 		{.port_id = port_out_id[i^1]},
 	};
-	printf("Setting secont table to output to port\n");
+	printf("Setting second table to output to port\n");
 
 	/* Add the default action for the table. */
 	ret = rte_pipeline_table_default_entry_add(p,
@@ -684,7 +684,7 @@ test_predictable_rss_multirange(void)
 
 	/*
 	 * calculate hashes, complements, then adjust keys with
-	 * complements and recalsulate hashes
+	 * complements and recalculate hashes
 	 */
 	for (i = 0; i < RTE_DIM(rng_arr); i++) {
 		for (k = 0; k < 100; k++) {
@@ -1,5 +1,5 @@
 #! /usr/bin/env python3
-# SPDX-License-Identitifer: BSD-3-Clause
+# SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2020 Intel Corporation
 
 import subprocess
@@ -25,7 +25,7 @@ build_map_changes()
 
 		# Triggering this rule, which starts a line and ends it
 		# with a { identifies a versioned section. The section name is
-		# the rest of the line with the + and { symbols remvoed.
+		# the rest of the line with the + and { symbols removed.
 		# Triggering this rule sets in_sec to 1, which actives the
 		# symbol rule below
 		/^.*{/ {
@@ -35,7 +35,7 @@ build_map_changes()
 			}
 		}
 
-		# This rule idenfies the end of a section, and disables the
+		# This rule identifies the end of a section, and disables the
 		# symbol rule
 		/.*}/ {in_sec=0}
 
@@ -100,7 +100,7 @@ check_for_rule_violations()
 				# Just inform the user of this occurrence, but
 				# don't flag it as an error
 				echo -n "INFO: symbol $symname is added but "
-				echo -n "patch has insuficient context "
+				echo -n "patch has insufficient context "
 				echo -n "to determine the section name "
 				echo -n "please ensure the version is "
 				echo "EXPERIMENTAL"
@@ -465,7 +465,7 @@
 	v:mID="63"
 	id="shape63-63"><title
 	id="title149">Sheet.63</title><desc
-	id="desc151">Contanier/App</desc><v:textBlock
+	id="desc151">Container/App</desc><v:textBlock
 	v:margins="rect(4,4,4,4)" /><v:textRect
 	height="22.5"
 	width="90"
@@ -9,7 +9,7 @@ packets. This Linux-specific PMD binds to an AF_PACKET socket and allows
 a DPDK application to send and receive raw packets through the Kernel.
 
 In order to improve Rx and Tx performance this implementation makes use of
-PACKET_MMAP, which provides a mmap'ed ring buffer, shared between user space
+PACKET_MMAP, which provides a mmapped ring buffer, shared between user space
 and kernel, that's used to send and receive packets. This helps reducing system
 calls and the copies needed between user space and Kernel.
 
@@ -178,7 +178,7 @@ DPDK and must be installed separately:
 
 - mlx4_core: hardware driver managing Mellanox ConnectX-3 devices.
 - mlx4_en: Ethernet device driver that provides kernel network interfaces.
-- mlx4_ib: InifiniBand device driver.
+- mlx4_ib: InfiniBand device driver.
 - ib_uverbs: user space driver for verbs (entry point for libibverbs).
 
 - **Firmware update**
@@ -649,7 +649,7 @@ Driver options
 
 A timeout value is set in the driver to control the waiting time before
 dropping a packet. Once the timer is expired, the delay drop will be
-deactivated for all the Rx queues with this feature enable. To re-activeate
+deactivated for all the Rx queues with this feature enable. To re-activate
 it, a rearming is needed and it is part of the kernel driver starting from
 OFED 5.5.
 
|
||||
|
||||
For the MARK action the last 16 values in the full range are reserved for
|
||||
internal PMD purposes (to emulate FLAG action). The valid range for the
|
||||
MARK action values is 0-0xFFEF for the 16-bit mode and 0-xFFFFEF
|
||||
MARK action values is 0-0xFFEF for the 16-bit mode and 0-0xFFFFEF
|
||||
for the 24-bit mode, the flows with the MARK action value outside
|
||||
the specified range will be rejected.
|
||||
|
||||
@ -1317,7 +1317,7 @@ DPDK and must be installed separately:
|
||||
- mlx5_core: hardware driver managing Mellanox
|
||||
ConnectX-4/ConnectX-5/ConnectX-6/BlueField devices and related Ethernet kernel
|
||||
network devices.
|
||||
- mlx5_ib: InifiniBand device driver.
|
||||
- mlx5_ib: InfiniBand device driver.
|
||||
- ib_uverbs: user space driver for Verbs (entry point for libibverbs).
|
||||
|
||||
- **Firmware update**
|
||||
|
@ -751,7 +751,7 @@ feature is useful when the user wants to abandon partially enqueued operations
|
||||
for a failed enqueue burst operation and try enqueuing in a whole later.
|
||||
|
||||
Similar as enqueue, there are two dequeue functions:
|
||||
``rte_cryptodev_raw_dequeue`` for dequeing single operation, and
|
||||
``rte_cryptodev_raw_dequeue`` for dequeuing single operation, and
|
||||
``rte_cryptodev_raw_dequeue_burst`` for dequeuing a burst of operations (e.g.
|
||||
all operations in a ``struct rte_crypto_sym_vec`` descriptor). The
|
||||
``rte_cryptodev_raw_dequeue_burst`` function allows the user to provide callback
|
||||
|
@ -433,7 +433,7 @@ and decides on a preferred IOVA mode.
|
||||
|
||||
- if all buses report RTE_IOVA_PA, then the preferred IOVA mode is RTE_IOVA_PA,
|
||||
- if all buses report RTE_IOVA_VA, then the preferred IOVA mode is RTE_IOVA_VA,
|
||||
- if all buses report RTE_IOVA_DC, no bus expressed a preferrence, then the
|
||||
- if all buses report RTE_IOVA_DC, no bus expressed a preference, then the
|
||||
preferred mode is RTE_IOVA_DC,
|
||||
- if the buses disagree (at least one wants RTE_IOVA_PA and at least one wants
|
||||
RTE_IOVA_VA), then the preferred IOVA mode is RTE_IOVA_DC (see below with the
|
||||
@ -658,7 +658,7 @@ Known Issues
|
||||
+ rte_ring
|
||||
|
||||
rte_ring supports multi-producer enqueue and multi-consumer dequeue.
|
||||
However, it is non-preemptive, this has a knock on effect of making rte_mempool non-preemptable.
|
||||
However, it is non-preemptive, this has a knock on effect of making rte_mempool non-preemptible.
|
||||
|
||||
.. note::
|
||||
|
||||
|
@@ -460,7 +460,7 @@
 	height="14.642858"
 	x="39.285713"
 	y="287.16254" /></flowRegion><flowPara
-	id="flowPara4817">offse</flowPara></flowRoot> <text
+	id="flowPara4817">offset</flowPara></flowRoot> <text
 	xml:space="preserve"
 	style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
 	x="74.16684"
@@ -649,7 +649,7 @@
 	height="14.642858"
 	x="39.285713"
 	y="287.16254" /></flowRegion><flowPara
-	id="flowPara4817">offse</flowPara></flowRoot> <text
+	id="flowPara4817">offset</flowPara></flowRoot> <text
 	xml:space="preserve"
 	style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144"
 	x="16.351753"
@@ -1196,12 +1196,12 @@ In the case of severe congestion, the dropper resorts to tail drop.
 This occurs when a packet queue has reached maximum capacity and cannot store any more packets.
 In this situation, all arriving packets are dropped.
 
-The flow through the dropper is illustrated in :numref:`figure_flow_tru_droppper`.
+The flow through the dropper is illustrated in :numref:`figure_flow_tru_dropper`.
 The RED/WRED/PIE algorithm is exercised first and tail drop second.
 
-.. _figure_flow_tru_droppper:
+.. _figure_flow_tru_dropper:
 
-.. figure:: img/flow_tru_droppper.*
+.. figure:: img/flow_tru_dropper.*
 
    Flow Through the Dropper
 
@@ -1379,7 +1379,7 @@ Matches a network service header (RFC 8300).
 - ``ttl``: maximum SFF hopes (6 bits).
 - ``length``: total length in 4 bytes words (6 bits).
 - ``reserved1``: reserved1 bits (4 bits).
-- ``mdtype``: ndicates format of NSH header (4 bits).
+- ``mdtype``: indicates format of NSH header (4 bits).
 - ``next_proto``: indicates protocol type of encap data (8 bits).
 - ``spi``: service path identifier (3 bytes).
 - ``sindex``: service index (1 byte).
@@ -37,7 +37,7 @@ using ``rte_rawdev_queue_conf_get()``.
 
 To perform data transfer use standard ``rte_rawdev_enqueue_buffers()`` and
 ``rte_rawdev_dequeue_buffers()`` APIs. Not all messages produce sensible
-responses hence dequeueing is not always necessary.
+responses hence dequeuing is not always necessary.
 
 BPHY CGX/RPM PMD
 ----------------
@@ -22,7 +22,7 @@ PCRE back tracking ctrl
   Support PCRE back tracking ctrl.
 
 PCRE call outs
-  Support PCRE call outes.
+  Support PCRE call routes.
 
 PCRE forward reference
   Support Forward reference.
|
||||
* **igb_uio: Fixed possible mmap failure for Linux >= 4.5.**
|
||||
|
||||
The mmaping of the iomem range of the PCI device fails for kernels that
|
||||
The mmapping of the iomem range of the PCI device fails for kernels that
|
||||
enabled the ``CONFIG_IO_STRICT_DEVMEM`` option. The error seen by the
|
||||
user is as similar to the following::
|
||||
|
||||
|
@ -232,7 +232,7 @@ API Changes
|
||||
* The ``rte_cryptodev_configure()`` function does not create the session
|
||||
mempool for the device anymore.
|
||||
* The ``rte_cryptodev_queue_pair_attach_sym_session()`` and
|
||||
``rte_cryptodev_queue_pair_dettach_sym_session()`` functions require
|
||||
``rte_cryptodev_queue_pair_detach_sym_session()`` functions require
|
||||
the new parameter ``device id``.
|
||||
* Parameters of ``rte_cryptodev_sym_session_create()`` were modified to
|
||||
accept ``mempool``, instead of ``device id`` and ``rte_crypto_sym_xform``.
|
||||
|
@ -671,7 +671,7 @@ Resolved Issues
|
||||
value 0.
|
||||
|
||||
|
||||
Fixes: 40b966a211ab ("ivshmem: library changes for mmaping using ivshmem")
|
||||
Fixes: 40b966a211ab ("ivshmem: library changes for mmapping using ivshmem")
|
||||
|
||||
|
||||
* **ixgbe/base: Fix SFP probing.**
|
||||
|
@ -154,8 +154,8 @@ each RX queue uses its own mempool.
|
||||
|
||||
.. literalinclude:: ../../../examples/ip_reassembly/main.c
|
||||
:language: c
|
||||
:start-after: mbufs stored int the gragment table. 8<
|
||||
:end-before: >8 End of mbufs stored int the fragmentation table.
|
||||
:start-after: mbufs stored in the fragment table. 8<
|
||||
:end-before: >8 End of mbufs stored in the fragmentation table.
|
||||
:dedent: 1
|
||||
|
||||
Packet Reassembly and Forwarding
|
||||
|
@@ -176,7 +176,7 @@ function. The value returned is the number of parsed arguments:
 .. literalinclude:: ../../../examples/l2fwd-cat/l2fwd-cat.c
     :language: c
     :start-after: Initialize the Environment Abstraction Layer (EAL). 8<
-    :end-before: >8 End of initializion the Environment Abstraction Layer (EAL).
+    :end-before: >8 End of initialization the Environment Abstraction Layer (EAL).
     :dedent: 1
 
 The next task is to initialize the PQoS library and configure CAT. The
@@ -191,7 +191,7 @@ flow is not handled by the node.
 .. literalinclude:: ../../../examples/server_node_efd/node/node.c
     :language: c
     :start-after: Packets dequeued from the shared ring. 8<
-    :end-before: >8 End of packets dequeueing.
+    :end-before: >8 End of packets dequeuing.
 
 Finally, note that both processes updates statistics, such as transmitted, received
 and dropped packets, which are shown and refreshed by the server app.
@@ -54,7 +54,7 @@ function. The value returned is the number of parsed arguments:
 .. literalinclude:: ../../../examples/skeleton/basicfwd.c
     :language: c
     :start-after: Initializion the Environment Abstraction Layer (EAL). 8<
-    :end-before: >8 End of initializion the Environment Abstraction Layer (EAL).
+    :end-before: >8 End of initialization the Environment Abstraction Layer (EAL).
     :dedent: 1
 
 
@@ -681,7 +681,7 @@ The following is an example JSON string for a power management request.
     "resource_id": 10
   }}
 
-To query the available frequences of an lcore, use the query_cpu_freq command.
+To query the available frequencies of an lcore, use the query_cpu_freq command.
 Where {core_num} is the lcore to query.
 Before using this command, please enable responses via the set_query command on the host.
 
@@ -3510,7 +3510,7 @@ Tunnel offload
 Indicate tunnel offload rule type
 
 - ``tunnel_set {tunnel_id}``: mark rule as tunnel offload decap_set type.
-- ``tunnel_match {tunnel_id}``: mark rule as tunel offload match type.
+- ``tunnel_match {tunnel_id}``: mark rule as tunnel offload match type.
 
 Matching pattern
 ^^^^^^^^^^^^^^^^
@@ -2097,7 +2097,7 @@ dequeue_enc_one_op_cb(struct fpga_queue *q, struct rte_bbdev_enc_op **op,
 	rte_bbdev_log_debug("DMA response desc %p", desc);
 
 	*op = desc->enc_req.op_addr;
-	/* Check the decriptor error field, return 1 on error */
+	/* Check the descriptor error field, return 1 on error */
 	desc_error = check_desc_error(desc->enc_req.error);
 	(*op)->status = desc_error << RTE_BBDEV_DATA_ERROR;
 
@@ -2139,7 +2139,7 @@ dequeue_enc_one_op_tb(struct fpga_queue *q, struct rte_bbdev_enc_op **op,
 	for (cb_idx = 0; cb_idx < cbs_in_op; ++cb_idx) {
 		desc = q->ring_addr + ((q->head_free_desc + desc_offset +
 				cb_idx) & q->sw_ring_wrap_mask);
-		/* Check the decriptor error field, return 1 on error */
+		/* Check the descriptor error field, return 1 on error */
 		desc_error = check_desc_error(desc->enc_req.error);
 		status |= desc_error << RTE_BBDEV_DATA_ERROR;
 		rte_bbdev_log_debug("DMA response desc %p", desc);
@@ -2177,7 +2177,7 @@ dequeue_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,
 	(*op)->turbo_dec.iter_count = (desc->dec_req.iter + 2) >> 1;
 	/* crc_pass = 0 when decoder fails */
 	(*op)->status = !(desc->dec_req.crc_pass) << RTE_BBDEV_CRC_ERROR;
-	/* Check the decriptor error field, return 1 on error */
+	/* Check the descriptor error field, return 1 on error */
 	desc_error = check_desc_error(desc->enc_req.error);
 	(*op)->status |= desc_error << RTE_BBDEV_DATA_ERROR;
 	return 1;
@@ -2221,7 +2221,7 @@ dequeue_dec_one_op_tb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,
 		iter_count = RTE_MAX(iter_count, (uint8_t) desc->dec_req.iter);
 		/* crc_pass = 0 when decoder fails, one fails all */
 		status |= !(desc->dec_req.crc_pass) << RTE_BBDEV_CRC_ERROR;
-		/* Check the decriptor error field, return 1 on error */
+		/* Check the descriptor error field, return 1 on error */
 		desc_error = check_desc_error(desc->enc_req.error);
 		status |= desc_error << RTE_BBDEV_DATA_ERROR;
 		rte_bbdev_log_debug("DMA response desc %p", desc);
@@ -31,7 +31,7 @@ struct bbdev_null_params {
 	uint16_t queues_num; /*< Null BBDEV queues number */
 };
 
-/* Accecptable params for null BBDEV devices */
+/* Acceptable params for null BBDEV devices */
 #define BBDEV_NULL_MAX_NB_QUEUES_ARG "max_nb_queues"
 #define BBDEV_NULL_SOCKET_ID_ARG "socket_id"
 
@@ -61,7 +61,7 @@ struct turbo_sw_params {
 	uint16_t queues_num; /*< Turbo SW device queues number */
 };
 
-/* Accecptable params for Turbo SW devices */
+/* Acceptable params for Turbo SW devices */
 #define TURBO_SW_MAX_NB_QUEUES_ARG "max_nb_queues"
 #define TURBO_SW_SOCKET_ID_ARG "socket_id"
 
@@ -70,7 +70,7 @@ compare_dpaa_devices(struct rte_dpaa_device *dev1,
 {
 	int comp = 0;
 
-	/* Segragating ETH from SEC devices */
+	/* Segregating ETH from SEC devices */
 	if (dev1->device_type > dev2->device_type)
 		comp = 1;
 	else if (dev1->device_type < dev2->device_type)
@@ -1353,7 +1353,7 @@ __rte_internal
 int qman_irqsource_add(u32 bits);
 
 /**
- * qman_fq_portal_irqsource_add - samilar to qman_irqsource_add, but it
+ * qman_fq_portal_irqsource_add - similar to qman_irqsource_add, but it
  * takes portal (fq specific) as input rather than using the thread affined
  * portal.
  */
@@ -1416,7 +1416,7 @@ __rte_internal
 struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);
 
 /**
- * qman_dqrr_consume - Consume the DQRR entriy after volatile dequeue
+ * qman_dqrr_consume - Consume the DQRR entry after volatile dequeue
  * @fq: Frame Queue on which the volatile dequeue command is issued
  * @dq: DQRR entry to consume. This is the one which is provided by the
  *    'qbman_dequeue' command.
@@ -2017,7 +2017,7 @@ int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
  * @cgr: the 'cgr' object to deregister
  *
  * "Unplugs" this CGR object from the portal affine to the cpu on which this API
- * is executed. This must be excuted on the same affine portal on which it was
+ * is executed. This must be executed on the same affine portal on which it was
  * created.
  */
 __rte_internal
@@ -40,7 +40,7 @@ struct dpaa_raw_portal {
 	/* Specifies the stash request queue this portal should use */
 	uint8_t sdest;
 
-	/* Specifes a specific portal index to map or QBMAN_ANY_PORTAL_IDX
+	/* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
 	 * for don't care. The portal index will be populated by the
 	 * driver when the ioctl() successfully completes.
 	 */
@@ -49,7 +49,7 @@ struct dpaa_portal_map {
 struct dpaa_ioctl_portal_map {
 	/* Input parameter, is a qman or bman portal required. */
 	enum dpaa_portal_type type;
-	/* Specifes a specific portal index to map or 0xffffffff
+	/* Specifies a specific portal index to map or 0xffffffff
 	 * for don't care.
 	 */
 	uint32_t index;
@@ -539,7 +539,7 @@ rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver)
 
 	fslmc_bus = driver->fslmc_bus;
 
-	/* Cleanup the PA->VA Translation table; From whereever this function
+	/* Cleanup the PA->VA Translation table; From wherever this function
 	 * is called from.
 	 */
 	if (rte_eal_iova_mode() == RTE_IOVA_PA)
@@ -178,7 +178,7 @@ static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)
 	dpio_epoll_fd = epoll_create(1);
 	ret = rte_dpaa2_intr_enable(dpio_dev->intr_handle, 0);
 	if (ret) {
-		DPAA2_BUS_ERR("Interrupt registeration failed");
+		DPAA2_BUS_ERR("Interrupt registration failed");
 		return -1;
 	}
 
@@ -156,7 +156,7 @@ struct dpaa2_queue {
 		struct rte_cryptodev_data *crypto_data;
 	};
 	uint32_t fqid;		/*!< Unique ID of this queue */
-	uint16_t flow_id;	/*!< To be used by DPAA2 frmework */
+	uint16_t flow_id;	/*!< To be used by DPAA2 framework */
 	uint8_t tc_index;	/*!< traffic class identifier */
 	uint8_t cgid;		/*! < Congestion Group id for this queue */
 	uint64_t rx_pkts;
@@ -510,7 +510,7 @@ int qbman_result_has_new_result(struct qbman_swp *s,
 			struct qbman_result *dq);
 
 /**
- * qbman_check_command_complete() - Check if the previous issued dq commnd
+ * qbman_check_command_complete() - Check if the previous issued dq command
  * is completed and results are available in memory.
  * @s: the software portal object.
  * @dq: the dequeue result read from the memory.
@@ -687,7 +687,7 @@ uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);
 
 /**
 * qbman_result_DQ_odpid() - Get the seqnum field in dequeue response
- * odpid is valid only if ODPVAILD flag is TRUE.
+ * odpid is valid only if ODPVALID flag is TRUE.
 * @dq: the dequeue result.
 *
 * Return odpid.
@@ -743,7 +743,7 @@ const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);
 * qbman_result_SCN_state() - Get the state field in State-change notification
 * @scn: the state change notification.
 *
- * Return the state in the notifiation.
+ * Return the state in the notification.
 */
 __rte_internal
 uint8_t qbman_result_SCN_state(const struct qbman_result *scn);
@@ -825,7 +825,7 @@ uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn);
 
 /* Parsing CGCU */
 /**
- * qbman_result_cgcu_cgid() - Check CGCU resouce id, i.e. cgid
+ * qbman_result_cgcu_cgid() - Check CGCU resource id, i.e. cgid
 * @scn: the state change notification.
 *
 * Return the CGCU resource id.
@@ -903,14 +903,14 @@ void qbman_eq_desc_clear(struct qbman_eq_desc *d);
 __rte_internal
 void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
 /**
- * qbman_eq_desc_set_orp() - Set order-resotration in the enqueue descriptor
+ * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
 * @d: the enqueue descriptor.
 * @response_success: 1 = enqueue with response always; 0 = enqueue with
 * rejections returned on a FQ.
 * @opr_id: the order point record id.
 * @seqnum: the order restoration sequence number.
- * @incomplete: indiates whether this is the last fragments using the same
- * sequeue number.
+ * @incomplete: indicates whether this is the last fragments using the same
+ * sequence number.
 */
 __rte_internal
 void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
@@ -1052,10 +1052,10 @@ __rte_internal
 uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp);
 
 /**
- * qbman_result_eqresp_rc() - determines if enqueue command is sucessful.
+ * qbman_result_eqresp_rc() - determines if enqueue command is successful.
 * @eqresp: enqueue response.
 *
- * Return 0 when command is sucessful.
+ * Return 0 when command is successful.
 */
 __rte_internal
 uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp);
@@ -1250,7 +1250,7 @@ int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);
 /**
 * These functions change the FQ flow-control stuff between XON/XOFF. (The
 * default is XON.) This setting doesn't affect enqueues to the FQ, just
- * dequeues. XOFF FQs will remain in the tenatively-scheduled state, even when
+ * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when
 * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
 * changed to XOFF after it had already become truly-scheduled to a channel, and
 * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
@@ -815,7 +815,7 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)
 			continue;
 		}
 
-		/* skip non-mmapable BARs */
+		/* skip non-mmappable BARs */
 		if ((reg->flags & VFIO_REGION_INFO_FLAG_MMAP) == 0) {
 			free(reg);
 			continue;
@@ -197,7 +197,7 @@ rte_vdev_remove_custom_scan(rte_vdev_scan_callback callback, void *user_arg);
 int rte_vdev_init(const char *name, const char *args);
 
 /**
- * Uninitalize a driver specified by name.
+ * Uninitialize a driver specified by name.
 *
 * @param name
 *   The pointer to a driver name to be uninitialized.
@@ -134,7 +134,7 @@ vmbus_probe_one_driver(struct rte_vmbus_driver *dr,
 
 	/*
 	 * If device class GUID matches, call the probe function of
-	 * registere drivers for the vmbus device.
+	 * register drivers for the vmbus device.
 	 * Return -1 if initialization failed,
 	 * and 1 if no driver found for this device.
 	 */
@@ -14,7 +14,7 @@
 #define CGX_CMRX_INT_OVERFLW BIT_ULL(1)
 /*
 * CN10K stores number of lmacs in 4 bit filed
- * in contraty to CN9K which uses only 3 bits.
+ * in contrary to CN9K which uses only 3 bits.
 *
 * In theory masks should differ yet on CN9K
 * bits beyond specified range contain zeros.
@@ -138,7 +138,7 @@ nix_lf_bpf_dump(__io struct nix_band_prof_s *bpf)
 {
 	plt_dump("W0: cir_mantissa \t\t\t%d\nW0: pebs_mantissa \t\t\t0x%03x",
 		 bpf->cir_mantissa, bpf->pebs_mantissa);
-	plt_dump("W0: peir_matissa \t\t\t\t%d\nW0: cbs_exponent \t\t\t%d",
+	plt_dump("W0: peir_mantissa \t\t\t\t%d\nW0: cbs_exponent \t\t\t%d",
		 bpf->peir_mantissa, bpf->cbs_exponent);
	plt_dump("W0: cir_exponent \t\t\t%d\nW0: pebs_exponent \t\t\t%d",
		 bpf->cir_exponent, bpf->pebs_exponent);
@@ -107,7 +107,7 @@ nix_tm_adjust_shaper_pps_rate(struct nix_tm_shaper_profile *profile)
 	if (profile->peak.rate && min_rate > profile->peak.rate)
 		min_rate = profile->peak.rate;
 
-	/* Each packet accomulate single count, whereas HW
+	/* Each packet accumulate single count, whereas HW
 	 * considers each unit as Byte, so we need convert
 	 * user pps to bps
 	 */
@@ -234,7 +234,7 @@ npc_get_kex_capability(struct npc *npc)
 	/* Ethtype: Offset 12B, len 2B */
 	kex_cap.bit.ethtype_0 = npc_is_kex_enabled(
 		npc, NPC_LID_LA, NPC_LT_LA_ETHER, 12 * 8, 2 * 8);
-	/* QINQ VLAN Ethtype: ofset 8B, len 2B */
+	/* QINQ VLAN Ethtype: offset 8B, len 2B */
 	kex_cap.bit.ethtype_x = npc_is_kex_enabled(
 		npc, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8 * 8, 2 * 8);
 	/* VLAN ID0 : Outer VLAN: Offset 2B, len 2B */
@@ -363,7 +363,7 @@ struct npc {
 	uint32_t rss_grps;	/* rss groups supported */
 	uint16_t flow_prealloc_size;	/* Pre allocated mcam size */
 	uint16_t flow_max_priority;	/* Max priority for flow */
-	uint16_t switch_header_type;	/* Suppprted switch header type */
+	uint16_t switch_header_type;	/* Supported switch header type */
 	uint32_t mark_actions;	/* Number of mark actions */
 	uint32_t vtag_strip_actions;	/* vtag insert/strip actions */
 	uint16_t pf_func;	/* pf_func of device */
@@ -246,7 +246,7 @@ cpt_fc_ciph_set_key(struct cpt_ctx *cpt_ctx, cipher_type_t type,
 	if (cpt_ctx->fc_type == FC_GEN) {
 		/*
 		 * We need to always say IV is from DPTR as user can
-		 * sometimes iverride IV per operation.
+		 * sometimes override IV per operation.
 		 */
 		fctx->enc.iv_source = CPT_FROM_DPTR;
 
@@ -3035,7 +3035,7 @@ prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
 	tailroom = rte_pktmbuf_tailroom(pkt);
 	if (likely((headroom >= 24) &&
 	    (tailroom >= 8))) {
-		/* In 83XX this is prerequivisit for Direct mode */
+		/* In 83XX this is prerequisite for Direct mode */
 		*flags |= SINGLE_BUF_HEADTAILROOM;
 	}
 	param->bufs[0].vaddr = seg_data;
@@ -779,7 +779,7 @@ cpt_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
 	 * Set dlen = sum(sizeof(fpm address), ROUNDUP8(message len),
 	 * ROUNDUP8(sign len(r and s), public key len(x and y coordinates),
 	 * prime len, order len)).
-	 * Please note sign, public key and order can not excede prime length
+	 * Please note sign, public key and order can not exceed prime length
 	 * i.e. 6 * p_align
 	 */
 	dlen = sizeof(fpm_table_iova) + m_align + (8 * p_align);
@@ -67,7 +67,7 @@ cnstr_shdsc_zuce(uint32_t *descbuf, bool ps, bool swap,
 * @authlen: size of digest
 *
 * The IV prepended before hmac payload must be 8 bytes consisting
- * of COUNT||BEAERER||DIR. The COUNT is of 32-bits, bearer is of 5 bits and
+ * of COUNT||BEARER||DIR. The COUNT is of 32-bits, bearer is of 5 bits and
 * direction is of 1 bit - totalling to 38 bits.
 *
 * Return: size of descriptor written in words or negative number on error
@@ -492,10 +492,10 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 
 	/* Set the variable size of data the register will write */
 	if (dir == OP_TYPE_ENCAP_PROTOCOL) {
-		/* We will add the interity data so add its length */
+		/* We will add the integrity data so add its length */
 		MATHI(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
 	} else {
-		/* We will check the interity data so remove its length */
+		/* We will check the integrity data so remove its length */
 		MATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
 		/* Do not take the ICV in the out-snooping configuration */
 		MATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQINSZ, 4, IMMED2);
@@ -803,7 +803,7 @@ static inline int pdcp_sdap_insert_no_snoop_op(
 			CLRW_CLR_C1MODE,
 			CLRW, 0, 4, IMMED);
 
-		/* Load the key for authentcation */
+		/* Load the key for authentication */
 		KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
 
@@ -261,7 +261,7 @@ dpaax_iova_table_depopulate(void)
 	rte_free(dpaax_iova_table_p->entries);
 	dpaax_iova_table_p = NULL;
 
-	DPAAX_DEBUG("IOVA Table cleanedup");
+	DPAAX_DEBUG("IOVA Table cleaned");
 }
 
 int
@@ -1006,7 +1006,7 @@ struct iavf_profile_tlv_section_record {
 	u8 data[12];
 };
 
-/* Generic AQ section in proflie */
+/* Generic AQ section in profile */
 struct iavf_profile_aq_section {
 	u16 opcode;
 	u16 flags;
@@ -233,7 +233,7 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 	case VIRTCHNL_OP_DCF_CMD_DESC:
 		return "VIRTCHNL_OP_DCF_CMD_DESC";
 	case VIRTCHNL_OP_DCF_CMD_BUFF:
-		return "VIRTCHHNL_OP_DCF_CMD_BUFF";
+		return "VIRTCHNL_OP_DCF_CMD_BUFF";
 	case VIRTCHNL_OP_DCF_DISABLE:
 		return "VIRTCHNL_OP_DCF_DISABLE";
 	case VIRTCHNL_OP_DCF_GET_VSI_MAP:
@@ -854,7 +854,7 @@ static void mlx5_common_driver_init(void)
 static bool mlx5_common_initialized;
 
 /**
- * One time innitialization routine for run-time dependency on glue library
+ * One time initialization routine for run-time dependency on glue library
 * for multiple PMDs. Each mlx5 PMD that depends on mlx5_common module,
 * must invoke in its constructor.
 */
@@ -1541,7 +1541,7 @@ mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n,
 * Destroy a mempool registration object.
 *
 * @param standalone
- *   Whether @p mpr owns its MRs excludively, i.e. they are not shared.
+ *   Whether @p mpr owns its MRs exclusively, i.e. they are not shared.
 */
 static void
 mlx5_mempool_reg_destroy(struct mlx5_mr_share_cache *share_cache,
@@ -1822,7 +1822,7 @@ mlx5_devx_cmd_create_td(void *ctx)
 *   Pointer to file stream.
 *
 * @return
- *   0 on success, a nagative value otherwise.
+ *   0 on success, a negative value otherwise.
 */
 int
 mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,
@@ -58,7 +58,7 @@ static struct mlx5_sys_mem mlx5_sys_mem = {
 * Check if the address belongs to memory seg list.
 *
 * @param addr
- *   Memory address to be ckeced.
+ *   Memory address to be checked.
 * @param msl
 *   Memory seg list.
 *
@@ -109,7 +109,7 @@ mlx5_mem_update_msl(void *addr)
 * Check if the address belongs to rte memory.
 *
 * @param addr
- *   Memory address to be ckeced.
+ *   Memory address to be checked.
 *
 * @return
 *   True if it belongs, false otherwise.
@@ -19,7 +19,7 @@ extern "C" {
 
 enum mlx5_mem_flags {
 	MLX5_MEM_ANY = 0,
-	/* Memory will be allocated dpends on sys_mem_en. */
+	/* Memory will be allocated depends on sys_mem_en. */
 	MLX5_MEM_SYS = 1 << 0,
 	/* Memory should be allocated from system. */
 	MLX5_MEM_RTE = 1 << 1,
@@ -4172,7 +4172,7 @@ mlx5_flow_mark_get(uint32_t val)
 *   timestamp format supported by the queue.
 *
 * @return
- *   Converted timstamp format settings.
+ *   Converted timestamp format settings.
 */
 static inline uint32_t
 mlx5_ts_format_conv(uint32_t ts_format)
@@ -302,7 +302,7 @@ mlx5_os_umem_dereg(void *pumem)
 }
 
 /**
- * Register mr. Given protection doamin pointer, pointer to addr and length
+ * Register mr. Given protection domain pointer, pointer to addr and length
 * register the memory region.
 *
 * @param[in] pd
@@ -310,7 +310,7 @@ mlx5_os_umem_dereg(void *pumem)
 * @param[in] addr
 *   Pointer to memory start address (type devx_device_ctx).
 * @param[in] length
- *   Lengtoh of the memory to register.
+ *   Length of the memory to register.
 * @param[out] pmd_mr
 *   pmd_mr struct set with lkey, address, length, pointer to mr object, mkey
 *
@@ -21,7 +21,7 @@
 /**
 * This API allocates aligned or non-aligned memory. The free can be on either
 * aligned or nonaligned memory. To be protected - even though there may be no
- * alignment - in Windows this API will unconditioanlly call _aligned_malloc()
+ * alignment - in Windows this API will unconditionally call _aligned_malloc()
 * with at least a minimal alignment size.
 *
 * @param[in] align
@@ -72,7 +72,7 @@
 #define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
 #define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
 
-/* Minimum ring bufer size for memory allocation */
+/* Minimum ring buffer size for memory allocation */
 #define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
 	ADF_RING_SIZE_4K : SIZE)
 #define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
@@ -616,7 +616,7 @@ typedef struct efsys_bar_s {
 
 #define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)
 
-/* Just avoid store and compiler (impliciltly) reordering */
+/* Just avoid store and compiler (implicitly) reordering */
 #define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
 
 /* TIMESTAMP */
@@ -195,7 +195,7 @@ union zip_inst_s {
 		uint64_t bf : 1;
 		/** Comp/decomp operation */
 		uint64_t op : 2;
-		/** Data sactter */
+		/** Data scatter */
 		uint64_t ds : 1;
 		/** Data gather */
 		uint64_t dg : 1;
@@ -376,7 +376,7 @@ union zip_inst_s {
 		uint64_t bf : 1;
 		/** Comp/decomp operation */
 		uint64_t op : 2;
-		/** Data sactter */
+		/** Data scatter */
 		uint64_t ds : 1;
 		/** Data gather */
 		uint64_t dg : 1;
@@ -31,7 +31,7 @@ extern int octtx_zip_logtype_driver;
 /**< PCI device id of ZIP VF */
 #define PCI_DEVICE_ID_OCTEONTX_ZIPVF 0xA037
 
-/* maxmum number of zip vf devices */
+/* maximum number of zip vf devices */
 #define ZIP_MAX_VFS 8
 
 /* max size of one chunk */
@@ -463,7 +463,7 @@ qat_comp_create_stream_pool(struct qat_comp_dev_private *comp_dev,
 	} else if (info.error) {
 		rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
 		QAT_LOG(ERR,
-		"Destoying mempool %s as at least one element failed initialisation",
+		"Destroying mempool %s as at least one element failed initialisation",
 			stream_pool_name);
 		rte_mempool_free(mp);
 		mp = NULL;