/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdio.h>
#include <inttypes.h>

#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_jhash.h>
#include <rte_fbk_hash.h>
#include <rte_random.h>
#include <rte_string_fns.h>

#include "test.h"

#define MAX_ENTRIES (1 << 19)
#define KEYS_TO_ADD (MAX_ENTRIES)
#define ADD_PERCENT 0.75 /* 75% table utilization */
#define NUM_LOOKUPS (KEYS_TO_ADD * 5) /* Loop among keys added, several times */
/* BUCKET_SIZE should be the same as RTE_HASH_BUCKET_ENTRIES in the rte_hash library */
#define BUCKET_SIZE 8
#define NUM_BUCKETS (MAX_ENTRIES / BUCKET_SIZE)
#define MAX_KEYSIZE 64
#define NUM_KEYSIZES 10
#define NUM_SHUFFLES 10
#define BURST_SIZE 16
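
/*
 * Note (derived from the functions below): when a table is created without
 * extendable buckets, only KEYS_TO_ADD * ADD_PERCENT keys are added and
 * looked up, since a fixed-size cuckoo table is not expected to reach
 * 100% utilization.
 */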

enum operations {
	ADD = 0,
	LOOKUP,
	LOOKUP_MULTI,
	DELETE,
	NUM_OPERATIONS
};

static uint32_t hashtest_key_lens[] = {
	/* standard key sizes */
	4, 8, 16, 32, 48, 64,
	/* IPv4 SRC + DST + protocol, unpadded */
	9,
	/* IPv4 5-tuple, unpadded */
	13,
	/* IPv6 5-tuple, unpadded */
	37,
	/* IPv6 5-tuple, padded to 8-byte boundary */
	40
};

struct rte_hash *h[NUM_KEYSIZES];

/* Array that stores whether each slot is taken */
static uint8_t slot_taken[MAX_ENTRIES];

/*
 * Array to store the number of cycles per operation,
 * indexed by key size, operation, with_hash and with_data
 */
static uint64_t cycles[NUM_KEYSIZES][NUM_OPERATIONS][2][2];

/* Array to store all input keys */
static uint8_t keys[KEYS_TO_ADD][MAX_KEYSIZE];

/* Array to store the precomputed hash for 'keys' */
static hash_sig_t signatures[KEYS_TO_ADD];

/* Array to store how many busy entries each bucket has */
static uint8_t buckets[NUM_BUCKETS];

/* Array to store the positions where keys are added */
static int32_t positions[KEYS_TO_ADD];

/* Parameters used for hash table in unit test functions. */
static struct rte_hash_parameters ut_params = {
	.entries = MAX_ENTRIES,
	.hash_func = rte_jhash,
	.hash_func_init_val = 0,
};
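
/*
 * The remaining parameters (name, key_len, socket_id and extra_flag)
 * are filled in per table by create_table() below.
 */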

static int
create_table(unsigned int with_data, unsigned int table_index,
		unsigned int with_locks, unsigned int ext)
{
	char name[RTE_HASH_NAMESIZE];

	if (with_data)
		/* Table will store 8-byte data */
		snprintf(name, sizeof(name), "test_hash%u_data",
				hashtest_key_lens[table_index]);
	else
		snprintf(name, sizeof(name), "test_hash%u",
				hashtest_key_lens[table_index]);

	if (with_locks)
		ut_params.extra_flag =
			RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT
				| RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY;
	else
		ut_params.extra_flag = 0;

	if (ext)
		ut_params.extra_flag |= RTE_HASH_EXTRA_FLAGS_EXT_TABLE;

	ut_params.name = name;
	ut_params.key_len = hashtest_key_lens[table_index];
	ut_params.socket_id = rte_socket_id();
	h[table_index] = rte_hash_find_existing(name);
	if (h[table_index] != NULL)
		/*
		 * If the table was already created, free it and create it
		 * again, so that we force it to be empty
		 */
		rte_hash_free(h[table_index]);
	h[table_index] = rte_hash_create(&ut_params);
	if (h[table_index] == NULL) {
		printf("Error creating table\n");
		return -1;
	}
	return 0;
}
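
/*
 * Usage sketch (illustrative only, not part of the test flow): create a
 * plain table for the first key size, with r/w locks and without
 * extendable buckets:
 *
 *	if (create_table(0, 0, 1, 0) < 0)
 *		return -1;
 */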

/* Shuffle the keys that have been added, so lookups will be totally random */
static void
shuffle_input_keys(unsigned int table_index, unsigned int ext)
{
	unsigned i;
	uint32_t swap_idx;
	uint8_t temp_key[MAX_KEYSIZE];
	hash_sig_t temp_signature;
	int32_t temp_position;
	unsigned int keys_to_add;

	if (!ext)
		keys_to_add = KEYS_TO_ADD * ADD_PERCENT;
	else
		keys_to_add = KEYS_TO_ADD;

	for (i = keys_to_add - 1; i > 0; i--) {
		/* Pick a random position below i to swap with (Fisher-Yates style) */
		swap_idx = rte_rand() % i;

		memcpy(temp_key, keys[i], hashtest_key_lens[table_index]);
		temp_signature = signatures[i];
		temp_position = positions[i];

		memcpy(keys[i], keys[swap_idx], hashtest_key_lens[table_index]);
		signatures[i] = signatures[swap_idx];
		positions[i] = positions[swap_idx];

		memcpy(keys[swap_idx], temp_key, hashtest_key_lens[table_index]);
		signatures[swap_idx] = temp_signature;
		positions[swap_idx] = temp_position;
	}
}

/*
 * Looks for a set of keys of which
 * ALL can fit in the hash table (no insertion errors)
 */
static int
get_input_keys(unsigned int with_pushes, unsigned int table_index,
		unsigned int ext)
{
	unsigned i, j;
	unsigned bucket_idx, incr, success = 1;
	uint8_t k = 0;
	int32_t ret;
	const uint32_t bucket_bitmask = NUM_BUCKETS - 1;
	unsigned int keys_to_add;

	if (!ext)
		keys_to_add = KEYS_TO_ADD * ADD_PERCENT;
	else
		keys_to_add = KEYS_TO_ADD;

	/* Reset all arrays */
	for (i = 0; i < MAX_ENTRIES; i++)
		slot_taken[i] = 0;

	for (i = 0; i < NUM_BUCKETS; i++)
		buckets[i] = 0;

	for (j = 0; j < hashtest_key_lens[table_index]; j++)
		keys[0][j] = 0;

	/*
	 * Add only entries that are not duplicated and that fit in the table
	 * (cannot store more than BUCKET_SIZE entries per bucket).
	 * Regardless of whether a key was added correctly or not (success),
	 * the next one to try is the previous key increased by 1; see the
	 * illustration after this comment.
	 */
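	/*
	 * Illustration (assuming every add succeeds): for 4-byte keys the
	 * sequence of generated keys behaves like a little-endian counter:
	 *
	 *	00 00 00 00 -> 01 00 00 00 -> ... -> ff 00 00 00 ->
	 *	00 01 00 00 -> 01 01 00 00 -> ...
	 */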
	for (i = 0; i < keys_to_add;) {
		incr = 0;
		if (i != 0) {
			keys[i][0] = ++k;
			/* Overflow, need to increment the next byte */
			if (keys[i][0] == 0)
				incr = 1;
			for (j = 1; j < hashtest_key_lens[table_index]; j++) {
				if (incr == 0) {
					/* Do not increase the next byte */
					if (success == 1)
						keys[i][j] = keys[i - 1][j];
					else
						/* Retrying index i: byte already holds the right value */
						keys[i][j] = keys[i][j];
				} else {
					/* Increase the next byte by one */
					if (success == 1)
						keys[i][j] = keys[i - 1][j] + 1;
					else
						keys[i][j] = keys[i][j] + 1;
					if (keys[i][j] == 0)
						incr = 1;
					else
						incr = 0;
				}
			}
		}
		success = 0;
		signatures[i] = rte_hash_hash(h[table_index], keys[i]);
		bucket_idx = signatures[i] & bucket_bitmask;
		/*
		 * If we are not inserting keys in a secondary location,
		 * do not try to insert the key when the bucket is full
		 */
		if (with_pushes == 0)
			if (buckets[bucket_idx] == BUCKET_SIZE)
				continue;

		/* If the key can be added, leave it in the successful keys array, "keys" */
		ret = rte_hash_add_key_with_hash(h[table_index], keys[i],
						signatures[i]);
		if (ret >= 0) {
			/* If the key was already added, ignore the entry and do not store it */
			if (slot_taken[ret])
				continue;
			else {
				/* Store the returned position and mark the slot as taken */
				slot_taken[ret] = 1;
				positions[i] = ret;
				buckets[bucket_idx]++;
				success = 1;
				i++;
			}
		}
	}

	/* Reset the table, so we can measure the time to add all the entries */
	rte_hash_free(h[table_index]);
	h[table_index] = rte_hash_create(&ut_params);

	return 0;
}

static int
timed_adds(unsigned int with_hash, unsigned int with_data,
		unsigned int table_index, unsigned int ext)
{
	unsigned i;
	const uint64_t start_tsc = rte_rdtsc();
	void *data;
	int32_t ret;
	unsigned int keys_to_add;

	if (!ext)
		keys_to_add = KEYS_TO_ADD * ADD_PERCENT;
	else
		keys_to_add = KEYS_TO_ADD;

	for (i = 0; i < keys_to_add; i++) {
		data = (void *) ((uintptr_t) signatures[i]);
		if (with_hash && with_data) {
			ret = rte_hash_add_key_with_hash_data(h[table_index],
						(const void *) keys[i],
						signatures[i], data);
			if (ret < 0) {
				printf("H+D: Failed to add key number %u\n", i);
				return -1;
			}
		} else if (with_hash && !with_data) {
			ret = rte_hash_add_key_with_hash(h[table_index],
						(const void *) keys[i],
						signatures[i]);
			if (ret >= 0)
				positions[i] = ret;
			else {
				printf("H: Failed to add key number %u\n", i);
				return -1;
			}
		} else if (!with_hash && with_data) {
			ret = rte_hash_add_key_data(h[table_index],
						(const void *) keys[i],
						data);
			if (ret < 0) {
				printf("D: Failed to add key number %u\n", i);
				return -1;
			}
		} else {
			ret = rte_hash_add_key(h[table_index], keys[i]);
			if (ret >= 0)
				positions[i] = ret;
			else {
				printf("Failed to add key number %u\n", i);
				return -1;
			}
		}
	}

	const uint64_t end_tsc = rte_rdtsc();
	const uint64_t time_taken = end_tsc - start_tsc;

	cycles[table_index][ADD][with_hash][with_data] = time_taken/keys_to_add;

	return 0;
}
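
/*
 * Example (illustrative): after timed_adds(1, 1, 0, 0), the average cost
 * in cycles of rte_hash_add_key_with_hash_data() for the first key size
 * is stored in cycles[0][ADD][1][1].
 */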

static int
timed_lookups(unsigned int with_hash, unsigned int with_data,
		unsigned int table_index, unsigned int ext)
{
	unsigned i, j;
	const uint64_t start_tsc = rte_rdtsc();
	void *ret_data;
	void *expected_data;
	int32_t ret;
	unsigned int keys_to_add, num_lookups;
|
|
|
|
|
|
|
|
if (!ext) {
|
|
|
|
keys_to_add = KEYS_TO_ADD * ADD_PERCENT;
|
|
|
|
num_lookups = NUM_LOOKUPS * ADD_PERCENT;
|
|
|
|
} else {
|
|
|
|
keys_to_add = KEYS_TO_ADD;
|
|
|
|
num_lookups = NUM_LOOKUPS;
|
|
|
|
}
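
	/*
	 * Note: for tables without extendable buckets (!ext), only
	 * KEYS_TO_ADD * ADD_PERCENT keys were inserted, presumably because a
	 * table with a fixed number of bucket entries cannot be filled to
	 * 100%; the lookup count is scaled the same way so that every key is
	 * looked up the same number of times in both configurations.
	 */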
	for (i = 0; i < num_lookups / keys_to_add; i++) {
		for (j = 0; j < keys_to_add; j++) {
			if (with_hash && with_data) {
				/* Precomputed hash, data returned by pointer */
				ret = rte_hash_lookup_with_hash_data(h[table_index],
							(const void *) keys[j],
							signatures[j], &ret_data);
				if (ret < 0) {
					printf("Key number %u was not found\n", j);
					return -1;
				}
				expected_data = (void *) ((uintptr_t) signatures[j]);
				if (ret_data != expected_data) {
					printf("Data returned for key number %u is %p,"
						" but should be %p\n", j, ret_data,
						expected_data);
					return -1;
				}
			} else if (with_hash && !with_data) {
				/* Precomputed hash, slot position returned */
				ret = rte_hash_lookup_with_hash(h[table_index],
							(const void *) keys[j],
							signatures[j]);
				if (ret < 0 || ret != positions[j]) {
					printf("Key looked up in %d, should be in %d\n",
						ret, positions[j]);
					return -1;
				}
			} else if (!with_hash && with_data) {
				/* Hash computed internally, data returned */
				ret = rte_hash_lookup_data(h[table_index],
							(const void *) keys[j], &ret_data);
				if (ret < 0) {
					printf("Key number %u was not found\n", j);
					return -1;
				}
				expected_data = (void *) ((uintptr_t) signatures[j]);
				if (ret_data != expected_data) {
					printf("Data returned for key number %u is %p,"
						" but should be %p\n", j, ret_data,
						expected_data);
					return -1;
				}
			} else {
				/* Plain lookup: hash computed internally */
				ret = rte_hash_lookup(h[table_index], keys[j]);
				if (ret < 0 || ret != positions[j]) {
					printf("Key looked up in %d, should be in %d\n",
						ret, positions[j]);
					return -1;
				}
			}
		}
	}
	const uint64_t end_tsc = rte_rdtsc();
	const uint64_t time_taken = end_tsc - start_tsc;

	cycles[table_index][LOOKUP][with_hash][with_data] = time_taken/num_lookups;

	return 0;
}
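
/*
 * Same measurement as timed_lookups(), but using the bulk lookup APIs:
 * keys are looked up in bursts of BURST_SIZE, with hits reported either
 * through a bit mask (data-returning variants) or a positions array.
 */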
static int
timed_lookups_multi(unsigned int with_hash, unsigned int with_data,
		unsigned int table_index, unsigned int ext)
{
	unsigned i, j, k;
	int32_t positions_burst[BURST_SIZE];
	const void *keys_burst[BURST_SIZE];
	void *expected_data[BURST_SIZE];
	void *ret_data[BURST_SIZE];
	uint64_t hit_mask;
	int ret;
	unsigned int keys_to_add, num_lookups;

	if (!ext) {
		keys_to_add = KEYS_TO_ADD * ADD_PERCENT;
		num_lookups = NUM_LOOKUPS * ADD_PERCENT;
	} else {
		keys_to_add = KEYS_TO_ADD;
		num_lookups = NUM_LOOKUPS;
	}
	const uint64_t start_tsc = rte_rdtsc();

	for (i = 0; i < num_lookups/keys_to_add; i++) {
		for (j = 0; j < keys_to_add/BURST_SIZE; j++) {
			/* Assemble the next burst of keys */
			for (k = 0; k < BURST_SIZE; k++)
				keys_burst[k] = keys[j * BURST_SIZE + k];
			if (!with_hash && with_data) {
				/* Bit k of hit_mask is set if burst key k was found */
				ret = rte_hash_lookup_bulk_data(h[table_index],
					(const void **) keys_burst,
					BURST_SIZE,
					&hit_mask,
					ret_data);
				if (ret != BURST_SIZE) {
					printf("Expect to find %u keys,"
						" but found %d\n", BURST_SIZE, ret);
					return -1;
				}
				for (k = 0; k < BURST_SIZE; k++) {
					if ((hit_mask & (1ULL << k)) == 0) {
						printf("Key number %u not found\n",
							j * BURST_SIZE + k);
						return -1;
					}
					expected_data[k] = (void *) ((uintptr_t) signatures[j * BURST_SIZE + k]);
					if (ret_data[k] != expected_data[k]) {
						printf("Data returned for key number %u is %p,"
							" but should be %p\n",
							j * BURST_SIZE + k,
							ret_data[k], expected_data[k]);
						return -1;
					}
				}
			} else if (with_hash && with_data) {
				ret = rte_hash_lookup_with_hash_bulk_data(
					h[table_index],
					(const void **)keys_burst,
					&signatures[j * BURST_SIZE],
					BURST_SIZE, &hit_mask, ret_data);
				if (ret != BURST_SIZE) {
					printf("Expect to find %u keys,"
						" but found %d\n", BURST_SIZE, ret);
					return -1;
				}
				for (k = 0; k < BURST_SIZE; k++) {
					if ((hit_mask & (1ULL << k)) == 0) {
						printf("Key number %u not found\n",
							j * BURST_SIZE + k);
						return -1;
					}
					expected_data[k] =
						(void *)((uintptr_t)signatures[j * BURST_SIZE + k]);
					if (ret_data[k] != expected_data[k]) {
						printf("Data returned for key number %u is %p,"
							" but should be %p\n",
							j * BURST_SIZE + k,
							ret_data[k],
							expected_data[k]);
						return -1;
					}
				}
			} else if (with_hash && !with_data) {
				ret = rte_hash_lookup_with_hash_bulk(
					h[table_index],
					(const void **)keys_burst,
					&signatures[j * BURST_SIZE],
					BURST_SIZE, positions_burst);
				for (k = 0; k < BURST_SIZE; k++) {
					if (positions_burst[k] != positions[j * BURST_SIZE + k]) {
						printf("Key looked up in %d, should be in %d\n",
							positions_burst[k],
							positions[j * BURST_SIZE + k]);
						return -1;
					}
				}
			} else {
				rte_hash_lookup_bulk(h[table_index],
					(const void **) keys_burst,
					BURST_SIZE,
					positions_burst);
				for (k = 0; k < BURST_SIZE; k++) {
					if (positions_burst[k] != positions[j * BURST_SIZE + k]) {
						printf("Key looked up in %d, should be in %d\n",
							positions_burst[k],
							positions[j * BURST_SIZE + k]);
						return -1;
					}
				}
			}
		}
	}
	const uint64_t end_tsc = rte_rdtsc();
	const uint64_t time_taken = end_tsc - start_tsc;

	cycles[table_index][LOOKUP_MULTI][with_hash][with_data] =
			time_taken/num_lookups;

	return 0;
}
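
/*
 * Measure the average cost of deleting every previously added key, with or
 * without a precomputed hash. Freed slot indexes are saved back into
 * positions[] before the per-operation cycle count is recorded.
 */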
static int
timed_deletes(unsigned int with_hash, unsigned int with_data,
		unsigned int table_index, unsigned int ext)
{
	unsigned i;
	const uint64_t start_tsc = rte_rdtsc();
	int32_t ret;
	unsigned int keys_to_add;

	if (!ext)
		keys_to_add = KEYS_TO_ADD * ADD_PERCENT;
	else
		keys_to_add = KEYS_TO_ADD;
	for (i = 0; i < keys_to_add; i++) {
		/* There are no delete functions with data, so just call two functions */
		if (with_hash)
			ret = rte_hash_del_key_with_hash(h[table_index],
							(const void *) keys[i],
							signatures[i]);
		else
			ret = rte_hash_del_key(h[table_index],
							(const void *) keys[i]);
		if (ret >= 0)
			positions[i] = ret;
		else {
			printf("Failed to delete key number %u\n", i);
			return -1;
		}
	}
	const uint64_t end_tsc = rte_rdtsc();
	const uint64_t time_taken = end_tsc - start_tsc;

	cycles[table_index][DELETE][with_hash][with_data] = time_taken/keys_to_add;

	return 0;
}
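
/* Teardown helpers: free a table completely, or just empty it for reuse. */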
static void
free_table(unsigned table_index)
{
	rte_hash_free(h[table_index]);
}
static void
reset_table(unsigned table_index)
{
	rte_hash_reset(h[table_index]);
}
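
/*
 * Driver for one (with_pushes, with_locks, ext) configuration: for every
 * key size and every (with_hash, with_data) combination, create a table,
 * generate a set of keys known to fit in it, time the add/lookup/bulk
 * lookup/delete operations, and finally print the cycles-per-operation
 * matrix.
 */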
static int
run_all_tbl_perf_tests(unsigned int with_pushes, unsigned int with_locks,
						unsigned int ext)
{
	unsigned i, j, with_data, with_hash;

	printf("Measuring performance, please wait");
	fflush(stdout);

	for (with_data = 0; with_data <= 1; with_data++) {
		for (i = 0; i < NUM_KEYSIZES; i++) {
			if (create_table(with_data, i, with_locks, ext) < 0)
				return -1;

			if (get_input_keys(with_pushes, i, ext) < 0)
				return -1;

			for (with_hash = 0; with_hash <= 1; with_hash++) {
				if (timed_adds(with_hash, with_data, i, ext) < 0)
					return -1;

				/* Shuffle the keys so that lookups run in a
				 * different order than the preceding adds.
				 */
				for (j = 0; j < NUM_SHUFFLES; j++)
					shuffle_input_keys(i, ext);

				if (timed_lookups(with_hash, with_data, i, ext) < 0)
					return -1;

				if (timed_lookups_multi(with_hash, with_data,
						i, ext) < 0)
					return -1;

				if (timed_deletes(with_hash, with_data, i, ext) < 0)
					return -1;

				/* Print a dot to show progress on operations */
				printf(".");
				fflush(stdout);

				reset_table(i);
			}
			free_table(i);
		}
	}
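
	/*
	 * Dump the collected cycle counts: one block per (data, hash)
	 * combination, one row per key size, one column per operation.
	 */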
	printf("\nResults (in CPU cycles/operation)\n");
	printf("-----------------------------------\n");
	for (with_data = 0; with_data <= 1; with_data++) {
		if (with_data)
			printf("\n Operations with 8-byte data\n");
		else
			printf("\n Operations without data\n");
		for (with_hash = 0; with_hash <= 1; with_hash++) {
			if (with_hash)
				printf("\nWith pre-computed hash values\n");
			else
				printf("\nWithout pre-computed hash values\n");

			printf("\n%-18s%-18s%-18s%-18s%-18s\n",
				"Keysize", "Add", "Lookup", "Lookup_bulk", "Delete");
			for (i = 0; i < NUM_KEYSIZES; i++) {
				printf("%-18d", hashtest_key_lens[i]);
				for (j = 0; j < NUM_OPERATIONS; j++)
					printf("%-18"PRIu64, cycles[i][j][with_hash][with_data]);
				printf("\n");
			}
		}
	}

	return 0;
}
/* Control operation of performance testing of fbk hash. */
#define LOAD_FACTOR 0.667	/* How full to make the hash table. */
#define TEST_SIZE 1000000	/* How many operations to time. */
#define TEST_ITERATIONS 30	/* How many measurements to take. */
#define ENTRIES (1 << 15)	/* How many entries. */
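
/*
 * Standalone timing test for the four-byte-key (fbk) hash: fill a table to
 * roughly LOAD_FACTOR with random keys, then repeatedly time TEST_SIZE
 * random lookups and report the average tick count per lookup.
 */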
static int
fbk_hash_perf_test(void)
{
	struct rte_fbk_hash_params params = {
		.name = "fbk_hash_test",
		.entries = ENTRIES,
		.entries_per_bucket = 4,
		.socket_id = rte_socket_id(),
	};
	struct rte_fbk_hash_table *handle = NULL;
	uint32_t *keys = NULL;
	unsigned indexes[TEST_SIZE];
	uint64_t lookup_time = 0;
	unsigned added = 0;
	unsigned value = 0;
	uint32_t key;
	uint16_t val;
	unsigned i, j;

	handle = rte_fbk_hash_create(&params);
	if (handle == NULL) {
		printf("Error creating table\n");
		return -1;
	}

	keys = rte_zmalloc(NULL, ENTRIES * sizeof(*keys), 0);
	if (keys == NULL) {
		printf("fbk hash: memory allocation for key store failed\n");
		rte_fbk_hash_free(handle);
		return -1;
	}

	/* Generate random keys and values. */
	for (i = 0; i < ENTRIES; i++) {
		/* fbk hash keys are 32 bits wide, so one rte_rand() is enough */
		key = (uint32_t)rte_rand();
		val = (uint16_t)rte_rand();

		if (rte_fbk_hash_add_key(handle, key, val) == 0) {
			keys[added] = key;
			added++;
		}
		if (added > (LOAD_FACTOR * ENTRIES))
			break;
	}
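
	/*
	 * Take TEST_ITERATIONS measurements, each timing TEST_SIZE lookups of
	 * randomly chosen keys. The random indexes are generated outside the
	 * timed region so that only the lookups themselves are counted.
	 */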
|
|
|
|
|
|
|
|
for (i = 0; i < TEST_ITERATIONS; i++) {
|
|
|
|
uint64_t begin;
|
|
|
|
uint64_t end;
|
|
|
|
|
|
|
|
/* Generate random indexes into keys[] array. */
|
app/test: improve hash unit tests
Add new unit test for calculating the average table utilization,
using random keys, based on number of entries that can be added
until we encounter one that cannot be added (bucket if full).
Also, replace current hash_perf unit test to see performance more clearly.
The current hash_perf unit test takes too long and add keys that
may or may not fit in the table and look up/delete that may not be
in the table. This new unit test gets a set of keys that we know
that fits in the table, and then measure the time to add/look up/delete
them.
Note that performance numbers include time to take a random key
from a pre-made array of keys, plus a quick check of return value.
Also, as stated above, expect higher numbers, as all operations
in the new unit tests will be successful, which means that
it will take more time, than mixing both successful and unsuccesful
operations.
Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
2015-07-09 16:54:30 +00:00
|
|
|
for (j = 0; j < TEST_SIZE; j++)
|
2012-12-19 23:00:00 +00:00
|
|
|
indexes[j] = rte_rand() % added;
|
|
|
|
|
|
|
|
begin = rte_rdtsc();
|
|
|
|
/* Do lookups */
|
app/test: improve hash unit tests
Add new unit test for calculating the average table utilization,
using random keys, based on number of entries that can be added
until we encounter one that cannot be added (bucket if full).
Also, replace current hash_perf unit test to see performance more clearly.
The current hash_perf unit test takes too long and add keys that
may or may not fit in the table and look up/delete that may not be
in the table. This new unit test gets a set of keys that we know
that fits in the table, and then measure the time to add/look up/delete
them.
Note that performance numbers include time to take a random key
from a pre-made array of keys, plus a quick check of return value.
Also, as stated above, expect higher numbers, as all operations
in the new unit tests will be successful, which means that
it will take more time, than mixing both successful and unsuccesful
operations.
Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
2015-07-09 16:54:30 +00:00
|
|
|
for (j = 0; j < TEST_SIZE; j++)
|
2012-12-19 23:00:00 +00:00
|
|
|
value += rte_fbk_hash_lookup(handle, keys[indexes[j]]);

		end = rte_rdtsc();
		lookup_time += (double)(end - begin);
	}
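
	/*
	 * Timing a batch of TEST_SIZE lookups per rte_rdtsc() pair, rather
	 * than one timestamp pair per lookup, keeps the TSC-read overhead
	 * from dominating the per-lookup figure. The reported cost still
	 * includes fetching the random index from indexes[] and accumulating
	 * the return value.
	 */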
printf("\n\n *** FBK Hash function performance test results ***\n");
|
|
|
|
/*
|
|
|
|
* The use of the 'value' variable ensures that the hash lookup is not
|
|
|
|
* being optimised out by the compiler.
|
|
|
|
*/
|
|
|
|
if (value != 0)
|
|
|
|
printf("Number of ticks per lookup = %g\n",
|
|
|
|
(double)lookup_time /
|
|
|
|
((double)TEST_ITERATIONS * (double)TEST_SIZE));
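
	/*
	 * The figure above is in raw TSC ticks. A minimal sketch of
	 * converting it to nanoseconds, assuming rte_get_tsc_hz() from
	 * <rte_cycles.h> ('ticks_per_lookup' and 'ns_per_lookup' are
	 * hypothetical locals, not part of this test):
	 *
	 *	double ticks_per_lookup = lookup_time /
	 *		((double)TEST_ITERATIONS * (double)TEST_SIZE);
	 *	double ns_per_lookup = ticks_per_lookup * 1E9 /
	 *		(double)rte_get_tsc_hz();
	 */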

	rte_fbk_hash_free(handle);

	return 0;
}
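
/*
 * For reference, a minimal sketch of the four-byte-key (FBK) table setup the
 * test above depends on. The parameter values below are illustrative
 * assumptions, not the exact ones used when 'handle' and 'keys[]' are created
 * earlier in this file:
 *
 *	struct rte_fbk_hash_params params = {
 *		.name = "fbk_hash_test",
 *		.entries = ENTRIES,
 *		.entries_per_bucket = 4,
 *		.socket_id = rte_socket_id(),
 *	};
 *	struct rte_fbk_hash_table *h = rte_fbk_hash_create(&params);
 *	uint32_t key = (uint32_t)rte_rand();
 *
 *	if (h != NULL && rte_fbk_hash_add_key(h, key, 0xABCD) == 0)
 *		printf("stored value = %d\n", rte_fbk_hash_lookup(h, key));
 */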

static int
test_hash_perf(void)
{
	unsigned int with_pushes, with_locks;

	for (with_locks = 0; with_locks <= 1; with_locks++) {
		if (with_locks)
			printf("\nWith locks in the code\n");
		else
			printf("\nWithout locks in the code\n");

		for (with_pushes = 0; with_pushes <= 1; with_pushes++) {
			if (with_pushes == 0)
				printf("\nALL ELEMENTS IN PRIMARY LOCATION\n");
			else
				printf("\nELEMENTS IN PRIMARY OR SECONDARY LOCATION\n");
			if (run_all_tbl_perf_tests(with_pushes, with_locks, 0) < 0)
				return -1;
		}
	}
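
	/*
	 * The nested loops above cover all four combinations of cuckoo
	 * pushes (displacing an entry to its secondary location when its
	 * primary bucket is full) and lock-protected access, so a
	 * regression in any one mode shows up separately in the output.
	 */
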
printf("\n EXTENDABLE BUCKETS PERFORMANCE\n");
|
|
|
|
|
|
|
|
if (run_all_tbl_perf_tests(1, 0, 1) < 0)
|
|
|
|
return -1;
|
|
|
|
|
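
	/*
	 * Above, run_all_tbl_perf_tests() is invoked with its third argument
	 * set to 1 (it was 0 in the pushes/locks runs), which exercises the
	 * extendable-buckets variant of the table.
	 */
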
	if (fbk_hash_perf_test() < 0)
		return -1;

	return 0;
}

REGISTER_TEST_COMMAND(hash_perf_autotest, test_hash_perf);
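
/*
 * A sketch of how this test is typically invoked, assuming the standard DPDK
 * unit test binary (exact binary name and EAL options depend on the build and
 * environment):
 *
 *	$ ./dpdk-test
 *	RTE>> hash_perf_autotest
 */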