test/mcslock: remove unneeded per lcore copy

Each core already comes with its local storage for mcslock (in its
stack), therefore there is no need to define an additional per-lcore
mcslock.

Fixes: 32dcb9fd2a ("test/mcslock: add MCS queued lock unit test")

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
This commit is contained in:
Olivier Matz 2020-11-04 18:04:25 +01:00 committed by David Marchand
parent de6aede17b
commit 5b3ada04f7

View File

@@ -37,10 +37,6 @@
* lock multiple times.
*/
-RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_me);
-RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_try_me);
-RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_perf_me);
rte_mcslock_t *p_ml;
rte_mcslock_t *p_ml_try;
rte_mcslock_t *p_ml_perf;
@@ -53,7 +49,7 @@ static int
test_mcslock_per_core(__rte_unused void *arg)
{
/* Per core me node. */
-	rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me);
+	rte_mcslock_t ml_me;
rte_mcslock_lock(&p_ml, &ml_me);
printf("MCS lock taken on core %u\n", rte_lcore_id());
@@ -77,7 +73,7 @@ load_loop_fn(void *func_param)
const unsigned int lcore = rte_lcore_id();
/**< Per core me node. */
-	rte_mcslock_t ml_perf_me = RTE_PER_LCORE(_ml_perf_me);
+	rte_mcslock_t ml_perf_me;
/* wait synchro */
while (rte_atomic32_read(&synchro) == 0)
@@ -151,8 +147,8 @@ static int
test_mcslock_try(__rte_unused void *arg)
{
/**< Per core me node. */
-	rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me);
-	rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);
+	rte_mcslock_t ml_me;
+	rte_mcslock_t ml_try_me;
/* Locked ml_try in the main lcore, so it should fail
* when trying to lock it in the worker lcore.
@@ -178,8 +174,8 @@ test_mcslock(void)
int i;
/* Define per core me node. */
-	rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me);
-	rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);
+	rte_mcslock_t ml_me;
+	rte_mcslock_t ml_try_me;
/*
* Test mcs lock & unlock on each core