numam-dpdk/lib/librte_eal/common/eal_common_mcfg.c
Anatoly Burakov f9d6cd8bfe timer: fix resource leak in finalize
Currently, whenever timer library is initialized, the memory
is leaked because there is no telling when primary or secondary
processes get to use the state, and there is no way to
initialize/deinitialize timer library state without race
conditions [1] because the data itself must live in shared memory.

Add a spinlock to the shared mem config to have a way to
exclusively initialize/deinitialize the timer library without
any races, and implement the synchronization mechanism based
on this lock in the timer library.

Also, update the API doc. Note that the behavior of the API
itself did not change - the requirement to call init in every
process was simply not documented explicitly.

[1] See the following email thread:
https://mails.dpdk.org/archives/dev/2019-May/131498.html

Fixes: c0749f7096c7 ("timer: allow management in shared memory")
Cc: stable@dpdk.org

Signed-off-by: Erik Gabriel Carrillo <erik.g.carrillo@intel.com>
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
Acked-by: Erik Gabriel Carrillo <erik.g.carrillo@intel.com>
Reviewed-by: David Marchand <david.marchand@redhat.com>
2019-07-06 10:32:40 +02:00

164 lines
3.6 KiB
C

/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019 Intel Corporation
*/
#include <rte_config.h>
#include <rte_eal_memconfig.h>
#include <rte_version.h>
#include "eal_internal_cfg.h"
#include "eal_memcfg.h"
/* Mark shared mem config initialization as finished for this process. */
void
eal_mcfg_complete(void)
{
	struct rte_config *cfg = rte_eal_get_configuration();

	/* only the primary publishes the magic value that tells
	 * secondaries the shared config is fully set up.
	 */
	if (cfg->process_type == RTE_PROC_PRIMARY)
		cfg->mem_config->magic = RTE_MAGIC;

	internal_config.init_complete = 1;
}
/* Block until the primary process finishes shared mem config init. */
void
eal_mcfg_wait_complete(void)
{
	struct rte_mem_config *mcfg =
		rte_eal_get_configuration()->mem_config;

	/* spin until the magic value appears in shared memory */
	for (;;) {
		if (mcfg->magic == RTE_MAGIC)
			break;
		rte_pause();
	}
}
/* Verify that the shared mem config was written by the same DPDK
 * version as this binary. Returns 0 on match, -1 on mismatch.
 */
int
eal_mcfg_check_version(void)
{
	struct rte_mem_config *mcfg =
		rte_eal_get_configuration()->mem_config;

	/* a secondary must run the same DPDK build as the primary */
	return mcfg->version == RTE_VERSION ? 0 : -1;
}
/* Copy memory-mode settings from the shared mem config into this
 * process's internal configuration.
 */
void
eal_mcfg_update_internal(void)
{
	struct rte_mem_config *shared =
		rte_eal_get_configuration()->mem_config;

	/* adopt the options the primary process selected */
	internal_config.legacy_mem = shared->legacy_mem;
	internal_config.single_file_segments = shared->single_file_segments;
}
/* Publish this process's internal memory-mode settings into the
 * shared mem config, and stamp it with the running DPDK version.
 */
void
eal_mcfg_update_from_internal(void)
{
	struct rte_mem_config *shared =
		rte_eal_get_configuration()->mem_config;

	shared->legacy_mem = internal_config.legacy_mem;
	shared->single_file_segments = internal_config.single_file_segments;
	/* record current DPDK version so secondaries can check it */
	shared->version = RTE_VERSION;
}
/* Take the shared memory hotplug lock for reading. */
void
rte_mcfg_mem_read_lock(void)
{
	rte_rwlock_read_lock(
		&rte_eal_get_configuration()->mem_config->memory_hotplug_lock);
}
/* Release the shared memory hotplug lock held for reading. */
void
rte_mcfg_mem_read_unlock(void)
{
	rte_rwlock_read_unlock(
		&rte_eal_get_configuration()->mem_config->memory_hotplug_lock);
}
/* Take the shared memory hotplug lock for writing. */
void
rte_mcfg_mem_write_lock(void)
{
	rte_rwlock_write_lock(
		&rte_eal_get_configuration()->mem_config->memory_hotplug_lock);
}
/* Release the shared memory hotplug lock held for writing. */
void
rte_mcfg_mem_write_unlock(void)
{
	rte_rwlock_write_unlock(
		&rte_eal_get_configuration()->mem_config->memory_hotplug_lock);
}
/* Take the shared tailq list lock for reading. */
void
rte_mcfg_tailq_read_lock(void)
{
	rte_rwlock_read_lock(
		&rte_eal_get_configuration()->mem_config->qlock);
}
/* Release the shared tailq list lock held for reading. */
void
rte_mcfg_tailq_read_unlock(void)
{
	rte_rwlock_read_unlock(
		&rte_eal_get_configuration()->mem_config->qlock);
}
/* Take the shared tailq list lock for writing. */
void
rte_mcfg_tailq_write_lock(void)
{
	rte_rwlock_write_lock(
		&rte_eal_get_configuration()->mem_config->qlock);
}
/* Release the shared tailq list lock held for writing. */
void
rte_mcfg_tailq_write_unlock(void)
{
	rte_rwlock_write_unlock(
		&rte_eal_get_configuration()->mem_config->qlock);
}
/* Take the shared mempool list lock for reading. */
void
rte_mcfg_mempool_read_lock(void)
{
	rte_rwlock_read_lock(
		&rte_eal_get_configuration()->mem_config->mplock);
}
/* Release the shared mempool list lock held for reading. */
void
rte_mcfg_mempool_read_unlock(void)
{
	rte_rwlock_read_unlock(
		&rte_eal_get_configuration()->mem_config->mplock);
}
/* Take the shared mempool list lock for writing. */
void
rte_mcfg_mempool_write_lock(void)
{
	rte_rwlock_write_lock(
		&rte_eal_get_configuration()->mem_config->mplock);
}
/* Release the shared mempool list lock held for writing. */
void
rte_mcfg_mempool_write_unlock(void)
{
	rte_rwlock_write_unlock(
		&rte_eal_get_configuration()->mem_config->mplock);
}
/* Take the shared timer-library init/deinit spinlock. */
void
rte_mcfg_timer_lock(void)
{
	rte_spinlock_lock(
		&rte_eal_get_configuration()->mem_config->tlock);
}
/* Release the shared timer-library init/deinit spinlock. */
void
rte_mcfg_timer_unlock(void)
{
	rte_spinlock_unlock(
		&rte_eal_get_configuration()->mem_config->tlock);
}