trace: fix dynamically enabling trace points

Enabling trace points at runtime was not working if no trace point had
been enabled first at rte_eal_init() time. The reason was that
trace.status was only set at initialisation time, based on whether the
--trace= EAL option (reflected in trace.args) had enabled any trace
point, and was never updated afterwards.

To fix this:
- the trace subsystem initialisation is updated: trace directory
  creation is deferred to when traces are dumped (to avoid creating
  directories that may not be used),
- per lcore memory allocation still relies on rte_trace_is_enabled() but
  this helper now tracks if any trace point is enabled. The
  documentation is updated accordingly,
- cleanup helpers must always be called in rte_eal_cleanup() since some
  trace points might have been enabled and disabled in the lifetime of
  the DPDK application.

With this fix, we can update the unit test and check that a trace point
callback is invoked when expected.
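
To illustrate the scenario being fixed, here is a minimal sketch (not part
of this commit) of an application enabling a trace point at runtime through
the public API; enable_tp_at_runtime() is a hypothetical helper:

    #include <errno.h>

    #include <rte_trace_point.h>

    /* Look up a trace point by name and enable it after rte_eal_init(),
     * without any --trace= EAL option. Before this fix, subsequent
     * emissions stayed silent: rte_trace_is_enabled() kept returning
     * false, so no per-thread trace memory was ever allocated. */
    static int
    enable_tp_at_runtime(const char *name)
    {
        rte_trace_point_t *handle = rte_trace_point_lookup(name);

        if (handle == NULL)
            return -ENOENT;
        return rte_trace_point_enable(handle);
    }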

Note:
- the 'trace' global variable might be shadowed by the argument
  passed to the functions dealing with trace point handles.
  'tp' has been used to refer to the trace_point object, so prefer
  't' when referring to handles.

Fixes: 84c4fae462 ("trace: implement operation APIs")
Cc: stable@dpdk.org

Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Sunil Kumar Kori <skori@marvell.com>
Author: David Marchand <david.marchand@redhat.com>, 2022-09-14 14:05:04 +02:00
commit d6fd5a018e, parent 3ee927d3e4
6 changed files with 65 additions and 38 deletions

--- a/app/test/test_trace.c
+++ b/app/test/test_trace.c

@@ -9,6 +9,8 @@
 #include "test.h"
 #include "test_trace.h"
 
+int app_dpdk_test_tp_count;
+
 #ifdef RTE_EXEC_ENV_WINDOWS
 
 static int
@@ -95,8 +97,15 @@ test_trace_point_regex(void)
 static int32_t
 test_trace_point_disable_enable(void)
 {
+	int expected;
 	int rc;
 
+	/* At tp registration, the associated counter increases once. */
+	expected = 1;
+	TEST_ASSERT_EQUAL(app_dpdk_test_tp_count, expected,
+		"Expecting %d, but got %d for app_dpdk_test_tp_count",
+		expected, app_dpdk_test_tp_count);
+
 	rc = rte_trace_point_disable(&__app_dpdk_test_tp);
 	if (rc < 0)
 		goto failed;
@@ -104,6 +113,12 @@ test_trace_point_disable_enable(void)
 	if (rte_trace_point_is_enabled(&__app_dpdk_test_tp))
 		goto failed;
 
+	/* No emission expected */
+	app_dpdk_test_tp("app.dpdk.test.tp");
+	TEST_ASSERT_EQUAL(app_dpdk_test_tp_count, expected,
+		"Expecting %d, but got %d for app_dpdk_test_tp_count",
+		expected, app_dpdk_test_tp_count);
+
 	rc = rte_trace_point_enable(&__app_dpdk_test_tp);
 	if (rc < 0)
 		goto failed;
@@ -113,6 +128,11 @@ test_trace_point_disable_enable(void)
 
 	/* Emit the trace */
 	app_dpdk_test_tp("app.dpdk.test.tp");
+	expected++;
+	TEST_ASSERT_EQUAL(app_dpdk_test_tp_count, expected,
+		"Expecting %d, but got %d for app_dpdk_test_tp_count",
+		expected, app_dpdk_test_tp_count);
+
 	return TEST_SUCCESS;
 
 failed:

--- a/app/test/test_trace.h
+++ b/app/test/test_trace.h

@@ -3,10 +3,12 @@
  */
 #include <rte_trace_point.h>
 
+extern int app_dpdk_test_tp_count;
 RTE_TRACE_POINT(
 	app_dpdk_test_tp,
 	RTE_TRACE_POINT_ARGS(const char *str),
 	rte_trace_point_emit_string(str);
+	app_dpdk_test_tp_count++;
 )
 
 RTE_TRACE_POINT_FP(

--- a/doc/guides/prog_guide/trace_lib.rst
+++ b/doc/guides/prog_guide/trace_lib.rst

@@ -271,10 +271,16 @@ Trace memory
 The trace memory will be allocated through an internal function
 ``__rte_trace_mem_per_thread_alloc()``. The trace memory will be allocated
 per thread to enable lock less trace-emit function.
-The memory for the trace memory for DPDK lcores will be allocated on
-``rte_eal_init()`` if the trace is enabled through a EAL option.
-For non DPDK threads, on the first trace emission, the memory will be
-allocated.
+
+For non lcore threads, the trace memory is allocated on the first trace
+emission.
+
+For lcore threads, if trace points are enabled through an EAL option, the
+trace memory is allocated when the threads become known to DPDK
+(``rte_eal_init`` for EAL lcores, ``rte_thread_register`` for non-EAL lcores).
+Otherwise, when trace points are enabled later in the life of the application,
+the behavior is the same as for non lcore threads and the trace memory is
+allocated on the first trace emission.
 
 Trace memory layout
 ~~~~~~~~~~~~~~~~~~~
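
As an illustration of the documented allocation behavior (a sketch, not part
of this commit; worker() is a hypothetical thread function reusing the
app_dpdk_test_tp trace point from the unit test above):

    #include <stddef.h>

    #include <rte_lcore.h>

    #include "test_trace.h"

    /* A thread created outside of EAL: no trace memory is reserved for it
     * up front. If trace points were enabled through an EAL option,
     * rte_thread_register() allocates this thread's trace memory;
     * otherwise the first emission below triggers the lazy allocation
     * through __rte_trace_mem_per_thread_alloc(). */
    static void *
    worker(void *arg)
    {
        /* Optional: register so the thread is known to DPDK as an lcore. */
        if (rte_thread_register() < 0)
            return NULL;
        app_dpdk_test_tp("first emission allocates trace memory");
        return NULL;
    }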

--- a/lib/eal/common/eal_common_trace.c
+++ b/lib/eal/common/eal_common_trace.c

@@ -48,12 +48,6 @@ eal_trace_init(void)
 		goto fail;
 	}
 
-	if (!STAILQ_EMPTY(&trace.args))
-		trace.status = true;
-
-	if (!rte_trace_is_enabled())
-		return 0;
-
 	rte_spinlock_init(&trace.lock);
 
 	/* Is duplicate trace name registered */
@@ -72,13 +66,9 @@ eal_trace_init(void)
 	if (trace_metadata_create() < 0)
 		goto fail;
 
-	/* Create trace directory */
-	if (trace_mkdir())
-		goto free_meta;
-
 	/* Save current epoch timestamp for future use */
 	if (trace_epoch_time_save() < 0)
-		goto fail;
+		goto free_meta;
 
 	/* Apply global configurations */
 	STAILQ_FOREACH(arg, &trace.args, next)
@@ -98,8 +88,6 @@ eal_trace_init(void)
 void
 eal_trace_fini(void)
 {
-	if (!rte_trace_is_enabled())
-		return;
 	trace_mem_free();
 	trace_metadata_destroy();
 	eal_trace_args_free();
@@ -108,17 +96,17 @@ eal_trace_fini(void)
 bool
 rte_trace_is_enabled(void)
 {
-	return trace.status;
+	return __atomic_load_n(&trace.status, __ATOMIC_ACQUIRE) != 0;
 }
 
 static void
-trace_mode_set(rte_trace_point_t *trace, enum rte_trace_mode mode)
+trace_mode_set(rte_trace_point_t *t, enum rte_trace_mode mode)
 {
 	if (mode == RTE_TRACE_MODE_OVERWRITE)
-		__atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
+		__atomic_and_fetch(t, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
 			__ATOMIC_RELEASE);
 	else
-		__atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_DISCARD,
+		__atomic_or_fetch(t, __RTE_TRACE_FIELD_ENABLE_DISCARD,
 			__ATOMIC_RELEASE);
 }
@@ -146,36 +134,42 @@ trace_point_is_invalid(rte_trace_point_t *t)
 }
 
 bool
-rte_trace_point_is_enabled(rte_trace_point_t *trace)
+rte_trace_point_is_enabled(rte_trace_point_t *t)
 {
 	uint64_t val;
 
-	if (trace_point_is_invalid(trace))
+	if (trace_point_is_invalid(t))
 		return false;
 
-	val = __atomic_load_n(trace, __ATOMIC_ACQUIRE);
+	val = __atomic_load_n(t, __ATOMIC_ACQUIRE);
 	return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0;
 }
 
 int
-rte_trace_point_enable(rte_trace_point_t *trace)
+rte_trace_point_enable(rte_trace_point_t *t)
 {
-	if (trace_point_is_invalid(trace))
+	uint64_t prev;
+
+	if (trace_point_is_invalid(t))
 		return -ERANGE;
 
-	__atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_MASK,
-		__ATOMIC_RELEASE);
+	prev = __atomic_fetch_or(t, __RTE_TRACE_FIELD_ENABLE_MASK, __ATOMIC_RELEASE);
+	if ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) == 0)
+		__atomic_add_fetch(&trace.status, 1, __ATOMIC_RELEASE);
 	return 0;
 }
 
 int
-rte_trace_point_disable(rte_trace_point_t *trace)
+rte_trace_point_disable(rte_trace_point_t *t)
 {
-	if (trace_point_is_invalid(trace))
+	uint64_t prev;
+
+	if (trace_point_is_invalid(t))
 		return -ERANGE;
 
-	__atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_MASK,
-		__ATOMIC_RELEASE);
+	prev = __atomic_fetch_and(t, ~__RTE_TRACE_FIELD_ENABLE_MASK, __ATOMIC_RELEASE);
+	if ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) != 0)
+		__atomic_sub_fetch(&trace.status, 1, __ATOMIC_RELEASE);
 	return 0;
 }
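
The counting scheme above only moves trace.status on actual 0 -> 1 and
1 -> 0 transitions of a trace point's enable bit, so redundant calls stay
balanced. A small sketch (not part of this commit, reusing the
__app_dpdk_test_tp handle from the unit test above):

    /* Double enable then single disable: trace.status goes 0 -> 1 -> 1 -> 0,
     * so rte_trace_is_enabled() returns false again at the end. */
    static void
    toggle_example(void)
    {
        rte_trace_point_enable(&__app_dpdk_test_tp);
        rte_trace_point_enable(&__app_dpdk_test_tp); /* bit already set */
        rte_trace_point_disable(&__app_dpdk_test_tp);
    }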
@@ -417,9 +411,6 @@ trace_mem_free(void)
 	struct trace *trace = trace_obj_get();
 	uint32_t count;
 
-	if (!rte_trace_is_enabled())
-		return;
-
 	rte_spinlock_lock(&trace->lock);
 	for (count = 0; count < trace->nb_trace_mem_list; count++) {
 		trace_mem_per_thread_free_unlocked(&trace->lcore_meta[count]);

--- a/lib/eal/common/eal_common_trace_utils.c
+++ b/lib/eal/common/eal_common_trace_utils.c

@@ -314,14 +314,18 @@ trace_dir_default_path_get(char *dir_path)
 	return 0;
 }
 
-int
+static int
 trace_mkdir(void)
 {
 	struct trace *trace = trace_obj_get();
 	char session[TRACE_DIR_STR_LEN];
+	static bool already_done;
 	char *dir_path;
 	int rc;
 
+	if (already_done)
+		return 0;
+
 	if (!trace->dir_offset) {
 		dir_path = calloc(1, sizeof(trace->dir));
 		if (dir_path == NULL) {
@@ -365,6 +369,7 @@ trace_mkdir(void)
 	}
 
 	RTE_LOG(INFO, EAL, "Trace dir: %s\n", trace->dir);
+	already_done = true;
 	return 0;
 }
@@ -434,6 +439,10 @@ rte_trace_save(void)
 	if (trace->nb_trace_mem_list == 0)
 		return rc;
 
+	rc = trace_mkdir();
+	if (rc < 0)
+		return rc;
+
 	rc = trace_meta_save(trace);
 	if (rc)
 		return rc;
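
With directory creation deferred to dump time, a typical shutdown sequence
now looks like this (a sketch, not part of this commit):

    #include <stdio.h>

    #include <rte_eal.h>
    #include <rte_trace.h>

    static void
    shutdown_with_traces(void)
    {
        /* The trace directory is only created here, by the deferred
         * trace_mkdir() call added above, and only if some trace memory
         * was ever allocated. */
        if (rte_trace_save() < 0)
            printf("could not save traces\n");

        /* The trace cleanup helpers now always run in rte_eal_cleanup(). */
        rte_eal_cleanup();
    }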

--- a/lib/eal/common/eal_trace.h
+++ b/lib/eal/common/eal_trace.h

@@ -54,7 +54,7 @@ struct trace {
 	char dir[PATH_MAX];
 	int dir_offset;
 	int register_errno;
-	bool status;
+	uint32_t status;
 	enum rte_trace_mode mode;
 	rte_uuid_t uuid;
 	uint32_t buff_len;
@@ -104,7 +104,6 @@ void trace_uuid_generate(void);
 int trace_metadata_create(void);
 void trace_metadata_destroy(void);
 char *trace_metadata_fixup_field(const char *field);
-int trace_mkdir(void);
 int trace_epoch_time_save(void);
 void trace_mem_free(void);
 void trace_mem_per_thread_free(void);