examples/link_status_interrupt: fix stats refresh rate
TIMER_MILLISECOND is defined as the number of CPU cycles per millisecond.
The current definition is correct only for cores with a frequency of 2 GHz.

Use the DPDK API to get the CPU frequency and to define the timer period.

Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org

Signed-off-by: Raja Zidane <rzidane@nvidia.com>
Signed-off-by: Omar Awaysa <omara@nvidia.com>
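For context, a minimal stand-alone sketch (not part of the patch) of the idea behind the fix: derive the cycles-per-millisecond value from the timer (TSC) frequency reported by rte_get_timer_hz() instead of hard-coding a 2 GHz clock. The program below and its printf check are illustrative only.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_eal.h>
#include <rte_cycles.h>

int
main(int argc, char **argv)
{
	/* rte_get_timer_hz() needs a working EAL: the timer frequency is
	 * resolved during rte_eal_init(). */
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* Cycles per millisecond on this machine, e.g. ~3200000 on a
	 * 3.2 GHz core, instead of the hard-coded 2000000ULL. */
	uint64_t timer_millisecond = rte_get_timer_hz() / 1000;

	/* A 10 second statistics period expressed in TSC cycles. */
	uint64_t timer_period = 10 * timer_millisecond * 1000;

	printf("cycles/ms = %" PRIu64 ", 10 s period = %" PRIu64 " cycles\n",
	       timer_millisecond, timer_period);

	return rte_eal_cleanup();
}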
@@ -101,9 +101,10 @@ struct lsi_port_statistics {
 struct lsi_port_statistics port_statistics[RTE_MAX_ETHPORTS];
 
 /* A tsc-based timer responsible for triggering statistics printout */
-#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
+#define TIMER_MILLISECOND (rte_get_timer_hz() / 1000)
 #define MAX_TIMER_PERIOD 86400 /* 1 day max */
-static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */
+#define DEFAULT_TIMER_PERIOD 10UL /* default period is 10 seconds */
+static int64_t timer_period;
 
 /* Print out statistics on packets dropped */
 static void
@@ -370,6 +371,8 @@ lsi_parse_args(int argc, char **argv)
 		{NULL, 0, 0, 0}
 	};
 
+	timer_period = DEFAULT_TIMER_PERIOD * TIMER_MILLISECOND * 1000;
+
 	argvopt = argv;
 
 	while ((opt = getopt_long(argc, argvopt, "p:q:T:",
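For reference, a hedged sketch of how a tsc-based timer_period like the one set above is typically consumed on the lcore main loop. The loop shape and the names prev_tsc, timer_tsc, stats_timer_loop and print_stats are illustrative assumptions here, not quoted from the example's source.

/* Hedged sketch: accumulate elapsed TSC cycles and print statistics once
 * timer_period cycles have passed; names are illustrative only. */
#include <stdint.h>
#include <rte_cycles.h>

extern int64_t timer_period;	/* cycles between statistics printouts */
void print_stats(void);		/* statistics printout routine (assumed) */

static void
stats_timer_loop(void)
{
	uint64_t prev_tsc = rte_rdtsc();
	uint64_t timer_tsc = 0;

	for (;;) {
		uint64_t cur_tsc = rte_rdtsc();
		uint64_t diff_tsc = cur_tsc - prev_tsc;

		prev_tsc = cur_tsc;
		timer_tsc += diff_tsc;

		/* Print stats once timer_period cycles have elapsed. */
		if (timer_period > 0 && timer_tsc >= (uint64_t)timer_period) {
			print_stats();
			timer_tsc = 0;
		}

		/* ... per-iteration packet RX/TX work goes here ... */
	}
}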