examples: time fixes
Signed-off-by: Intel
parent 1c17baf486
commit 5c95261dbf
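
This commit replaces the hard-coded drain threshold of 200000 TSC cycles, which only corresponds to ~100us on a 2 GHz CPU, with a BURST_TX_DRAIN_US value in microseconds that each example converts to cycles at run time from the measured TSC frequency. A minimal standalone sketch of that conversion is shown below; it assumes US_PER_S is 1000000 as in DPDK, and the 2 GHz figure is used only to check the result against the old constant (real code calls rte_get_tsc_hz() instead).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define US_PER_S          1000000ULL  /* microseconds per second */
#define BURST_TX_DRAIN_US 100         /* TX drain every ~100us */

int main(void)
{
	/* Stand-in for rte_get_tsc_hz(); on real hardware the DPDK call
	 * returns the measured TSC frequency. 2 GHz is assumed here only
	 * so the result can be compared with the old constant. */
	uint64_t tsc_hz = 2000000000ULL;

	/* Cycles per microsecond (rounded up) times the drain period:
	 * (2e9 + 1e6 - 1) / 1e6 = 2000 cycles/us, * 100 us = 200000 cycles,
	 * i.e. exactly the value the old BURST_TX_DRAIN macro hard-coded. */
	uint64_t drain_tsc = (tsc_hz + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	printf("drain_tsc = %" PRIu64 " cycles\n", drain_tsc);
	return 0;
}

On a CPU whose TSC does not run at exactly 2 GHz, drain_tsc now scales with the measured frequency, so the drain interval stays at roughly 100us instead of drifting with the clock rate.
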
@@ -96,7 +96,7 @@
 #define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
 
 #define MAX_PKT_BURST 32
-#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
 
 #define TX_QUEUE_FLUSH_MASK 0xFFFFFFFF
 #define TSC_COUNT_LIMIT 1000
@@ -327,6 +327,7 @@ main_loop(__attribute__((unused)) void *dummy)
 {
 	uint32_t lcoreid;
 	struct lcore_conf *qconf;
+	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
 
 	lcoreid = rte_lcore_id();
 	qconf = &lcore_conf[lcoreid];
@@ -346,7 +347,7 @@ main_loop(__attribute__((unused)) void *dummy)
 		tsc = rte_rdtsc();
 
 		diff_tsc = tsc - qconf->tsc;
-		if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
+		if (unlikely(diff_tsc > drain_tsc)) {
 			nic_tx_flush_queues(qconf);
 			crypto_flush_tx_queue(lcoreid);
 			qconf->tsc = tsc;
@@ -111,7 +111,7 @@
 #define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
 
 #define MAX_PKT_BURST 32
-#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
 
 /* Configure how many packets ahead to prefetch, when reading packets */
 #define PREFETCH_OFFSET 3
@@ -309,11 +309,13 @@ main_loop(__attribute__((unused)) void *dummy)
 {
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
 	unsigned lcore_id;
-	uint64_t prev_tsc = 0;
-	uint64_t diff_tsc, cur_tsc;
+	uint64_t prev_tsc, diff_tsc, cur_tsc;
 	int i, j, nb_rx;
 	uint8_t portid;
 	struct lcore_queue_conf *qconf;
+	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
 
+	prev_tsc = 0;
+
 	lcore_id = rte_lcore_id();
 	qconf = &lcore_queue_conf[lcore_id];
@@ -340,7 +342,7 @@ main_loop(__attribute__((unused)) void *dummy)
 		 * TX burst queue drain
 		 */
 		diff_tsc = cur_tsc - prev_tsc;
-		if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
+		if (unlikely(diff_tsc > drain_tsc)) {
 
 			/*
 			 * This could be optimized (use queueid instead of
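
The main-loop hunks above and in the remaining examples all apply the same pattern: derive drain_tsc once, then flush the buffered TX packets whenever the elapsed TSC delta exceeds it. A condensed sketch of that control flow follows; it needs a DPDK build environment for rte_cycles.h, and flush_all_tx_queues() is a hypothetical placeholder for each example's own flush code (nic_tx_flush_queues(), the tx_mbufs loops, and so on).

#include <stdint.h>
#include <rte_cycles.h>   /* rte_rdtsc(), rte_get_tsc_hz(); needs a DPDK build */

#define US_PER_S          1000000ULL
#define BURST_TX_DRAIN_US 100         /* TX drain every ~100us */

/* Hypothetical placeholder for the per-example flush code. */
static void flush_all_tx_queues(void)
{
}

static void drain_loop_sketch(void)
{
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	const uint64_t drain_tsc =
		(rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	prev_tsc = 0;
	for (;;) {
		cur_tsc = rte_rdtsc();

		/* TX burst queue drain: flush if more than ~100us worth of
		 * TSC cycles have passed since the previous flush. */
		diff_tsc = cur_tsc - prev_tsc;
		if (diff_tsc > drain_tsc) {
			flush_all_tx_queues();
			prev_tsc = cur_tsc;
		}

		/* ... RX burst and packet forwarding work goes here ... */
	}
}
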
@@ -113,7 +113,7 @@
 #define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
 
 #define MAX_PKT_BURST 32
-#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
 
 /* Configure how many packets ahead to prefetch, when reading packets */
 #define PREFETCH_OFFSET 3
@@ -450,9 +450,10 @@ send_timeout_burst(struct lcore_queue_conf *qconf)
 {
 	uint64_t cur_tsc;
 	uint8_t portid;
+	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
 
 	cur_tsc = rte_rdtsc();
-	if (likely (cur_tsc < qconf->tx_tsc + BURST_TX_DRAIN))
+	if (likely (cur_tsc < qconf->tx_tsc + drain_tsc))
 		return;
 
 	for (portid = 0; portid < MAX_PORTS; portid++) {
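
send_timeout_burst() uses the derived drain_tsc slightly differently: it records the TSC value of the last flush in the queue configuration and returns early until drain_tsc cycles have elapsed. A minimal sketch of that check is given below, with a hypothetical pared-down struct standing in for the example's lcore_queue_conf.

#include <stdint.h>
#include <rte_cycles.h>   /* rte_rdtsc(), rte_get_tsc_hz(); needs a DPDK build */

#define US_PER_S          1000000ULL
#define BURST_TX_DRAIN_US 100

/* Hypothetical, pared-down stand-in for the example's per-lcore state. */
struct queue_conf_sketch {
	uint64_t tx_tsc;   /* TSC value recorded at the last TX flush */
};

static void send_timeout_burst_sketch(struct queue_conf_sketch *qconf)
{
	const uint64_t drain_tsc =
		(rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
	uint64_t cur_tsc = rte_rdtsc();

	/* Less than ~100us since the last flush: nothing to do yet. */
	if (cur_tsc < qconf->tx_tsc + drain_tsc)
		return;

	/* ... flush the buffered packets for each port here ... */

	qconf->tx_tsc = cur_tsc;
}
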
@@ -98,7 +98,7 @@
 #define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
 
 #define MAX_PKT_BURST 32
-#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
 
 /*
  * Configurable number of RX/TX ring descriptors
@@ -300,11 +300,12 @@ l2fwd_main_loop(void)
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
 	struct rte_mbuf *m;
 	unsigned lcore_id;
-	uint64_t prev_tsc = 0;
-	uint64_t diff_tsc, cur_tsc, timer_tsc;
+	uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
 	unsigned i, j, portid, nb_rx;
 	struct lcore_queue_conf *qconf;
+	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
 
+	prev_tsc = 0;
 	timer_tsc = 0;
 
 	lcore_id = rte_lcore_id();
@@ -332,7 +333,7 @@ l2fwd_main_loop(void)
 		 * TX burst queue drain
 		 */
 		diff_tsc = cur_tsc - prev_tsc;
-		if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
+		if (unlikely(diff_tsc > drain_tsc)) {
 
 			for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
 				if (qconf->tx_mbufs[portid].len == 0)
@@ -117,7 +117,7 @@
 #define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
 
 #define MAX_PKT_BURST 32
-#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
 
 #define NB_SOCKETS 8
 
@@ -500,11 +500,13 @@ main_loop(__attribute__((unused)) void *dummy)
 {
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
 	unsigned lcore_id;
-	uint64_t prev_tsc = 0;
-	uint64_t diff_tsc, cur_tsc;
+	uint64_t prev_tsc, diff_tsc, cur_tsc;
 	int i, j, nb_rx;
 	uint8_t portid, queueid;
 	struct lcore_conf *qconf;
+	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
 
+	prev_tsc = 0;
+
 	lcore_id = rte_lcore_id();
 	qconf = &lcore_conf[lcore_id];
@@ -532,7 +534,7 @@ main_loop(__attribute__((unused)) void *dummy)
 		 * TX burst queue drain
 		 */
 		diff_tsc = cur_tsc - prev_tsc;
-		if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
+		if (unlikely(diff_tsc > drain_tsc)) {
 
 			/*
 			 * This could be optimized (use queueid instead of
@@ -146,7 +146,7 @@
 #define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
 
 #define MAX_PKT_BURST 32
-#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
 
 #define NB_SOCKETS 8
 
@@ -650,11 +650,13 @@ main_loop(__attribute__((unused)) void *dummy)
 {
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
 	unsigned lcore_id;
-	uint64_t prev_tsc = 0;
-	uint64_t diff_tsc, cur_tsc;
+	uint64_t prev_tsc, diff_tsc, cur_tsc;
 	int i, j, nb_rx;
 	uint8_t portid, queueid;
 	struct lcore_conf *qconf;
+	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
 
+	prev_tsc = 0;
+
 	lcore_id = rte_lcore_id();
 	qconf = &lcore_conf[lcore_id];
@@ -682,7 +684,7 @@ main_loop(__attribute__((unused)) void *dummy)
 		 * TX burst queue drain
 		 */
 		diff_tsc = cur_tsc - prev_tsc;
-		if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
+		if (unlikely(diff_tsc > drain_tsc)) {
 
 			/*
 			 * This could be optimized (use queueid instead of
@@ -99,7 +99,7 @@
 #define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
 
 #define MAX_PKT_BURST 32
-#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
 
 /*
  * Configurable number of RX/TX ring descriptors
@@ -317,11 +317,12 @@ lsi_main_loop(void)
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
 	struct rte_mbuf *m;
 	unsigned lcore_id;
-	uint64_t prev_tsc = 0;
-	uint64_t diff_tsc, cur_tsc, timer_tsc;
+	uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
 	unsigned i, j, portid, nb_rx;
 	struct lcore_queue_conf *qconf;
+	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
 
+	prev_tsc = 0;
 	timer_tsc = 0;
 
 	lcore_id = rte_lcore_id();
@@ -349,7 +350,7 @@ lsi_main_loop(void)
 		 * TX burst queue drain
 		 */
 		diff_tsc = cur_tsc - prev_tsc;
-		if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
+		if (unlikely(diff_tsc > drain_tsc)) {
 
 			/* this could be optimized (use queueid instead of
 			 * portid), but it is not called so often */