examples: use factorized default Rx/Tx configuration

The default rte_eth_rxconf and rte_eth_txconf structures have been
removed from the example apps; the default configurations are now
obtained by calling rte_eth_dev_info_get, just before setting up the
RX/TX queues.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Acked-by: David Marchand <david.marchand@6wind.com>
This commit is contained in:
Pablo de Lara 2014-10-01 10:49:05 +01:00 committed by Thomas Monjalon
parent 27b31ee33f
commit 81f7ecd934
25 changed files with 141 additions and 813 deletions

View File

@ -75,25 +75,6 @@
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) #define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define NB_MBUF (32 * 1024) #define NB_MBUF (32 * 1024)
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32 #define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
@ -178,24 +159,6 @@ static struct rte_eth_conf port_conf = {
}, },
}; };
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = RX_PTHRESH,
.hthresh = RX_HTHRESH,
.wthresh = RX_WTHRESH,
},
};
static const struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = TX_PTHRESH,
.hthresh = TX_HTHRESH,
.wthresh = TX_WTHRESH,
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
};
static struct rte_mempool * pktmbuf_pool[RTE_MAX_NUMA_NODES]; static struct rte_mempool * pktmbuf_pool[RTE_MAX_NUMA_NODES];
struct lcore_conf { struct lcore_conf {
@ -782,7 +745,8 @@ MAIN(int argc, char **argv)
printf("txq=%u,%d,%d ", lcoreid, queueid, socketid); printf("txq=%u,%d,%d ", lcoreid, queueid, socketid);
fflush(stdout); fflush(stdout);
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
socketid, &tx_conf); socketid,
NULL);
if (ret < 0) if (ret < 0)
rte_panic("rte_eth_tx_queue_setup: err=%d, " rte_panic("rte_eth_tx_queue_setup: err=%d, "
"port=%d\n", ret, portid); "port=%d\n", ret, portid);
@ -807,7 +771,9 @@ MAIN(int argc, char **argv)
fflush(stdout); fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
socketid, &rx_conf, pktmbuf_pool[socketid]); socketid,
NULL,
pktmbuf_pool[socketid]);
if (ret < 0) if (ret < 0)
rte_panic("rte_eth_rx_queue_setup: err=%d," rte_panic("rte_eth_rx_queue_setup: err=%d,"
"port=%d\n", ret, portid); "port=%d\n", ret, portid);

View File

@ -109,31 +109,6 @@
* controller's datasheet and supporting DPDK documentation for guidance * controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set. * on how these parameters should be set.
*/ */
/* RX ring configuration */
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = 8, /* Ring prefetch threshold */
.hthresh = 8, /* Ring host threshold */
.wthresh = 4, /* Ring writeback threshold */
},
.rx_free_thresh = 0, /* Immediately free RX descriptors */
};
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
/* TX ring configuration */
static const struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = 36, /* Ring prefetch threshold */
.hthresh = 0, /* Ring host threshold */
.wthresh = 0, /* Ring writeback threshold */
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
};
/* Options for configuring ethernet port */ /* Options for configuring ethernet port */
static const struct rte_eth_conf port_conf = { static const struct rte_eth_conf port_conf = {
@ -461,13 +436,14 @@ init_port(uint8_t port)
(unsigned)port, ret); (unsigned)port, ret);
ret = rte_eth_rx_queue_setup(port, 0, NB_RXD, rte_eth_dev_socket_id(port), ret = rte_eth_rx_queue_setup(port, 0, NB_RXD, rte_eth_dev_socket_id(port),
&rx_conf, pktmbuf_pool); NULL,
pktmbuf_pool);
if (ret < 0) if (ret < 0)
FATAL_ERROR("Could not setup up RX queue for port%u (%d)", FATAL_ERROR("Could not setup up RX queue for port%u (%d)",
(unsigned)port, ret); (unsigned)port, ret);
ret = rte_eth_tx_queue_setup(port, 0, NB_TXD, rte_eth_dev_socket_id(port), ret = rte_eth_tx_queue_setup(port, 0, NB_TXD, rte_eth_dev_socket_id(port),
&tx_conf); NULL);
if (ret < 0) if (ret < 0)
FATAL_ERROR("Could not setup up TX queue for port%u (%d)", FATAL_ERROR("Could not setup up TX queue for port%u (%d)",
(unsigned)port, ret); (unsigned)port, ret);

View File

@ -106,25 +106,6 @@
#define NB_MBUF 8192 #define NB_MBUF 8192
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32 #define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
@ -208,24 +189,6 @@ static const struct rte_eth_conf port_conf = {
}, },
}; };
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = RX_PTHRESH,
.hthresh = RX_HTHRESH,
.wthresh = RX_WTHRESH,
},
};
static const struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = TX_PTHRESH,
.hthresh = TX_HTHRESH,
.wthresh = TX_WTHRESH,
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
};
/* /*
* IPv4 forwarding table * IPv4 forwarding table
*/ */
@ -851,6 +814,8 @@ int
MAIN(int argc, char **argv) MAIN(int argc, char **argv)
{ {
struct lcore_queue_conf *qconf; struct lcore_queue_conf *qconf;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
struct rx_queue *rxq; struct rx_queue *rxq;
int socket, ret; int socket, ret;
unsigned nb_ports; unsigned nb_ports;
@ -939,7 +904,7 @@ MAIN(int argc, char **argv)
/* init one RX queue */ /* init one RX queue */
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
socket, &rx_conf, socket, NULL,
socket_direct_pool[socket]); socket_direct_pool[socket]);
if (ret < 0) { if (ret < 0) {
printf("\n"); printf("\n");
@ -961,8 +926,12 @@ MAIN(int argc, char **argv)
socket = (int) rte_lcore_to_socket_id(lcore_id); socket = (int) rte_lcore_to_socket_id(lcore_id);
printf("txq=%u,%d ", lcore_id, queueid); printf("txq=%u,%d ", lcore_id, queueid);
fflush(stdout); fflush(stdout);
rte_eth_dev_info_get(portid, &dev_info);
txconf = &dev_info.default_txconf;
txconf->txq_flags = 0;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
socket, &tx_conf); socket, txconf);
if (ret < 0) { if (ret < 0) {
printf("\n"); printf("\n");
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: " rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "

View File

@ -114,25 +114,6 @@
static uint32_t max_flow_num = DEF_FLOW_NUM; static uint32_t max_flow_num = DEF_FLOW_NUM;
static uint32_t max_flow_ttl = DEF_FLOW_TTL; static uint32_t max_flow_ttl = DEF_FLOW_TTL;
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define NB_SOCKETS 8 #define NB_SOCKETS 8
@ -236,26 +217,6 @@ static struct rte_eth_conf port_conf = {
}, },
}; };
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = RX_PTHRESH,
.hthresh = RX_HTHRESH,
.wthresh = RX_WTHRESH,
},
.rx_free_thresh = 32,
};
static const struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = TX_PTHRESH,
.hthresh = TX_HTHRESH,
.wthresh = TX_WTHRESH,
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
.txq_flags = 0x0,
};
/* /*
* IPv4 forwarding table * IPv4 forwarding table
*/ */
@ -1058,6 +1019,8 @@ int
MAIN(int argc, char **argv) MAIN(int argc, char **argv)
{ {
struct lcore_queue_conf *qconf; struct lcore_queue_conf *qconf;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
struct rx_queue *rxq; struct rx_queue *rxq;
int ret, socket; int ret, socket;
unsigned nb_ports; unsigned nb_ports;
@ -1146,7 +1109,7 @@ MAIN(int argc, char **argv)
/* init one RX queue */ /* init one RX queue */
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
socket, &rx_conf, socket, NULL,
rxq->pool); rxq->pool);
if (ret < 0) { if (ret < 0) {
printf("\n"); printf("\n");
@ -1169,8 +1132,13 @@ MAIN(int argc, char **argv)
printf("txq=%u,%d,%d ", lcore_id, queueid, socket); printf("txq=%u,%d,%d ", lcore_id, queueid, socket);
fflush(stdout); fflush(stdout);
rte_eth_dev_info_get(portid, &dev_info);
txconf = &dev_info.default_txconf;
txconf->txq_flags = 0;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
socket, &tx_conf); socket, txconf);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, " rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
"port=%d\n", ret, portid); "port=%d\n", ret, portid);

View File

@ -92,25 +92,6 @@
/* allow max jumbo frame 9.5 KB */ /* allow max jumbo frame 9.5 KB */
#define JUMBO_FRAME_MAX_SIZE 0x2600 #define JUMBO_FRAME_MAX_SIZE 0x2600
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32 #define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
@ -176,24 +157,6 @@ static const struct rte_eth_conf port_conf = {
}, },
}; };
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = RX_PTHRESH,
.hthresh = RX_HTHRESH,
.wthresh = RX_WTHRESH,
},
};
static const struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = TX_PTHRESH,
.hthresh = TX_HTHRESH,
.wthresh = TX_WTHRESH,
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
};
static struct rte_mempool *packet_pool, *header_pool, *clone_pool; static struct rte_mempool *packet_pool, *header_pool, *clone_pool;
@ -713,6 +676,8 @@ int
MAIN(int argc, char **argv) MAIN(int argc, char **argv)
{ {
struct lcore_queue_conf *qconf; struct lcore_queue_conf *qconf;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
int ret; int ret;
uint16_t queueid; uint16_t queueid;
unsigned lcore_id = 0, rx_lcore_id = 0; unsigned lcore_id = 0, rx_lcore_id = 0;
@ -808,7 +773,8 @@ MAIN(int argc, char **argv)
printf("rxq=%hu ", queueid); printf("rxq=%hu ", queueid);
fflush(stdout); fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
rte_eth_dev_socket_id(portid), &rx_conf, rte_eth_dev_socket_id(portid),
NULL,
packet_pool); packet_pool);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, port=%d\n", rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, port=%d\n",
@ -822,8 +788,12 @@ MAIN(int argc, char **argv)
continue; continue;
printf("txq=%u,%hu ", lcore_id, queueid); printf("txq=%u,%hu ", lcore_id, queueid);
fflush(stdout); fflush(stdout);
rte_eth_dev_info_get(portid, &dev_info);
txconf = &dev_info.default_txconf;
txconf->txq_flags = 0;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
rte_lcore_to_socket_id(lcore_id), &tx_conf); rte_lcore_to_socket_id(lcore_id), txconf);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, " rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
"port=%d\n", ret, portid); "port=%d\n", ret, portid);

View File

@ -125,36 +125,6 @@ struct kni_port_params {
static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS]; static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
/* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
/* RX ring configuration */
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = 8, /* Ring prefetch threshold */
.hthresh = 8, /* Ring host threshold */
.wthresh = 4, /* Ring writeback threshold */
},
.rx_free_thresh = 0, /* Immediately free RX descriptors */
};
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
/* TX ring configuration */
static const struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = 36, /* Ring prefetch threshold */
.hthresh = 0, /* Ring host threshold */
.wthresh = 0, /* Ring writeback threshold */
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
};
/* Options for configuring ethernet port */ /* Options for configuring ethernet port */
static struct rte_eth_conf port_conf = { static struct rte_eth_conf port_conf = {
@ -631,13 +601,13 @@ init_port(uint8_t port)
(unsigned)port, ret); (unsigned)port, ret);
ret = rte_eth_rx_queue_setup(port, 0, NB_RXD, ret = rte_eth_rx_queue_setup(port, 0, NB_RXD,
rte_eth_dev_socket_id(port), &rx_conf, pktmbuf_pool); rte_eth_dev_socket_id(port), NULL, pktmbuf_pool);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "Could not setup up RX queue for " rte_exit(EXIT_FAILURE, "Could not setup up RX queue for "
"port%u (%d)\n", (unsigned)port, ret); "port%u (%d)\n", (unsigned)port, ret);
ret = rte_eth_tx_queue_setup(port, 0, NB_TXD, ret = rte_eth_tx_queue_setup(port, 0, NB_TXD,
rte_eth_dev_socket_id(port), &tx_conf); rte_eth_dev_socket_id(port), NULL);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "Could not setup up TX queue for " rte_exit(EXIT_FAILURE, "Could not setup up TX queue for "
"port%u (%d)\n", (unsigned)port, ret); "port%u (%d)\n", (unsigned)port, ret);

View File

@ -63,25 +63,6 @@
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
/* mask of enabled ports */ /* mask of enabled ports */
@ -117,24 +98,6 @@ static const struct rte_eth_conf port_conf = {
}, },
}; };
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = RX_PTHRESH,
.hthresh = RX_HTHRESH,
.wthresh = RX_WTHRESH,
},
};
static const struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = TX_PTHRESH,
.hthresh = TX_HTHRESH,
.wthresh = TX_WTHRESH,
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
};
#define METADATA_NAME "l2fwd_ivshmem" #define METADATA_NAME "l2fwd_ivshmem"
#define CMDLINE_OPT_FWD_CONF "fwd-conf" #define CMDLINE_OPT_FWD_CONF "fwd-conf"
@ -789,7 +752,8 @@ int main(int argc, char **argv)
/* init one RX queue */ /* init one RX queue */
fflush(stdout); fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
rte_eth_dev_socket_id(portid), &rx_conf, rte_eth_dev_socket_id(portid),
NULL,
l2fwd_ivshmem_pktmbuf_pool); l2fwd_ivshmem_pktmbuf_pool);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n", rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
@ -798,7 +762,8 @@ int main(int argc, char **argv)
/* init one TX queue on each port */ /* init one TX queue on each port */
fflush(stdout); fflush(stdout);
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid), &tx_conf); rte_eth_dev_socket_id(portid),
NULL);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n", rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
ret, (unsigned) portid); ret, (unsigned) portid);

View File

@ -77,25 +77,6 @@
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) #define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define NB_MBUF 8192 #define NB_MBUF 8192
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32 #define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
@ -147,29 +128,6 @@ static const struct rte_eth_conf port_conf = {
}, },
}; };
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = RX_PTHRESH,
.hthresh = RX_HTHRESH,
.wthresh = RX_WTHRESH,
},
};
static const struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = TX_PTHRESH,
.hthresh = TX_HTHRESH,
.wthresh = TX_WTHRESH,
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
/*
* As the example won't handle mult-segments and offload cases,
* set the flag by default.
*/
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
};
struct rte_mempool * l2fwd_pktmbuf_pool = NULL; struct rte_mempool * l2fwd_pktmbuf_pool = NULL;
/* Per-port statistics struct */ /* Per-port statistics struct */
@ -701,7 +659,8 @@ MAIN(int argc, char **argv)
/* init one RX queue */ /* init one RX queue */
fflush(stdout); fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
rte_eth_dev_socket_id(portid), &rx_conf, rte_eth_dev_socket_id(portid),
NULL,
l2fwd_pktmbuf_pool); l2fwd_pktmbuf_pool);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n", rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
@ -710,7 +669,8 @@ MAIN(int argc, char **argv)
/* init one TX queue on each port */ /* init one TX queue on each port */
fflush(stdout); fflush(stdout);
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid), &tx_conf); rte_eth_dev_socket_id(portid),
NULL);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n", rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
ret, (unsigned) portid); ret, (unsigned) portid);

View File

@ -100,25 +100,6 @@
nb_lcores * MEMPOOL_CACHE_SIZE), \ nb_lcores * MEMPOOL_CACHE_SIZE), \
(unsigned)8192) (unsigned)8192)
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32 #define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
@ -207,26 +188,6 @@ static struct rte_eth_conf port_conf = {
}, },
}; };
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = RX_PTHRESH,
.hthresh = RX_HTHRESH,
.wthresh = RX_WTHRESH,
},
.rx_free_thresh = 32,
};
static const struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = TX_PTHRESH,
.hthresh = TX_HTHRESH,
.wthresh = TX_WTHRESH,
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
.txq_flags = 0x0,
};
static struct rte_mempool *pktmbuf_pool[NB_SOCKETS]; static struct rte_mempool *pktmbuf_pool[NB_SOCKETS];
/***********************start of ACL part******************************/ /***********************start of ACL part******************************/
@ -1969,6 +1930,8 @@ int
MAIN(int argc, char **argv) MAIN(int argc, char **argv)
{ {
struct lcore_conf *qconf; struct lcore_conf *qconf;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
int ret; int ret;
unsigned nb_ports; unsigned nb_ports;
uint16_t queueid; uint16_t queueid;
@ -2056,8 +2019,13 @@ MAIN(int argc, char **argv)
printf("txq=%u,%d,%d ", lcore_id, queueid, socketid); printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
fflush(stdout); fflush(stdout);
rte_eth_dev_info_get(portid, &dev_info);
txconf = &dev_info.default_txconf;
if (port_conf.rxmode.jumbo_frame)
txconf->txq_flags = 0;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
socketid, &tx_conf); socketid, txconf);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, rte_exit(EXIT_FAILURE,
"rte_eth_tx_queue_setup: err=%d, " "rte_eth_tx_queue_setup: err=%d, "
@ -2091,7 +2059,7 @@ MAIN(int argc, char **argv)
fflush(stdout); fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
socketid, &rx_conf, socketid, NULL,
pktmbuf_pool[socketid]); pktmbuf_pool[socketid]);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, rte_exit(EXIT_FAILURE,

View File

@ -140,25 +140,6 @@
nb_lcores*MEMPOOL_CACHE_SIZE), \ nb_lcores*MEMPOOL_CACHE_SIZE), \
(unsigned)8192) (unsigned)8192)
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define NB_SOCKETS 8 #define NB_SOCKETS 8
@ -255,26 +236,6 @@ static struct rte_eth_conf port_conf = {
}, },
}; };
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = RX_PTHRESH,
.hthresh = RX_HTHRESH,
.wthresh = RX_WTHRESH,
},
.rx_free_thresh = 32,
};
static const struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = TX_PTHRESH,
.hthresh = TX_HTHRESH,
.wthresh = TX_WTHRESH,
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
.txq_flags = 0x0,
};
static struct rte_mempool * pktmbuf_pool[NB_SOCKETS]; static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
@ -1513,6 +1474,8 @@ int
MAIN(int argc, char **argv) MAIN(int argc, char **argv)
{ {
struct lcore_conf *qconf; struct lcore_conf *qconf;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
int ret; int ret;
unsigned nb_ports; unsigned nb_ports;
uint16_t queueid; uint16_t queueid;
@ -1603,8 +1566,13 @@ MAIN(int argc, char **argv)
printf("txq=%u,%d,%d ", lcore_id, queueid, socketid); printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
fflush(stdout); fflush(stdout);
rte_eth_dev_info_get(portid, &dev_info);
txconf = &dev_info.default_txconf;
if (port_conf.rxmode.jumbo_frame)
txconf->txq_flags = 0;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
socketid, &tx_conf); socketid, txconf);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, rte_exit(EXIT_FAILURE,
"rte_eth_tx_queue_setup: err=%d, " "rte_eth_tx_queue_setup: err=%d, "
@ -1652,7 +1620,8 @@ MAIN(int argc, char **argv)
fflush(stdout); fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
socketid, &rx_conf, pktmbuf_pool[socketid]); socketid, NULL,
pktmbuf_pool[socketid]);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup: err=%d, " "rte_eth_rx_queue_setup: err=%d, "

View File

@ -215,30 +215,6 @@ static struct rte_eth_conf port_conf = {
}, },
}; };
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = RX_PTHRESH,
.hthresh = RX_HTHRESH,
.wthresh = RX_WTHRESH,
},
.rx_free_thresh = 32,
};
static const struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = TX_PTHRESH,
.hthresh = TX_HTHRESH,
.wthresh = TX_WTHRESH,
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
.txq_flags = (ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOVLANOFFL |
ETH_TXQ_FLAGS_NOXSUMSCTP |
ETH_TXQ_FLAGS_NOXSUMUDP |
ETH_TXQ_FLAGS_NOXSUMTCP)
};
static struct rte_mempool * pktmbuf_pool[NB_SOCKETS]; static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
@ -979,6 +955,8 @@ int
MAIN(int argc, char **argv) MAIN(int argc, char **argv)
{ {
struct lcore_conf *qconf; struct lcore_conf *qconf;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
int ret; int ret;
unsigned nb_ports; unsigned nb_ports;
uint16_t queueid; uint16_t queueid;
@ -1052,8 +1030,13 @@ MAIN(int argc, char **argv)
printf("txq=%d,%d,%d ", portid, 0, socketid); printf("txq=%d,%d,%d ", portid, 0, socketid);
fflush(stdout); fflush(stdout);
rte_eth_dev_info_get(portid, &dev_info);
txconf = &dev_info.default_txconf;
if (port_conf.rxmode.jumbo_frame)
txconf->txq_flags = 0;
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
socketid, &tx_conf); socketid, txconf);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, " rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
"port=%d\n", ret, portid); "port=%d\n", ret, portid);
@ -1083,7 +1066,8 @@ MAIN(int argc, char **argv)
fflush(stdout); fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
socketid, &rx_conf, pktmbuf_pool[socketid]); socketid, NULL,
pktmbuf_pool[socketid]);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d," rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d,"
"port=%d\n", ret, portid); "port=%d\n", ret, portid);

View File

@ -137,25 +137,6 @@
nb_lcores*MEMPOOL_CACHE_SIZE), \ nb_lcores*MEMPOOL_CACHE_SIZE), \
(unsigned)8192) (unsigned)8192)
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32 #define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
@ -259,31 +240,6 @@ static struct rte_eth_conf port_conf = {
}, },
}; };
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = RX_PTHRESH,
.hthresh = RX_HTHRESH,
.wthresh = RX_WTHRESH,
},
.rx_free_thresh = 32,
};
static struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = TX_PTHRESH,
.hthresh = TX_HTHRESH,
.wthresh = TX_WTHRESH,
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
.txq_flags = (ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOVLANOFFL |
ETH_TXQ_FLAGS_NOXSUMSCTP |
ETH_TXQ_FLAGS_NOXSUMUDP |
ETH_TXQ_FLAGS_NOXSUMTCP)
};
static struct rte_mempool * pktmbuf_pool[NB_SOCKETS]; static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
@ -1974,7 +1930,6 @@ parse_args(int argc, char **argv)
printf("jumbo frame is enabled - disabling simple TX path\n"); printf("jumbo frame is enabled - disabling simple TX path\n");
port_conf.rxmode.jumbo_frame = 1; port_conf.rxmode.jumbo_frame = 1;
tx_conf.txq_flags = 0;
/* if no max-pkt-len set, use the default value ETHER_MAX_LEN */ /* if no max-pkt-len set, use the default value ETHER_MAX_LEN */
if (0 == getopt_long(argc, argvopt, "", &lenopts, &option_index)) { if (0 == getopt_long(argc, argvopt, "", &lenopts, &option_index)) {
@ -2448,6 +2403,8 @@ int
MAIN(int argc, char **argv) MAIN(int argc, char **argv)
{ {
struct lcore_conf *qconf; struct lcore_conf *qconf;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
int ret; int ret;
unsigned nb_ports; unsigned nb_ports;
uint16_t queueid; uint16_t queueid;
@ -2537,8 +2494,13 @@ MAIN(int argc, char **argv)
printf("txq=%u,%d,%d ", lcore_id, queueid, socketid); printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
fflush(stdout); fflush(stdout);
rte_eth_dev_info_get(portid, &dev_info);
txconf = &dev_info.default_txconf;
if (port_conf.rxmode.jumbo_frame)
txconf->txq_flags = 0;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
socketid, &tx_conf); socketid, txconf);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, " rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
"port=%d\n", ret, portid); "port=%d\n", ret, portid);
@ -2570,7 +2532,9 @@ MAIN(int argc, char **argv)
fflush(stdout); fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
socketid, &rx_conf, pktmbuf_pool[socketid]); socketid,
NULL,
pktmbuf_pool[socketid]);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d," rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d,"
"port=%d\n", ret, portid); "port=%d\n", ret, portid);

View File

@ -78,25 +78,6 @@
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) #define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define NB_MBUF 8192 #define NB_MBUF 8192
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32 #define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
@ -153,24 +134,6 @@ static const struct rte_eth_conf port_conf = {
}, },
}; };
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = RX_PTHRESH,
.hthresh = RX_HTHRESH,
.wthresh = RX_WTHRESH,
},
};
static const struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = TX_PTHRESH,
.hthresh = TX_HTHRESH,
.wthresh = TX_WTHRESH,
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
};
struct rte_mempool * lsi_pktmbuf_pool = NULL; struct rte_mempool * lsi_pktmbuf_pool = NULL;
/* Per-port statistics struct */ /* Per-port statistics struct */
@ -751,7 +714,8 @@ MAIN(int argc, char **argv)
/* init one RX queue */ /* init one RX queue */
fflush(stdout); fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
rte_eth_dev_socket_id(portid), &rx_conf, rte_eth_dev_socket_id(portid),
NULL,
lsi_pktmbuf_pool); lsi_pktmbuf_pool);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, port=%u\n", rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, port=%u\n",
@ -760,7 +724,8 @@ MAIN(int argc, char **argv)
/* init one TX queue logical core on each port */ /* init one TX queue logical core on each port */
fflush(stdout); fflush(stdout);
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid), &tx_conf); rte_eth_dev_socket_id(portid),
NULL);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d,port=%u\n", rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d,port=%u\n",
ret, (unsigned) portid); ret, (unsigned) portid);

View File

@ -95,26 +95,6 @@ static struct rte_eth_conf port_conf = {
}, },
}; };
static struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = APP_DEFAULT_NIC_RX_PTHRESH,
.hthresh = APP_DEFAULT_NIC_RX_HTHRESH,
.wthresh = APP_DEFAULT_NIC_RX_WTHRESH,
},
.rx_free_thresh = APP_DEFAULT_NIC_RX_FREE_THRESH,
.rx_drop_en = APP_DEFAULT_NIC_RX_DROP_EN,
};
static struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = APP_DEFAULT_NIC_TX_PTHRESH,
.hthresh = APP_DEFAULT_NIC_TX_HTHRESH,
.wthresh = APP_DEFAULT_NIC_TX_WTHRESH,
},
.tx_free_thresh = APP_DEFAULT_NIC_TX_FREE_THRESH,
.tx_rs_thresh = APP_DEFAULT_NIC_TX_RS_THRESH,
};
static void static void
app_assign_worker_ids(void) app_assign_worker_ids(void)
{ {
@ -491,7 +471,7 @@ app_init_nics(void)
queue, queue,
(uint16_t) app.nic_rx_ring_size, (uint16_t) app.nic_rx_ring_size,
socket, socket,
&rx_conf, NULL,
pool); pool);
if (ret < 0) { if (ret < 0) {
rte_panic("Cannot init RX queue %u for port %u (%d)\n", rte_panic("Cannot init RX queue %u for port %u (%d)\n",
@ -512,7 +492,7 @@ app_init_nics(void)
0, 0,
(uint16_t) app.nic_tx_ring_size, (uint16_t) app.nic_tx_ring_size,
socket, socket,
&tx_conf); NULL);
if (ret < 0) { if (ret < 0) {
rte_panic("Cannot init TX queue 0 for port %d (%d)\n", rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
port, port,

View File

@ -83,41 +83,6 @@
#define NO_FLAGS 0 #define NO_FLAGS 0
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
/* Default configuration for rx and tx thresholds etc. */
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
#define MP_DEFAULT_PTHRESH 36
#define MP_DEFAULT_RX_HTHRESH 8
#define MP_DEFAULT_TX_HTHRESH 0
#define MP_DEFAULT_WTHRESH 0
static const struct rte_eth_rxconf rx_conf_default = {
.rx_thresh = {
.pthresh = MP_DEFAULT_PTHRESH,
.hthresh = MP_DEFAULT_RX_HTHRESH,
.wthresh = MP_DEFAULT_WTHRESH,
},
};
static const struct rte_eth_txconf tx_conf_default = {
.tx_thresh = {
.pthresh = MP_DEFAULT_PTHRESH,
.hthresh = MP_DEFAULT_TX_HTHRESH,
.wthresh = MP_DEFAULT_WTHRESH,
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
};
/* The mbuf pool for packet rx */ /* The mbuf pool for packet rx */
struct rte_mempool *pktmbuf_pool; struct rte_mempool *pktmbuf_pool;
@ -183,13 +148,15 @@ init_port(uint8_t port_num)
for (q = 0; q < rx_rings; q++) { for (q = 0; q < rx_rings; q++) {
retval = rte_eth_rx_queue_setup(port_num, q, rx_ring_size, retval = rte_eth_rx_queue_setup(port_num, q, rx_ring_size,
rte_eth_dev_socket_id(port_num), &rx_conf_default, pktmbuf_pool); rte_eth_dev_socket_id(port_num),
NULL, pktmbuf_pool);
if (retval < 0) return retval; if (retval < 0) return retval;
} }
for ( q = 0; q < tx_rings; q ++ ) { for ( q = 0; q < tx_rings; q ++ ) {
retval = rte_eth_tx_queue_setup(port_num, q, tx_ring_size, retval = rte_eth_tx_queue_setup(port_num, q, tx_ring_size,
rte_eth_dev_socket_id(port_num), &tx_conf_default); rte_eth_dev_socket_id(port_num),
NULL);
if (retval < 0) return retval; if (retval < 0) return retval;
} }

View File

@ -96,25 +96,6 @@ enum l2fwd_cmd{
CMD_STOP, CMD_STOP,
}; };
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32 #define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
@ -190,25 +171,6 @@ static const struct rte_eth_conf port_conf = {
}, },
}; };
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = RX_PTHRESH,
.hthresh = RX_HTHRESH,
.wthresh = RX_WTHRESH,
},
};
static const struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = TX_PTHRESH,
.hthresh = TX_HTHRESH,
.wthresh = TX_WTHRESH,
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
};
static struct rte_mempool * l2fwd_pktmbuf_pool[RTE_MAX_ETHPORTS]; static struct rte_mempool * l2fwd_pktmbuf_pool[RTE_MAX_ETHPORTS];
/* Per-port statistics struct */ /* Per-port statistics struct */
@ -1167,7 +1129,8 @@ MAIN(int argc, char **argv)
/* init one RX queue */ /* init one RX queue */
fflush(stdout); fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
rte_eth_dev_socket_id(portid), &rx_conf, rte_eth_dev_socket_id(portid),
NULL,
l2fwd_pktmbuf_pool[portid]); l2fwd_pktmbuf_pool[portid]);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n", rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
@ -1176,7 +1139,8 @@ MAIN(int argc, char **argv)
/* init one TX queue on each port */ /* init one TX queue on each port */
fflush(stdout); fflush(stdout);
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid), &tx_conf); rte_eth_dev_socket_id(portid),
NULL);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n", rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
ret, (unsigned) portid); ret, (unsigned) portid);

View File

@ -89,36 +89,6 @@
#define PARAM_PROC_ID "proc-id" #define PARAM_PROC_ID "proc-id"
#define PARAM_NUM_PROCS "num-procs" #define PARAM_NUM_PROCS "num-procs"
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
/* Default configuration for rx and tx thresholds etc. */
static const struct rte_eth_rxconf rx_conf_default = {
.rx_thresh = {
.pthresh = 8,
.hthresh = 8,
.wthresh = 4,
},
};
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
static const struct rte_eth_txconf tx_conf_default = {
.tx_thresh = {
.pthresh = 36,
.hthresh = 0,
.wthresh = 0,
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
};
/* for each lcore, record the elements of the ports array to use */ /* for each lcore, record the elements of the ports array to use */
struct lcore_ports{ struct lcore_ports{
unsigned start_port; unsigned start_port;
@ -277,7 +247,8 @@ smp_port_init(uint8_t port, struct rte_mempool *mbuf_pool, uint16_t num_queues)
for (q = 0; q < rx_rings; q ++) { for (q = 0; q < rx_rings; q ++) {
retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE, retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
rte_eth_dev_socket_id(port), &rx_conf_default, rte_eth_dev_socket_id(port),
NULL,
mbuf_pool); mbuf_pool);
if (retval < 0) if (retval < 0)
return retval; return retval;
@ -285,7 +256,8 @@ smp_port_init(uint8_t port, struct rte_mempool *mbuf_pool, uint16_t num_queues)
for (q = 0; q < tx_rings; q ++) { for (q = 0; q < tx_rings; q ++) {
retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE, retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
rte_eth_dev_socket_id(port), &tx_conf_default); rte_eth_dev_socket_id(port),
NULL);
if (retval < 0) if (retval < 0)
return retval; return retval;
} }

View File

@ -67,29 +67,6 @@ struct rte_eth_conf eth_conf = {
}, },
}; };
struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = 36,
.hthresh = 0,
.wthresh = 0,
},
.tx_free_thresh = 0,
.tx_rs_thresh = 0,
.txq_flags = (ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOVLANOFFL |
ETH_TXQ_FLAGS_NOXSUMSCTP |
ETH_TXQ_FLAGS_NOXSUMUDP |
ETH_TXQ_FLAGS_NOXSUMTCP)
};
struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = 8,
.hthresh = 8,
.wthresh = 4,
},
};
#define MAX_QUEUE_NUM 1 #define MAX_QUEUE_NUM 1
#define RX_QUEUE_NUM 1 #define RX_QUEUE_NUM 1
#define TX_QUEUE_NUM 1 #define TX_QUEUE_NUM 1
@ -103,8 +80,6 @@ struct rte_eth_rxconf rx_conf = {
struct rte_netmap_port_conf port_conf = { struct rte_netmap_port_conf port_conf = {
.eth_conf = &eth_conf, .eth_conf = &eth_conf,
.tx_conf = &tx_conf,
.rx_conf = &rx_conf,
.socket_id = SOCKET_ID_ANY, .socket_id = SOCKET_ID_ANY,
.nr_tx_rings = TX_QUEUE_NUM, .nr_tx_rings = TX_QUEUE_NUM,
.nr_rx_rings = RX_QUEUE_NUM, .nr_rx_rings = RX_QUEUE_NUM,

View File

@ -713,7 +713,7 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
for (i = 0; i < conf->nr_tx_rings; i++) { for (i = 0; i < conf->nr_tx_rings; i++) {
ret = rte_eth_tx_queue_setup(portid, i, tx_slots, ret = rte_eth_tx_queue_setup(portid, i, tx_slots,
conf->socket_id, conf->tx_conf); conf->socket_id, NULL);
if (ret < 0) { if (ret < 0) {
RTE_LOG(ERR, USER1, RTE_LOG(ERR, USER1,
@ -724,7 +724,7 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
} }
ret = rte_eth_rx_queue_setup(portid, i, rx_slots, ret = rte_eth_rx_queue_setup(portid, i, rx_slots,
conf->socket_id, conf->rx_conf, conf->pool); conf->socket_id, NULL, conf->pool);
if (ret < 0) { if (ret < 0) {
RTE_LOG(ERR, USER1, RTE_LOG(ERR, USER1,
@ -737,8 +737,6 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
/* copy config to the private storage. */ /* copy config to the private storage. */
ports[portid].eth_conf = conf->eth_conf[0]; ports[portid].eth_conf = conf->eth_conf[0];
ports[portid].rx_conf = conf->rx_conf[0];
ports[portid].tx_conf = conf->tx_conf[0];
ports[portid].pool = conf->pool; ports[portid].pool = conf->pool;
ports[portid].socket_id = conf->socket_id; ports[portid].socket_id = conf->socket_id;
ports[portid].nr_tx_rings = conf->nr_tx_rings; ports[portid].nr_tx_rings = conf->nr_tx_rings;

View File

@ -56,8 +56,6 @@ struct rte_netmap_conf {
struct rte_netmap_port_conf { struct rte_netmap_port_conf {
struct rte_eth_conf *eth_conf; struct rte_eth_conf *eth_conf;
struct rte_eth_txconf *tx_conf;
struct rte_eth_rxconf *rx_conf;
struct rte_mempool *pool; struct rte_mempool *pool;
int32_t socket_id; int32_t socket_id;
uint16_t nr_tx_rings; uint16_t nr_tx_rings;

View File

@ -102,26 +102,6 @@ static struct rte_eth_conf port_conf = {
}, },
}; };
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = 8, /* RX prefetch threshold reg */
.hthresh = 8, /* RX host threshold reg */
.wthresh = 4, /* RX write-back threshold reg */
},
.rx_free_thresh = 32,
};
static const struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = 36, /* TX prefetch threshold reg */
.hthresh = 0, /* TX host threshold reg */
.wthresh = 0, /* TX write-back threshold reg */
},
.tx_free_thresh = 0,
.tx_rs_thresh = 0,
.txq_flags = 0x0,
};
#define NIC_RX_QUEUE_DESC 128 #define NIC_RX_QUEUE_DESC 128
#define NIC_TX_QUEUE_DESC 512 #define NIC_TX_QUEUE_DESC 512
@ -391,11 +371,15 @@ MAIN(int argc, char **argv)
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_rx, ret); rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_rx, ret);
ret = rte_eth_rx_queue_setup(port_rx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC, rte_eth_dev_socket_id(port_rx), &rx_conf, pool); ret = rte_eth_rx_queue_setup(port_rx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC,
rte_eth_dev_socket_id(port_rx),
NULL, pool);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_rx, ret); rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_rx, ret);
ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC, rte_eth_dev_socket_id(port_rx), &tx_conf); ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC,
rte_eth_dev_socket_id(port_rx),
NULL);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_rx, ret); rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_rx, ret);
@ -403,11 +387,15 @@ MAIN(int argc, char **argv)
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_tx, ret); rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_tx, ret);
ret = rte_eth_rx_queue_setup(port_tx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC, rte_eth_dev_socket_id(port_tx), &rx_conf, pool); ret = rte_eth_rx_queue_setup(port_tx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC,
rte_eth_dev_socket_id(port_tx),
NULL, pool);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_tx, ret); rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_tx, ret);
ret = rte_eth_tx_queue_setup(port_tx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC, rte_eth_dev_socket_id(port_tx), &tx_conf); ret = rte_eth_tx_queue_setup(port_tx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC,
rte_eth_dev_socket_id(port_tx),
NULL);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_tx, ret); rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_tx, ret);

View File

@ -64,24 +64,6 @@ static const struct rte_eth_conf port_conf = {
}, },
}; };
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = 8,
.hthresh = 8,
.wthresh = 4,
},
};
static const struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = 36,
.hthresh = 0,
.wthresh = 0,
},
.tx_free_thresh = 0,
.tx_rs_thresh = 0,
};
static struct rte_eth_fc_conf fc_conf = { static struct rte_eth_fc_conf fc_conf = {
.mode = RTE_FC_TX_PAUSE, .mode = RTE_FC_TX_PAUSE,
.high_water = 80 * 510 / 100, .high_water = 80 * 510 / 100,
@ -104,15 +86,17 @@ void configure_eth_port(uint8_t port_id)
/* Initialize the port's RX queue */ /* Initialize the port's RX queue */
ret = rte_eth_rx_queue_setup(port_id, 0, RX_DESC_PER_QUEUE, ret = rte_eth_rx_queue_setup(port_id, 0, RX_DESC_PER_QUEUE,
rte_eth_dev_socket_id(port_id), &rx_conf, rte_eth_dev_socket_id(port_id),
mbuf_pool); NULL,
mbuf_pool);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "Failed to setup RX queue on " rte_exit(EXIT_FAILURE, "Failed to setup RX queue on "
"port %u (error %d)\n", (unsigned) port_id, ret); "port %u (error %d)\n", (unsigned) port_id, ret);
/* Initialize the port's TX queue */ /* Initialize the port's TX queue */
ret = rte_eth_tx_queue_setup(port_id, 0, TX_DESC_PER_QUEUE, ret = rte_eth_tx_queue_setup(port_id, 0, TX_DESC_PER_QUEUE,
rte_eth_dev_socket_id(port_id), &tx_conf); rte_eth_dev_socket_id(port_id),
NULL);
if (ret < 0) if (ret < 0)
rte_exit(EXIT_FAILURE, "Failed to setup TX queue on " rte_exit(EXIT_FAILURE, "Failed to setup TX queue on "
"port %u (error %d)\n", (unsigned) port_id, ret); "port %u (error %d)\n", (unsigned) port_id, ret);

View File

@ -135,31 +135,6 @@ static uint32_t enable_vm2vm = 1;
/* Enable stats. */ /* Enable stats. */
static uint32_t enable_stats = 0; static uint32_t enable_stats = 0;
/* Default configuration for rx and tx thresholds etc. */
static const struct rte_eth_rxconf rx_conf_default = {
.rx_thresh = {
.pthresh = RX_PTHRESH,
.hthresh = RX_HTHRESH,
.wthresh = RX_WTHRESH,
},
.rx_drop_en = 1,
};
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe/igb PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
static const struct rte_eth_txconf tx_conf_default = {
.tx_thresh = {
.pthresh = TX_PTHRESH,
.hthresh = TX_HTHRESH,
.wthresh = TX_WTHRESH,
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
};
/* empty vmdq configuration structure. Filled in programatically */ /* empty vmdq configuration structure. Filled in programatically */
static const struct rte_eth_conf vmdq_conf_default = { static const struct rte_eth_conf vmdq_conf_default = {
.rxmode = { .rxmode = {
@ -301,6 +276,7 @@ static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool) port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{ {
struct rte_eth_dev_info dev_info; struct rte_eth_dev_info dev_info;
struct rte_eth_rxconf *rxconf;
struct rte_eth_conf port_conf; struct rte_eth_conf port_conf;
uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count(); uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT, tx_ring_size = RTE_TEST_TX_DESC_DEFAULT; const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT, tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
@ -331,17 +307,21 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
if (retval != 0) if (retval != 0)
return retval; return retval;
rte_eth_dev_info_get(port, &dev_info);
rxconf = &dev_info.default_rxconf;
rxconf->rx_drop_en = 1;
/* Setup the queues. */ /* Setup the queues. */
for (q = 0; q < rx_rings; q ++) { for (q = 0; q < rx_rings; q ++) {
retval = rte_eth_rx_queue_setup(port, q, rx_ring_size, retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
rte_eth_dev_socket_id(port), &rx_conf_default, rte_eth_dev_socket_id(port), rxconf,
mbuf_pool); mbuf_pool);
if (retval < 0) if (retval < 0)
return retval; return retval;
} }
for (q = 0; q < tx_rings; q ++) { for (q = 0; q < tx_rings; q ++) {
retval = rte_eth_tx_queue_setup(port, q, tx_ring_size, retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
rte_eth_dev_socket_id(port), &tx_conf_default); rte_eth_dev_socket_id(port),
NULL);
if (retval < 0) if (retval < 0)
return retval; return retval;
} }

View File

@ -81,25 +81,6 @@
#define MBUF_CACHE_SIZE 64 #define MBUF_CACHE_SIZE 64
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) #define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32 #define MAX_PKT_BURST 32
/* /*
@ -117,37 +98,6 @@ static uint32_t enabled_port_mask = 0;
static uint32_t num_queues = 8; static uint32_t num_queues = 8;
static uint32_t num_pools = 8; static uint32_t num_pools = 8;
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
/* Default configuration for rx and tx thresholds etc. */
static const struct rte_eth_rxconf rx_conf_default = {
.rx_thresh = {
.pthresh = RX_PTHRESH,
.hthresh = RX_HTHRESH,
.wthresh = RX_WTHRESH,
},
.rx_drop_en = 1,
};
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe/igb PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
static const struct rte_eth_txconf tx_conf_default = {
.tx_thresh = {
.pthresh = TX_PTHRESH,
.hthresh = TX_HTHRESH,
.wthresh = TX_WTHRESH,
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
};
/* empty vmdq configuration structure. Filled in programatically */ /* empty vmdq configuration structure. Filled in programatically */
static const struct rte_eth_conf vmdq_conf_default = { static const struct rte_eth_conf vmdq_conf_default = {
.rxmode = { .rxmode = {
@ -283,6 +233,7 @@ static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool) port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{ {
struct rte_eth_dev_info dev_info; struct rte_eth_dev_info dev_info;
struct rte_eth_rxconf *rxconf;
struct rte_eth_conf port_conf; struct rte_eth_conf port_conf;
uint16_t rxRings, txRings = (uint16_t)rte_lcore_count(); uint16_t rxRings, txRings = (uint16_t)rte_lcore_count();
const uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT, txRingSize = RTE_TEST_TX_DESC_DEFAULT; const uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT, txRingSize = RTE_TEST_TX_DESC_DEFAULT;
@ -308,17 +259,22 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
if (retval != 0) if (retval != 0)
return retval; return retval;
rte_eth_dev_info_get(port, &dev_info);
rxconf = &dev_info.default_rxconf;
rxconf->rx_drop_en = 1;
for (q = 0; q < rxRings; q ++) { for (q = 0; q < rxRings; q ++) {
retval = rte_eth_rx_queue_setup(port, q, rxRingSize, retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
rte_eth_dev_socket_id(port), &rx_conf_default, rte_eth_dev_socket_id(port),
mbuf_pool); rxconf,
mbuf_pool);
if (retval < 0) if (retval < 0)
return retval; return retval;
} }
for (q = 0; q < txRings; q ++) { for (q = 0; q < txRings; q ++) {
retval = rte_eth_tx_queue_setup(port, q, txRingSize, retval = rte_eth_tx_queue_setup(port, q, txRingSize,
rte_eth_dev_socket_id(port), &tx_conf_default); rte_eth_dev_socket_id(port),
NULL);
if (retval < 0) if (retval < 0)
return retval; return retval;
} }

View File

@ -87,36 +87,6 @@ static uint32_t enabled_port_mask = 0;
/* number of pools (if user does not specify any, 16 by default */ /* number of pools (if user does not specify any, 16 by default */
static enum rte_eth_nb_pools num_pools = ETH_16_POOLS; static enum rte_eth_nb_pools num_pools = ETH_16_POOLS;
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
* controller's datasheet and supporting DPDK documentation for guidance
* on how these parameters should be set.
*/
/* Default configuration for rx and tx thresholds etc. */
static const struct rte_eth_rxconf rx_conf_default = {
.rx_thresh = {
.pthresh = 8,
.hthresh = 8,
.wthresh = 4,
},
};
/*
* These default values are optimized for use with the Intel(R) 82599 10 GbE
* Controller and the DPDK ixgbe PMD. Consider using other values for other
* network controllers and/or network drivers.
*/
static const struct rte_eth_txconf tx_conf_default = {
.tx_thresh = {
.pthresh = 36,
.hthresh = 0,
.wthresh = 0,
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
};
/* empty vmdq+dcb configuration structure. Filled in programatically */ /* empty vmdq+dcb configuration structure. Filled in programatically */
static const struct rte_eth_conf vmdq_dcb_conf_default = { static const struct rte_eth_conf vmdq_dcb_conf_default = {
.rxmode = { .rxmode = {
@ -212,7 +182,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
for (q = 0; q < rxRings; q ++) { for (q = 0; q < rxRings; q ++) {
retval = rte_eth_rx_queue_setup(port, q, rxRingSize, retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
rte_eth_dev_socket_id(port), &rx_conf_default, rte_eth_dev_socket_id(port),
NULL,
mbuf_pool); mbuf_pool);
if (retval < 0) if (retval < 0)
return retval; return retval;
@ -220,7 +191,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
for (q = 0; q < txRings; q ++) { for (q = 0; q < txRings; q ++) {
retval = rte_eth_tx_queue_setup(port, q, txRingSize, retval = rte_eth_tx_queue_setup(port, q, txRingSize,
rte_eth_dev_socket_id(port), &tx_conf_default); rte_eth_dev_socket_id(port),
NULL);
if (retval < 0) if (retval < 0)
return retval; return retval;
} }