cxgbe: enable jumbo frames
Increase max_rx_pktlen to accommodate jumbo frame size. Perform sanity
checks and enable jumbo mode in rx queue setup. Set link mtu based on
max_rx_pktlen.

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: Kumar Sanghvi <kumaras@chelsio.com>
commit 4b2eff452d
parent bf89cbedd2

@@ -50,6 +50,7 @@ CXGBE PMD has support for:
 - Promiscuous mode
 - All multicast mode
 - Port hardware statistics
+- Jumbo frames
 
 Limitations
 -----------

@@ -562,3 +563,27 @@ To disable again, run:
 
    testpmd> set flow_ctrl rx off tx off 0 0 0 0 mac_ctrl_frame_fwd off autoneg off 0
    testpmd> set flow_ctrl rx off tx off 0 0 0 0 mac_ctrl_frame_fwd off autoneg off 1
+
+Jumbo Mode
+~~~~~~~~~~
+
+There are two ways to enable sending and receiving of jumbo frames via testpmd.
+One method involves using the **mtu** command, which changes the mtu of an
+individual port without having to stop the selected port. Another method
+involves stopping all the ports first and then running **max-pkt-len** command
+to configure the mtu of all the ports with a single command.
+
+- To configure each port individually, run the mtu command as follows:
+
+  .. code-block:: console
+
+     testpmd> port config mtu 0 9000
+     testpmd> port config mtu 1 9000
+
+- To configure all the ports at once, stop all the ports first and run the
+  max-pkt-len command as follows:
+
+  .. code-block:: console
+
+     testpmd> port stop all
+     testpmd> port config all max-pkt-len 9000

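The same jumbo setup can also be requested directly from a DPDK application instead of testpmd. Below is a minimal sketch, not part of this patch, assuming the DPDK 2.x-era ethdev fields used here (rxmode.max_rx_pkt_len, rxmode.jumbo_frame), a hypothetical port port_id, one Rx/Tx queue each, and a mempool mb_pool whose buffers are sized for the chosen frame length:

#include <string.h>

#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

/* Minimal sketch (assumed names): configure one Rx/Tx queue pair for
 * 9000-byte jumbo frames on 'port_id' using mbufs from 'mb_pool'.
 */
static int
configure_jumbo_port(uint8_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	/* 9018 = 9000 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4), i.e. the
	 * new CXGBE_MAX_RX_PKTLEN upper bound.
	 */
	conf.rxmode.max_rx_pkt_len = 9000 + ETHER_HDR_LEN + ETHER_CRC_LEN;
	conf.rxmode.jumbo_frame = 1;	/* the PMD also flips this itself */

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;

	/* cxgbe_dev_rx_queue_setup() now validates max_rx_pkt_len against
	 * CXGBE_MIN_RX_BUFSIZE and CXGBE_MAX_RX_PKTLEN.
	 */
	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
				     NULL, mb_pool);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL);
	if (ret < 0)
		return ret;

	return rte_eth_dev_start(port_id);
}

With these values the new check in cxgbe_dev_rx_queue_setup() passes and the driver keeps rxmode.jumbo_frame set.
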
@@ -6,6 +6,7 @@ New Features
 
 * **Enhanced support for the Chelsio CXGBE driver.**
 
+  * Added support for Jumbo Frames.
   * Optimize forwarding performance for Chelsio T5 40GbE cards.
 
 

@@ -43,6 +43,9 @@
 #define CXGBE_DEFAULT_TX_DESC_SIZE 1024 /* Default TX ring size */
 #define CXGBE_DEFAULT_RX_DESC_SIZE 1024 /* Default RX ring size */
 
+#define CXGBE_MIN_RX_BUFSIZE ETHER_MIN_MTU /* min buf size */
+#define CXGBE_MAX_RX_PKTLEN (9000 + ETHER_HDR_LEN + ETHER_CRC_LEN) /* max pkt */
+
 int cxgbe_probe(struct adapter *adapter);
 int cxgbe_up(struct adapter *adap);
 int cxgbe_down(struct port_info *pi);

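For reference, with the standard rte_ether.h constants the two new limits work out as follows (a note for review, not part of the patch):

/* ETHER_MIN_MTU = 68, ETHER_HDR_LEN = 14, ETHER_CRC_LEN = 4, so:
 *
 *   CXGBE_MIN_RX_BUFSIZE = 68 bytes
 *   CXGBE_MAX_RX_PKTLEN  = 9000 + 14 + 4 = 9018 bytes
 *
 * i.e. up to a 9000-byte jumbo MTU plus Ethernet header and CRC.
 */
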
@@ -141,8 +141,8 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
 	struct adapter *adapter = pi->adapter;
 	int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;
 
-	device_info->min_rx_bufsize = 68; /* XXX: Smallest pkt size */
-	device_info->max_rx_pktlen = 1500; /* XXX: For now we support mtu */
+	device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
+	device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
 	device_info->max_rx_queues = max_queues;
 	device_info->max_tx_queues = max_queues;
 	device_info->max_mac_addrs = 1;

@@ -498,6 +498,8 @@ static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	int err = 0;
 	int msi_idx = 0;
 	unsigned int temp_nb_desc;
+	struct rte_eth_dev_info dev_info;
+	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
 
 	RTE_SET_USED(rx_conf);
 

@@ -505,6 +507,17 @@ static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
 		  socket_id, mp);
 
+	cxgbe_dev_info_get(eth_dev, &dev_info);
+
+	/* Must accommodate at least ETHER_MIN_MTU */
+	if ((pkt_len < dev_info.min_rx_bufsize) ||
+	    (pkt_len > dev_info.max_rx_pktlen)) {
+		dev_err(adap, "%s: max pkt len must be > %d and <= %d\n",
+			__func__, dev_info.min_rx_bufsize,
+			dev_info.max_rx_pktlen);
+		return -EINVAL;
+	}
+
 	/* Free up the existing queue */
 	if (eth_dev->data->rx_queues[queue_idx]) {
 		cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);

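So the accepted range for rxmode.max_rx_pkt_len is 68 through 9018 bytes inclusive; a few illustrative values (assumed, not from the patch):

/* pkt_len = 1518  (standard frame)             -> accepted
 * pkt_len = 9018  (9000 + 14 + 4, upper bound) -> accepted
 * pkt_len = 9019                               -> rejected with -EINVAL
 * pkt_len = 64                                 -> rejected with -EINVAL
 */
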
@@ -534,6 +547,12 @@ static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	if ((&rxq->fl) != NULL)
 		rxq->fl.size = temp_nb_desc;
 
+	/* Set to jumbo mode if necessary */
+	if (pkt_len > ETHER_MAX_LEN)
+		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+	else
+		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
 	err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
 			       &rxq->fl, t4_ethrx_handler,
 			       t4_get_mps_bg_map(adapter, pi->tx_chan), mp,

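Since ETHER_MAX_LEN is 1518, this makes rxmode.jumbo_frame track whether the configured packet length exceeds a standard Ethernet frame, for example:

/* max_rx_pkt_len = 1518 -> rxmode.jumbo_frame = 0
 * max_rx_pkt_len = 9018 -> rxmode.jumbo_frame = 1
 */
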
|
@ -855,12 +855,13 @@ int link_start(struct port_info *pi)
|
|||||||
{
|
{
|
||||||
struct adapter *adapter = pi->adapter;
|
struct adapter *adapter = pi->adapter;
|
||||||
int ret;
|
int ret;
|
||||||
|
unsigned int mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We do not set address filters and promiscuity here, the stack does
|
* We do not set address filters and promiscuity here, the stack does
|
||||||
* that step explicitly.
|
* that step explicitly.
|
||||||
*/
|
*/
|
||||||
ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, 1500, -1, -1,
|
ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1,
|
||||||
-1, 1, true);
|
-1, 1, true);
|
||||||
if (ret == 0) {
|
if (ret == 0) {
|
||||||
ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
|
ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
|
||||||
|
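Net effect of this last hunk, for review: the VI Rx mode was previously always programmed for 1500-byte frames and now follows the configured packet length:

/* Before: t4_set_rxmode(adapter, adapter->mbox, pi->viid, 1500, ...) - fixed
 * After:  t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,  ...) - follows
 *         rxmode.max_rx_pkt_len, e.g. 9018 when jumbo frames are configured
 */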