net/qede: prevent crash while changing MTU dynamically

The driver can handle a dynamic MTU change without requiring the application
to stop the port explicitly. However, there is currently no check to prevent
I/O from happening on another thread while the port is going through an
internal reset. Fix this by pointing the RX/TX burst functions at a dummy
function for the duration of the reset, and also reconfigure the RX buffer
size of each RX queue based on the new MTU value.

Fixes: 200645ac79 ("net/qede: set MTU")
Cc: stable@dpdk.org

Signed-off-by: Harish Patil <harish.patil@qlogic.com>
Author:    Harish Patil
Date:      2017-03-24 00:40:59 -07:00
Committer: Ferruh Yigit
Parent:    8130abb3ec
Commit:    1ef4c3a5c1

3 changed files with 55 additions and 14 deletions
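To make the race concrete: data-path lcores call through dev->rx_pkt_burst with no locking, so the control path cannot simply tear the queues down underneath them. Below is a minimal, self-contained model of the swap pattern the patch applies; it is not driver code, every name in it is illustrative, and it uses a C11 atomic only so the standalone model is well-defined (the driver itself performs a plain pointer assignment).

/* Stand-alone model of the burst-function swap (illustrative names only).
 * A poller keeps calling through the function pointer while the control
 * path swaps in a no-op burst, reconfigures, and swaps the real one back.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t (*burst_fn)(void *queue, void **pkts, uint16_t nb_pkts);

static uint16_t real_rx_burst(void *queue, void **pkts, uint16_t nb_pkts)
{
	(void)queue; (void)pkts;
	return nb_pkts;			/* pretend a full burst was received */
}

static uint16_t dummy_rx_burst(void *queue, void **pkts, uint16_t nb_pkts)
{
	(void)queue; (void)pkts; (void)nb_pkts;
	return 0;			/* "no packets" -- harmless during reset */
}

/* Never NULL: pollers call through it unconditionally, mirroring the fact
 * that rte_eth_rx_burst() does not check the pointer before calling it. */
static _Atomic burst_fn rx_burst = real_rx_burst;

static void set_mtu_like_sequence(void)
{
	atomic_store(&rx_burst, dummy_rx_burst);	/* park the data path */
	/* ...stop queues, resize RX buffers, restart queues... */
	atomic_store(&rx_burst, real_rx_burst);		/* resume normal I/O */
}

int main(void)
{
	void *pkts[32];

	set_mtu_like_sequence();
	printf("burst returned %u\n", atomic_load(&rx_burst)(NULL, pkts, 32));
	return 0;
}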

drivers/net/qede/qede_ethdev.c

@@ -1684,32 +1684,61 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
 int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
-	uint32_t frame_size;
-	struct qede_dev *qdev = dev->data->dev_private;
+	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	struct rte_eth_dev_info dev_info = {0};
+	struct qede_fastpath *fp;
+	uint32_t frame_size;
+	uint16_t rx_buf_size;
+	uint16_t bufsz;
+	int i;
+	PMD_INIT_FUNC_TRACE(edev);
 	qede_dev_info_get(dev, &dev_info);
-	/* VLAN_TAG = 4 */
-	frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;
-	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+	frame_size = mtu + QEDE_ETH_OVERHEAD;
+	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
+		DP_ERR(edev, "MTU %u out of range\n", mtu);
 		return -EINVAL;
+	}
 	if (!dev->data->scattered_rx &&
-	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
+			dev->data->min_rx_buf_size);
 		return -EINVAL;
+	}
+	/* Temporarily replace I/O functions with dummy ones. It cannot
+	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
+	 */
+	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
+	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
+	qede_dev_stop(dev);
+	rte_delay_ms(1000);
+	qdev->mtu = mtu;
+	/* Fix up RX buf size for all queues of the port */
+	for_each_queue(i) {
+		fp = &qdev->fp_array[i];
+		if (fp->type & QEDE_FASTPATH_RX) {
+			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+			if (dev->data->scattered_rx)
+				rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+			else
+				rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
+			rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+			fp->rxq->rx_buf_size = rx_buf_size;
+			DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
+		}
+	}
+	qede_dev_start(dev);
 	if (frame_size > ETHER_MAX_LEN)
 		dev->data->dev_conf.rxmode.jumbo_frame = 1;
 	else
 		dev->data->dev_conf.rxmode.jumbo_frame = 0;
 	/* update max frame size */
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-	qdev->mtu = mtu;
-	qede_dev_stop(dev);
-	qede_dev_start(dev);
+	/* Reassign back */
+	dev->rx_pkt_burst = qede_recv_pkts;
+	dev->tx_pkt_burst = qede_xmit_pkts;
 	return 0;
 }
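A quick worked example of the rx_buf_size fix-up in the loop above. The real values of QEDE_ETH_OVERHEAD and QEDE_CEIL_TO_CACHE_LINE_SIZE live in the qede headers; the stand-ins below (14-byte Ethernet header + 4-byte CRC + 8 bytes of VLAN tags, 64-byte cache lines) are assumptions made purely to show the rounding.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the driver macros -- assumed values, illustration only */
#define CACHE_LINE		64
#define CEIL_TO_CACHE_LINE(n)	(((n) + CACHE_LINE - 1) / CACHE_LINE * CACHE_LINE)
#define ETH_OVERHEAD_GUESS	(14 + 4 + 8)	/* hdr + CRC + 2 VLAN tags */

int main(void)
{
	uint16_t mtu = 1500;
	/* 1500 + 26 = 1526, rounded up to the next cache line: 1536 */
	uint16_t rx_buf_size = CEIL_TO_CACHE_LINE(mtu + ETH_OVERHEAD_GUESS);

	printf("mtu %u -> rx_buf_size %u\n", mtu, rx_buf_size);
	return 0;
}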

drivers/net/qede/qede_rxtx.c

@@ -1935,3 +1935,11 @@ void qede_dev_stop(struct rte_eth_dev *eth_dev)
 	DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");
 }
+
+uint16_t
+qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+		     __rte_unused struct rte_mbuf **pkts,
+		     __rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
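Returning 0 from the dummy is what makes the reset window safe: rte_eth_rx_burst() reports 0 as "no packets available" and rte_eth_tx_burst() reports 0 as "nothing sent", so a concurrent polling thread simply spins without ever touching the queues being reconfigured.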

drivers/net/qede/qede_rxtx.h

@@ -265,6 +265,10 @@ uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
 uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
 			uint16_t nb_pkts);
+uint16_t qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+			      __rte_unused struct rte_mbuf **pkts,
+			      __rte_unused uint16_t nb_pkts);
+
 /* Fastpath resource alloc/dealloc helpers */
 int qede_alloc_fp_resc(struct qede_dev *qdev);
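
For completeness, this is the application-side scenario the patch makes safe, sketched with standard ethdev API calls; EAL setup and the rte_eal_remote_launch() of poll_loop() are omitted, and PORT_ID is an assumed port.

#include <stdint.h>
#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define PORT_ID 0	/* assumed port under test */

static volatile int running = 1;

/* Data-path lcore: polls continuously and never coordinates with the
 * control path -- exactly the situation that used to crash. */
static int poll_loop(__rte_unused void *arg)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	while (running) {
		/* Returns 0 while the driver has the dummy burst installed */
		nb = rte_eth_rx_burst(PORT_ID, 0, pkts, 32);
		for (i = 0; i < nb; i++)
			rte_pktmbuf_free(pkts[i]);
	}
	return 0;
}

/* Control path (e.g. the main lcore): no explicit stop/start around the
 * call; the driver handles the internal reset itself. */
static void change_mtu(void)
{
	if (rte_eth_dev_set_mtu(PORT_ID, 9000) != 0)
		printf("MTU change rejected\n");
}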