net/ionic: update MTU calculations

Test min and max MTU against values read from firmware, for correctness.
Update the firmware field name, for clarity.
The device must be stopped before changing MTU, for correctness.
Store the calculated frame size in the queue, for performance.

Signed-off-by: Andrew Boyer <andrew.boyer@amd.com>
Signed-off-by: R Mohamed Shah <mohamedshah.r@amd.com>
Authored by Andrew Boyer on 2022-10-18 12:41:04 -07:00, committed by Ferruh Yigit
parent 766687540c
commit b671e69ae4
6 changed files with 51 additions and 42 deletions
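
For orientation, here is a minimal standalone sketch of the size arithmetic this patch introduces: the advertised MTU range is the driver's own limits clamped by the values reported by firmware, and the frame size is cached once per LIF and per Rx queue instead of being recomputed per packet. The firmware limits and the 1500-byte MTU below are hypothetical example values, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Constants mirrored from the patch; the RTE_* values are the standard
 * rte_ether.h definitions. */
#define RTE_ETHER_HDR_LEN   14
#define RTE_ETHER_CRC_LEN   4
#define RTE_ETHER_MIN_MTU   68
#define VLAN_TAG_SIZE       4
#define IONIC_MIN_MTU       RTE_ETHER_MIN_MTU
#define IONIC_MAX_MTU       9378
#define IONIC_ETH_OVERHEAD  (RTE_ETHER_HDR_LEN + VLAN_TAG_SIZE)

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Hypothetical limits, standing in for ident->lif.eth.{min,max}_mtu */
	uint32_t fw_min_mtu = 576, fw_max_mtu = 9194;

	/* Advertised range: driver constants clamped by the firmware values */
	uint32_t min_mtu = MAX((uint32_t)IONIC_MIN_MTU, fw_min_mtu);
	uint32_t max_mtu = MIN((uint32_t)IONIC_MAX_MTU, fw_max_mtu);

	/* Frame size cached in the LIF, then trimmed of CRC for the Rx queue */
	uint32_t mtu = 1500;
	uint32_t lif_frame_size = mtu + IONIC_ETH_OVERHEAD;
	uint32_t rxq_frame_size = lif_frame_size - RTE_ETHER_CRC_LEN;

	printf("mtu range [%u, %u], lif frame %u, rxq frame %u\n",
	       min_mtu, max_mtu, lif_frame_size, rxq_frame_size);
	return 0;
}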


@@ -11,8 +11,11 @@
#include "ionic_if.h"
#include "ionic_regs.h"
#define VLAN_TAG_SIZE 4
#define IONIC_MIN_MTU RTE_ETHER_MIN_MTU
#define IONIC_MAX_MTU 9194
#define IONIC_MAX_MTU 9378
#define IONIC_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + VLAN_TAG_SIZE)
#define IONIC_MAX_RING_DESC 32768
#define IONIC_MIN_RING_DESC 16


@@ -343,18 +343,17 @@ static int
ionic_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
int err;
IONIC_PRINT_CALL();
if (lif->state & IONIC_LIF_F_UP) {
IONIC_PRINT(ERR, "Stop %s before setting mtu", lif->name);
return -EBUSY;
}
/*
* Note: mtu check against IONIC_MIN_MTU, IONIC_MAX_MTU
* is done by the API.
*/
/* Note: mtu check against min/max is done by the API */
IONIC_PRINT(INFO, "Setting mtu %u", mtu);
err = ionic_lif_change_mtu(lif, mtu);
if (err)
return err;
/* Update the frame size used by the Rx path */
lif->frame_size = mtu + IONIC_ETH_OVERHEAD;
return 0;
}
@@ -376,12 +375,16 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]);
/* Also add ETHER_CRC_LEN if the adapter is able to keep CRC */
dev_info->min_rx_bufsize = IONIC_MIN_MTU + RTE_ETHER_HDR_LEN;
dev_info->max_rx_pktlen = IONIC_MAX_MTU + RTE_ETHER_HDR_LEN;
dev_info->max_mac_addrs = adapter->max_mac_addrs;
dev_info->min_mtu = IONIC_MIN_MTU;
dev_info->max_mtu = IONIC_MAX_MTU;
dev_info->min_mtu = RTE_MAX((uint32_t)IONIC_MIN_MTU,
rte_le_to_cpu_32(ident->lif.eth.min_mtu));
dev_info->max_mtu = RTE_MIN((uint32_t)IONIC_MAX_MTU,
rte_le_to_cpu_32(ident->lif.eth.max_mtu));
dev_info->min_rx_bufsize = dev_info->min_mtu + IONIC_ETH_OVERHEAD;
dev_info->max_rx_pktlen = dev_info->max_mtu + IONIC_ETH_OVERHEAD;
dev_info->max_lro_pkt_size =
eth_dev->data->dev_conf.rxmode.max_lro_pkt_size;
dev_info->max_mac_addrs = adapter->max_mac_addrs;
dev_info->hash_key_size = IONIC_RSS_HASH_KEY_SIZE;
dev_info->reta_size = rte_le_to_cpu_16(ident->lif.eth.rss_ind_tbl_sz);
dev_info->flow_type_rss_offloads = IONIC_ETH_RSS_OFFLOAD_ALL;
@@ -889,6 +892,15 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
if (dev_conf->lpbk_mode)
IONIC_PRINT(WARNING, "Loopback mode not supported");
lif->frame_size = eth_dev->data->mtu + IONIC_ETH_OVERHEAD;
err = ionic_lif_change_mtu(lif, eth_dev->data->mtu);
if (err) {
IONIC_PRINT(ERR, "Cannot set LIF frame size %u: %d",
lif->frame_size, err);
return err;
}
err = ionic_lif_start(lif);
if (err) {
IONIC_PRINT(ERR, "Cannot start LIF: %d", err);


@@ -401,8 +401,8 @@ union ionic_lif_config {
* @version: Ethernet identify structure version
* @max_ucast_filters: Number of perfect unicast addresses supported
* @max_mcast_filters: Number of perfect multicast addresses supported
* @min_frame_size: Minimum size of frames to be sent
* @max_frame_size: Maximum size of frames to be sent
* @min_mtu: Minimum MTU of frames to be sent
* @max_mtu: Maximum MTU of frames to be sent
* @config: LIF config struct with features, mtu, mac, q counts
*
* @rdma: RDMA identify structure
@@ -434,8 +434,8 @@ union ionic_lif_identity {
__le32 max_ucast_filters;
__le32 max_mcast_filters;
__le16 rss_ind_tbl_sz;
__le32 min_frame_size;
__le32 max_frame_size;
__le32 min_mtu;
__le32 max_mtu;
u8 rsvd2[106];
union ionic_lif_config config;
} __rte_packed eth;


@@ -536,7 +536,7 @@ ionic_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
}
int
ionic_lif_change_mtu(struct ionic_lif *lif, int new_mtu)
ionic_lif_change_mtu(struct ionic_lif *lif, uint32_t new_mtu)
{
struct ionic_admin_ctx ctx = {
.pending_work = true,
@@ -546,13 +546,8 @@ ionic_lif_change_mtu(struct ionic_lif *lif, int new_mtu)
.mtu = rte_cpu_to_le_32(new_mtu),
},
};
int err;
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
return err;
return 0;
return ionic_adminq_post_wait(lif, &ctx);
}
int
@@ -730,6 +725,7 @@ ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
int err;
flags = IONIC_QCQ_F_SG;
err = ionic_qcq_alloc(lif,
IONIC_QTYPE_RXQ,
sizeof(struct ionic_rx_qcq),


@@ -81,6 +81,7 @@ struct ionic_rx_qcq {
/* cacheline2 */
struct rte_mempool *mb_pool;
uint16_t frame_size; /* Based on configured MTU */
uint16_t flags;
/* cacheline3 (inside stats) */
@@ -123,6 +124,7 @@ struct ionic_lif {
struct ionic_adapter *adapter;
struct rte_eth_dev *eth_dev;
uint16_t port_id; /**< Device port identifier */
uint16_t frame_size;
uint32_t hw_index;
uint32_t state;
uint32_t ntxqcqs;
@@ -181,7 +183,7 @@ int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr);
int ionic_qcq_service(struct ionic_qcq *qcq, int budget, ionic_cq_cb cb,
void *cb_arg);
int ionic_lif_change_mtu(struct ionic_lif *lif, int new_mtu);
int ionic_lif_change_mtu(struct ionic_lif *lif, uint32_t new_mtu);
int ionic_dev_add_mac(struct rte_eth_dev *eth_dev,
struct rte_ether_addr *mac_addr,


@@ -772,8 +772,6 @@ ionic_rx_clean(struct ionic_rx_qcq *rxq,
struct ionic_rxq_comp *cq_desc_base = cq->base;
struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
struct rte_mbuf *rxm, *rxm_seg;
uint32_t max_frame_size =
rxq->qcq.lif->eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
uint64_t pkt_flags = 0;
uint32_t pkt_type;
struct ionic_rx_stats *stats = &rxq->stats;
@@ -814,8 +812,7 @@ ionic_rx_clean(struct ionic_rx_qcq *rxq,
return;
}
if (cq_desc->len > max_frame_size ||
cq_desc->len == 0) {
if (cq_desc->len > rxq->frame_size || cq_desc->len == 0) {
stats->bad_len++;
ionic_rx_recycle(q, q_desc_index, rxm);
return;
@@ -936,7 +933,7 @@ ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
}
static __rte_always_inline int
ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
ionic_rx_fill(struct ionic_rx_qcq *rxq)
{
struct ionic_queue *q = &rxq->qcq.q;
struct ionic_rxq_desc *desc, *desc_base = q->base;
@@ -961,7 +958,7 @@ ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
info = IONIC_INFO_PTR(q, q->head_idx);
nsegs = (len + buf_size - 1) / buf_size;
nsegs = (rxq->frame_size + buf_size - 1) / buf_size;
desc = &desc_base[q->head_idx];
dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));
@@ -996,9 +993,9 @@ ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
prev_rxm_seg = rxm_seg;
}
if (size < len)
if (size < rxq->frame_size)
IONIC_PRINT(ERR, "Rx SG size is not sufficient (%d < %d)",
size, len);
size, rxq->frame_size);
info[0] = rxm;
@@ -1016,7 +1013,6 @@ ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
uint32_t frame_size = eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
struct ionic_rx_qcq *rxq;
int err;
@@ -1029,8 +1025,10 @@ ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
rxq = eth_dev->data->rx_queues[rx_queue_id];
IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs (size: %u)",
rx_queue_id, rxq->qcq.q.num_descs, frame_size);
rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN;
IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u",
rx_queue_id, rxq->qcq.q.num_descs, rxq->frame_size);
if (!(rxq->flags & IONIC_QCQ_F_INITED)) {
err = ionic_lif_rxq_init(rxq);
@@ -1041,7 +1039,7 @@ ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
}
/* Allocate buffers for descriptor rings */
if (ionic_rx_fill(rxq, frame_size) != 0) {
if (ionic_rx_fill(rxq) != 0) {
IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
rx_queue_id);
return -1;
@@ -1129,8 +1127,6 @@ ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
struct ionic_rx_qcq *rxq = rx_queue;
uint32_t frame_size =
rxq->qcq.lif->eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
struct ionic_rx_service service_cb_arg;
service_cb_arg.rx_pkts = rx_pkts;
@@ -1139,7 +1135,7 @@ ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
ionic_rxq_service(rxq, nb_pkts, &service_cb_arg);
ionic_rx_fill(rxq, frame_size);
ionic_rx_fill(rxq);
return service_cb_arg.nb_rx;
}
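
Finally, a small standalone sketch of the segment-count math that ionic_rx_fill() now derives from the cached rxq->frame_size instead of a per-call parameter. The 2048-byte buffer size is a hypothetical example; in the driver, buf_size is assumed to come from the Rx mempool's data room in the usual mbuf fashion.

#include <stdint.h>
#include <stdio.h>

/* Same rounding-up division used by the patched ionic_rx_fill() */
static uint32_t
rx_segments_needed(uint16_t frame_size, uint16_t buf_size)
{
	return ((uint32_t)frame_size + buf_size - 1) / buf_size;
}

int main(void)
{
	/* e.g. a 9194-byte frame spread over 2048-byte Rx buffers -> 5 segments */
	printf("nsegs = %u\n", rx_segments_needed(9194, 2048));
	return 0;
}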