ethdev: remove jumbo offload flag

Removing the 'DEV_RX_OFFLOAD_JUMBO_FRAME' offload flag.

Instead of drivers announcing this capability, applications can deduce
it by checking the reported 'dev_info.max_mtu' or
'dev_info.max_rx_pktlen'.

And instead of the application setting this flag explicitly to enable
jumbo frames, the driver can deduce it by comparing the requested 'mtu'
to 'RTE_ETHER_MTU'.

Removing this additional configuration option simplifies the API.

Suggested-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Reviewed-by: Rosen Xu <rosen.xu@intel.com>
Acked-by: Somnath Kotur <somnath.kotur@broadcom.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Huisong Li <lihuisong@huawei.com>
Acked-by: Hyong Youb Kim <hyonkim@cisco.com>
Acked-by: Michal Krawczyk <mk@semihalf.com>
Ferruh Yigit 2021-10-18 14:48:51 +01:00
parent f7e04f57ad
commit b563c14212
75 changed files with 48 additions and 259 deletions
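
For reference, a minimal sketch of the resulting application-side usage,
assuming a valid 'port_id' (the 9000-byte MTU below is only an example
value, not part of this patch):

	/* Jumbo support is deduced from dev_info limits, not an offload flag. */
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = { 0 };
	uint16_t mtu = 9000; /* example jumbo MTU */

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return -1;

	if (mtu > dev_info.max_mtu)
		return -1; /* device cannot carry frames this large */

	port_conf.rxmode.mtu = mtu;
	/* drivers treat a requested mtu > RTE_ETHER_MTU as jumbo */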

@ -199,8 +199,6 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
port_conf.rxmode.mtu = opt->max_pkt_sz - RTE_ETHER_HDR_LEN -
RTE_ETHER_CRC_LEN;
if (port_conf.rxmode.mtu > RTE_ETHER_MTU)
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
t->internal_port = 1;
RTE_ETH_FOREACH_DEV(i) {

@ -1911,7 +1911,7 @@ cmd_config_max_pkt_len_parsed(void *parsed_result,
return;
}
update_jumbo_frame_offload(port_id, res->value);
update_mtu_from_frame_size(port_id, res->value);
}
init_port_config();

@ -1206,40 +1206,19 @@ port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
struct rte_port *port = &ports[port_id];
int diag;
struct rte_port *rte_port = &ports[port_id];
struct rte_eth_dev_info dev_info;
int ret;
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
ret = eth_dev_info_get_print_err(port_id, &dev_info);
if (ret != 0)
return;
if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
fprintf(stderr,
"Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
mtu, dev_info.min_mtu, dev_info.max_mtu);
return;
}
diag = rte_eth_dev_set_mtu(port_id, mtu);
if (diag != 0) {
fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
return;
}
rte_port->dev_conf.rxmode.mtu = mtu;
if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (mtu > RTE_ETHER_MTU)
rte_port->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
rte_port->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
}
port->dev_conf.rxmode.mtu = mtu;
}
/* Generic flow management functions. */

@ -1572,12 +1572,6 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
if (ret != 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
ret = update_jumbo_frame_offload(pid, 0);
if (ret != 0)
fprintf(stderr,
"Updating jumbo frame offload failed for port %u\n",
pid);
if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
port->dev_conf.txmode.offloads &=
~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
@ -3691,24 +3685,18 @@ rxtx_port_config(struct rte_port *port)
}
/*
* Helper function to arrange max_rx_pktlen value and JUMBO_FRAME offload,
* MTU is also aligned.
* Helper function to set MTU from frame size
*
* port->dev_info should be set before calling this function.
*
* if 'max_rx_pktlen' is zero, it is set to current device value, "MTU +
* ETH_OVERHEAD". This is useful to update flags but not MTU value.
*
* return 0 on success, negative on error
*/
int
update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen)
update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
{
struct rte_port *port = &ports[portid];
uint32_t eth_overhead;
uint64_t rx_offloads;
uint16_t mtu, new_mtu;
bool on;
eth_overhead = get_eth_overhead(&port->dev_info);
@ -3717,40 +3705,8 @@ update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen)
return -1;
}
if (max_rx_pktlen == 0)
max_rx_pktlen = mtu + eth_overhead;
rx_offloads = port->dev_conf.rxmode.offloads;
new_mtu = max_rx_pktlen - eth_overhead;
if (new_mtu <= RTE_ETHER_MTU) {
rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
on = false;
} else {
if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
fprintf(stderr,
"Frame size (%u) is not supported by port %u\n",
max_rx_pktlen, portid);
return -1;
}
rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
on = true;
}
if (rx_offloads != port->dev_conf.rxmode.offloads) {
uint16_t qid;
port->dev_conf.rxmode.offloads = rx_offloads;
/* Apply JUMBO_FRAME offload configuration to Rx queue(s) */
for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) {
if (on)
port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
}
}
if (mtu == new_mtu)
return 0;

@ -1045,7 +1045,7 @@ uint16_t tx_pkt_set_dynf(uint16_t port_id, __rte_unused uint16_t queue,
__rte_unused void *user_param);
void add_tx_dynf_callback(portid_t portid);
void remove_tx_dynf_callback(portid_t portid);
int update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen);
int update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen);
/*
* Work-around of a compilation error with ICC on invocations of the

@ -71,8 +71,6 @@ RX Port and associated core :numref:`dtg_rx_rate`.
* Identify if port Speed and Duplex is matching to desired values with
``rte_eth_link_get``.
* Check ``DEV_RX_OFFLOAD_JUMBO_FRAME`` is set with ``rte_eth_dev_info_get``.
* Check promiscuous mode if the drops do not occur for unique MAC address
with ``rte_eth_promiscuous_get``.

@ -885,7 +885,6 @@ processing. This improved performance is derived from a number of optimizations:
  DEV_RX_OFFLOAD_VLAN_STRIP
  DEV_RX_OFFLOAD_KEEP_CRC
  DEV_RX_OFFLOAD_JUMBO_FRAME
  DEV_RX_OFFLOAD_IPV4_CKSUM
  DEV_RX_OFFLOAD_UDP_CKSUM
  DEV_RX_OFFLOAD_TCP_CKSUM

@ -165,8 +165,7 @@ Jumbo frame
Supports Rx jumbo frames.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_JUMBO_FRAME``.
``dev_conf.rxmode.mtu``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``dev_conf.rxmode.mtu``.
* **[related] rte_eth_dev_info**: ``max_rx_pktlen``.
* **[related] API**: ``rte_eth_dev_set_mtu()``.

@ -158,7 +158,6 @@ static struct rte_pci_driver rte_atl_pmd = {
| DEV_RX_OFFLOAD_IPV4_CKSUM \
| DEV_RX_OFFLOAD_UDP_CKSUM \
| DEV_RX_OFFLOAD_TCP_CKSUM \
| DEV_RX_OFFLOAD_JUMBO_FRAME \
| DEV_RX_OFFLOAD_MACSEC_STRIP \
| DEV_RX_OFFLOAD_VLAN_FILTER)

@ -1217,7 +1217,6 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_KEEP_CRC;

@ -535,7 +535,6 @@ bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN;
dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS;
dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;

@ -595,7 +595,6 @@ struct bnxt_rep_info {
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_KEEP_CRC | \
DEV_RX_OFFLOAD_VLAN_EXTEND | \
DEV_RX_OFFLOAD_TCP_LRO | \

@ -736,15 +736,10 @@ static int bnxt_start_nic(struct bnxt *bp)
unsigned int i, j;
int rc;
if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
bp->eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
if (bp->eth_dev->data->mtu > RTE_ETHER_MTU)
bp->flags |= BNXT_FLAG_JUMBO;
} else {
bp->eth_dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
else
bp->flags &= ~BNXT_FLAG_JUMBO;
}
/* THOR does not support ring groups.
* But we will use the array to save RSS context IDs.
@ -1254,7 +1249,6 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
if (eth_dev->data->dev_conf.rxmode.offloads &
~(DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |

@ -1724,14 +1724,6 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
slave_eth_dev->data->dev_conf.rxmode.mtu =
bonded_eth_dev->data->dev_conf.rxmode.mtu;
if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_JUMBO_FRAME)
slave_eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
slave_eth_dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;

@ -78,9 +78,9 @@
#define CNXK_NIX_RX_OFFLOAD_CAPA \
(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
DEV_RX_OFFLOAD_RSS_HASH | DEV_RX_OFFLOAD_TIMESTAMP | \
DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_SECURITY)
DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | DEV_RX_OFFLOAD_RSS_HASH | \
DEV_RX_OFFLOAD_TIMESTAMP | DEV_RX_OFFLOAD_VLAN_STRIP | \
DEV_RX_OFFLOAD_SECURITY)
#define RSS_IPV4_ENABLE \
(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP | \

@ -92,7 +92,6 @@ cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
{DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
{DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
{DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo Frame,"},
{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
{DEV_RX_OFFLOAD_SECURITY, " Security,"},

@ -51,7 +51,6 @@
DEV_RX_OFFLOAD_IPV4_CKSUM | \
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_RSS_HASH)

@ -660,14 +660,6 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
rxq->rspq.size = temp_nb_desc;
rxq->fl.size = temp_nb_desc;
/* Set to jumbo mode if necessary */
if (eth_dev->data->mtu > RTE_ETHER_MTU)
eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
eth_dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
&rxq->fl, NULL,
is_pf4(adapter) ?

@ -365,13 +365,10 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
struct rte_mbuf *buf_bulk[n];
int ret, i;
struct rte_pktmbuf_pool_private *mbp_priv;
u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_JUMBO_FRAME;
/* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
if (jumbo_en &&
((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000))
if ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000)
buf_size_idx = RX_LARGE_MTU_BUF;
ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n);

@ -54,7 +54,6 @@
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER;
/* Rx offloads which cannot be disabled */
@ -592,7 +591,6 @@ dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
uint64_t flags;
const char *output;
} rx_offload_map[] = {
{DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
{DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},

@ -44,7 +44,6 @@ static uint64_t dev_rx_offloads_sup =
DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_TIMESTAMP;
/* Rx offloads which cannot be disabled */
@ -298,7 +297,6 @@ dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
{DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
{DEV_RX_OFFLOAD_SCATTER, " Scattered,"}

@ -465,8 +465,8 @@ void eth_em_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void em_dev_clear_queues(struct rte_eth_dev *dev);
void em_dev_free_queues(struct rte_eth_dev *dev);
uint64_t em_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
uint64_t em_get_rx_port_offloads_capa(void);
uint64_t em_get_rx_queue_offloads_capa(void);
int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,

@ -1081,8 +1081,8 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = 1;
dev_info->max_tx_queues = 1;
dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa();
dev_info->rx_offload_capa = em_get_rx_port_offloads_capa() |
dev_info->rx_queue_offload_capa;
dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |

@ -1364,12 +1364,9 @@ em_reset_rx_queue(struct em_rx_queue *rxq)
}
uint64_t
em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
em_get_rx_port_offloads_capa(void)
{
uint64_t rx_offload_capa;
uint32_t max_rx_pktlen;
max_rx_pktlen = em_get_max_pktlen(dev);
rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
@ -1379,14 +1376,12 @@ em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER;
if (max_rx_pktlen > RTE_ETHER_MAX_LEN)
rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
return rx_offload_capa;
}
uint64_t
em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
em_get_rx_queue_offloads_capa(void)
{
uint64_t rx_queue_offload_capa;
@ -1395,7 +1390,7 @@ em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
* capability be same to per port queue offloading capability
* for better convenience.
*/
rx_queue_offload_capa = em_get_rx_port_offloads_capa(dev);
rx_queue_offload_capa = em_get_rx_port_offloads_capa();
return rx_queue_offload_capa;
}
@ -1826,7 +1821,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
* to avoid splitting packets that don't fit into
* one buffer.
*/
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
if (dev->data->mtu > RTE_ETHER_MTU ||
rctl_bsize < RTE_ETHER_MAX_LEN) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
@ -1861,14 +1856,14 @@ eth_em_rx_init(struct rte_eth_dev *dev)
if ((hw->mac.type == e1000_ich9lan ||
hw->mac.type == e1000_pch2lan ||
hw->mac.type == e1000_ich10lan) &&
rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
dev->data->mtu > RTE_ETHER_MTU) {
u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
}
if (hw->mac.type == e1000_pch2lan) {
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
if (dev->data->mtu > RTE_ETHER_MTU)
e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
else
e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
@ -1895,7 +1890,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
/*
* Configure support of jumbo frames, if any.
*/
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
if (dev->data->mtu > RTE_ETHER_MTU)
rctl |= E1000_RCTL_LPE;
else
rctl &= ~E1000_RCTL_LPE;

@ -1645,7 +1645,6 @@ igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_RSS_HASH;
@ -2332,7 +2331,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
* Configure support of jumbo frames, if any.
*/
max_len = dev->data->mtu + E1000_ETH_OVERHEAD;
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (dev->data->mtu > RTE_ETHER_MTU) {
rctl |= E1000_RCTL_LPE;
/*

@ -1916,7 +1916,6 @@ static int ena_infos_get(struct rte_eth_dev *dev,
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
tx_feat |= DEV_TX_OFFLOAD_MULTI_SEGS;
/* Inform framework about available features */

@ -210,8 +210,7 @@ enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
(DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_JUMBO_FRAME);
DEV_RX_OFFLOAD_KEEP_CRC);
return 0;
}

@ -209,7 +209,6 @@ int enic_get_vnic_config(struct enic *enic)
DEV_TX_OFFLOAD_TCP_TSO;
enic->rx_offload_capa =
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |

@ -1183,7 +1183,6 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
DEV_RX_OFFLOAD_HEADER_SPLIT |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
DEV_RX_OFFLOAD_SECURITY |
@ -1201,7 +1200,6 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
DEV_RX_OFFLOAD_HEADER_SPLIT |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
DEV_RX_OFFLOAD_SECURITY |

@ -1779,7 +1779,6 @@ static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_HEADER_SPLIT |
DEV_RX_OFFLOAD_RSS_HASH);
}

@ -738,7 +738,6 @@ hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_TCP_LRO |
DEV_RX_OFFLOAD_RSS_HASH;

@ -2686,7 +2686,6 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH |
DEV_RX_OFFLOAD_TCP_LRO);
info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |

@ -944,7 +944,6 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH |
DEV_RX_OFFLOAD_TCP_LRO);
info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |

@ -3730,7 +3730,6 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH;
dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;

@ -2901,7 +2901,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
rxq->max_pkt_len =
RTE_MIN(hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len,
data->mtu + I40E_ETH_OVERHEAD);
if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (data->mtu > RTE_ETHER_MTU) {
if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "

@ -588,7 +588,7 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
/* Check if the jumbo frame and maximum packet length are set
* correctly.
*/
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (dev->data->mtu > RTE_ETHER_MTU) {
if (max_pkt_len <= IAVF_ETH_MAX_LEN ||
max_pkt_len > IAVF_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@ -968,7 +968,6 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_RSS_HASH;

@ -72,7 +72,7 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
/* Check if the jumbo frame and maximum packet length are set
* correctly.
*/
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (dev_data->mtu > RTE_ETHER_MTU) {
if (max_pkt_len <= ICE_ETH_MAX_LEN ||
max_pkt_len > ICE_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@ -681,7 +681,6 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev,
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_RSS_HASH;
dev_info->tx_offload_capa =

@ -149,7 +149,6 @@ ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_RSS_HASH;

@ -3676,7 +3676,6 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_VLAN_FILTER;

@ -267,7 +267,6 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
struct ice_rlan_ctx rx_ctx;
enum ice_status err;
uint16_t buf_size;
struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
uint32_t rxdid = ICE_RXDID_COMMS_OVS;
uint32_t regval;
struct ice_adapter *ad = rxq->vsi->adapter;
@ -282,7 +281,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
frame_size);
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (dev_data->mtu > RTE_ETHER_MTU) {
if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "

@ -73,7 +73,6 @@ extern "C" {
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_SCTP_CKSUM | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_KEEP_CRC | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_RSS_HASH)

@ -1080,7 +1080,7 @@ igc_rx_init(struct rte_eth_dev *dev)
IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
/* Configure support of jumbo frames, if any. */
if ((offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) != 0)
if (dev->data->mtu > RTE_ETHER_MTU)
rctl |= IGC_RCTL_LPE;
else
rctl &= ~IGC_RCTL_LPE;

@ -414,7 +414,6 @@ ionic_dev_info_get(struct rte_eth_dev *eth_dev,
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_SCATTER |

@ -74,8 +74,7 @@ ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_JUMBO_FRAME;
DEV_RX_OFFLOAD_VLAN_FILTER;
dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa =

@ -6040,7 +6040,6 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t tx_rate)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_rxmode *rxmode;
uint32_t rf_dec, rf_int;
uint32_t bcnrc_val;
uint16_t link_speed = dev->data->dev_link.link_speed;
@ -6062,14 +6061,12 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
bcnrc_val = 0;
}
rxmode = &dev->data->dev_conf.rxmode;
/*
* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
* register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
* set as 0x4.
*/
if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
(dev->data->mtu + IXGBE_ETH_OVERHEAD >= IXGBE_MAX_JUMBO_FRAME_SIZE))
if (dev->data->mtu + IXGBE_ETH_OVERHEAD >= IXGBE_MAX_JUMBO_FRAME_SIZE)
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_JUMBO_FRAME);
else
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_DEFAULT);

@ -597,15 +597,10 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
if (max_frs < max_frame) {
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
if (max_frame > IXGBE_ETH_MAX_LEN) {
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
if (max_frame > IXGBE_ETH_MAX_LEN)
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
} else {
dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
else
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
}
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;

@ -3036,7 +3036,6 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_RSS_HASH;
@ -5079,7 +5078,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
/*
* Configure jumbo frame support, if any.
*/
if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (dev->data->mtu > RTE_ETHER_MTU) {
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
maxfrs &= 0x0000FFFF;

@ -684,7 +684,6 @@ mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
{
uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH;
if (priv->hw_csum)

@ -335,7 +335,6 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
struct mlx5_dev_config *config = &priv->config;
uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH);
if (!config->mprq.enabled)

@ -54,8 +54,7 @@
#define MRVL_NETA_MRU_TO_MTU(mru) ((mru) - MRVL_NETA_HDRS_LEN)
/** Rx offloads capabilities */
#define MVNETA_RX_OFFLOADS (DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_CHECKSUM)
#define MVNETA_RX_OFFLOADS (DEV_RX_OFFLOAD_CHECKSUM)
/** Tx offloads capabilities */
#define MVNETA_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \

@ -59,7 +59,6 @@
/** Port Rx offload capabilities */
#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_CHECKSUM)
/** Port Tx offloads capabilities */

@ -369,8 +369,7 @@ nfp_check_offloads(struct rte_eth_dev *dev)
ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
}
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
hw->mtu = dev->data->mtu;
hw->mtu = dev->data->mtu;
if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
@ -757,9 +756,6 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
};
/* All NFP devices support jumbo frames */
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;

@ -60,7 +60,6 @@
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_VLAN_FILTER)
#define OCTEONTX_TX_OFFLOADS ( \

@ -148,7 +148,6 @@
DEV_RX_OFFLOAD_SCTP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
DEV_RX_OFFLOAD_VLAN_STRIP | \
DEV_RX_OFFLOAD_VLAN_FILTER | \

@ -39,8 +39,7 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
devinfo->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
devinfo->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
devinfo->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
devinfo->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;

@ -953,12 +953,6 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
droq_pkt->l3_len = hdr_lens.l3_len;
droq_pkt->l4_len = hdr_lens.l4_len;
if ((droq_pkt->pkt_len > (RTE_ETHER_MAX_LEN + OTX_CUST_DATA_LEN)) &&
!(otx_ep->rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)) {
rte_pktmbuf_free(droq_pkt);
goto oq_read_fail;
}
if (droq_pkt->nb_segs > 1 &&
!(otx_ep->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
rte_pktmbuf_free(droq_pkt);

@ -1392,7 +1392,6 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
DEV_RX_OFFLOAD_TCP_LRO |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_RSS_HASH);

@ -941,8 +941,6 @@ sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
{
uint64_t caps = sa->priv.dp_rx->dev_offload_capa;
caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
return caps & sfc_rx_get_offload_mask(sa);
}

@ -40,7 +40,6 @@
#define NICVF_RX_OFFLOAD_CAPA ( \
DEV_RX_OFFLOAD_CHECKSUM | \
DEV_RX_OFFLOAD_VLAN_STRIP | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_RSS_HASH)

@ -1974,7 +1974,6 @@ txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_RSS_HASH |
DEV_RX_OFFLOAD_SCATTER;

@ -2547,7 +2547,6 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
host_features = VIRTIO_OPS(hw)->get_features(hw);
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
if (host_features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {

@ -54,7 +54,6 @@
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_TCP_LRO | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_RSS_HASH)
int vmxnet3_segs_dynfield_offset = -1;

@ -149,8 +149,7 @@ static struct rte_eth_conf port_conf = {
RTE_ETHER_CRC_LEN,
.split_hdr_size = 0,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME),
DEV_RX_OFFLOAD_SCATTER),
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,

@ -164,8 +164,7 @@ static struct rte_eth_conf port_conf = {
.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
RTE_ETHER_CRC_LEN,
.split_hdr_size = 0,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME),
.offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {

@ -2198,8 +2198,6 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
nb_rx_queue, nb_tx_queue);
if (mtu_size > RTE_ETHER_MTU)
local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
local_port_conf.rxmode.mtu = mtu_size;
if (multi_seg_required()) {

@ -112,7 +112,6 @@ static struct rte_eth_conf port_conf = {
.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
RTE_ETHER_CRC_LEN,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,

@ -789,11 +789,6 @@ kni_change_mtu_(uint16_t port_id, unsigned int new_mtu)
}
memcpy(&conf, &port_conf, sizeof(conf));
/* Set new MTU */
if (new_mtu > RTE_ETHER_MTU)
conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
conf.rxmode.mtu = new_mtu;
ret = rte_eth_dev_configure(port_id, 1, 1, &conf);

@ -2002,10 +2002,8 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
dev_info->max_mtu);
conf->rxmode.mtu = max_pkt_len - overhead_len;
if (conf->rxmode.mtu > RTE_ETHER_MTU) {
if (conf->rxmode.mtu > RTE_ETHER_MTU)
conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
}
return 0;
}

@ -730,10 +730,8 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
dev_info->max_mtu);
conf->rxmode.mtu = max_pkt_len - overhead_len;
if (conf->rxmode.mtu > RTE_ETHER_MTU) {
if (conf->rxmode.mtu > RTE_ETHER_MTU)
conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
}
return 0;
}

@ -2508,10 +2508,8 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
dev_info->max_mtu);
conf->rxmode.mtu = max_pkt_len - overhead_len;
if (conf->rxmode.mtu > RTE_ETHER_MTU) {
if (conf->rxmode.mtu > RTE_ETHER_MTU)
conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
}
return 0;
}

@ -986,10 +986,8 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
dev_info->max_mtu);
conf->rxmode.mtu = max_pkt_len - overhead_len;
if (conf->rxmode.mtu > RTE_ETHER_MTU) {
if (conf->rxmode.mtu > RTE_ETHER_MTU)
conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
}
return 0;
}

@ -3489,10 +3489,8 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
dev_info->max_mtu);
conf->rxmode.mtu = max_pkt_len - overhead_len;
if (conf->rxmode.mtu > RTE_ETHER_MTU) {
if (conf->rxmode.mtu > RTE_ETHER_MTU)
conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
}
return 0;
}

@ -631,11 +631,8 @@ us_vhost_parse_args(int argc, char **argv)
return -1;
}
mergeable = !!ret;
if (ret) {
vmdq_conf_default.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
if (ret)
vmdq_conf_default.rxmode.mtu = MAX_MTU;
}
break;
case OPT_STATS_NUM:

@ -121,7 +121,6 @@ static const struct {
RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
RTE_RX_OFFLOAD_BIT2STR(SCATTER),
RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
RTE_RX_OFFLOAD_BIT2STR(SECURITY),
@ -1476,13 +1475,6 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
goto rollback;
}
if ((dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
if (dev->data->dev_conf.rxmode.mtu < RTE_ETHER_MIN_MTU ||
dev->data->dev_conf.rxmode.mtu > RTE_ETHER_MTU)
/* Use default value */
dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
}
dev->data->mtu = dev->data->dev_conf.rxmode.mtu;
/*
@ -3647,7 +3639,6 @@ rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int ret;
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev;
int is_jumbo_frame_capable = 0;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@ -3675,27 +3666,12 @@ rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
frame_size = mtu + overhead_len;
if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
return -EINVAL;
if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) != 0)
is_jumbo_frame_capable = 1;
}
if (mtu > RTE_ETHER_MTU && is_jumbo_frame_capable == 0)
return -EINVAL;
ret = (*dev->dev_ops->mtu_set)(dev, mtu);
if (ret == 0) {
if (ret == 0)
dev->data->mtu = mtu;
/* switch to jumbo mode if needed */
if (mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
}
return eth_err(port_id, ret);
}

@ -1356,7 +1356,6 @@ struct rte_eth_conf {
#define DEV_RX_OFFLOAD_HEADER_SPLIT 0x00000100
#define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200
#define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
#define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
#define DEV_RX_OFFLOAD_SCATTER 0x00002000
/**
* Timestamp is set by the driver in RTE_MBUF_DYNFIELD_TIMESTAMP_NAME