ethdev: fix max Rx packet length
There is confusion about how the maximum Rx packet length is set; this patch aims to clarify it.

'rte_eth_dev_configure()' accepts the maximum Rx packet size via the 'uint32_t max_rx_pkt_len' field of the config struct 'struct rte_eth_conf'. 'rte_eth_dev_set_mtu()' can also be used to set the MTU, and the result is stored in '(struct rte_eth_dev)->data->mtu'.

These two APIs are related, but they work in a disconnected way: they store the configured values in different variables, which makes it hard to figure out which one to use, and having two different methods for related functionality is confusing for users. Other issues causing confusion are:

* The maximum transmission unit (MTU) is the payload of the Ethernet frame, while 'max_rx_pkt_len' is the size of the whole Ethernet frame. The difference is the Ethernet frame overhead, and this overhead may differ from device to device depending on what the device supports, like VLAN and QinQ.
* 'max_rx_pkt_len' is only valid when the application requests jumbo frames, which adds additional confusion, and some APIs and PMDs already disregard this documented behavior.
* When jumbo frames are enabled, 'max_rx_pkt_len' is a mandatory field, which adds configuration complexity for the application.

As a solution, both APIs now take the MTU as a parameter, and both save the result in the same variable, '(struct rte_eth_dev)->data->mtu'. For this, 'max_rx_pkt_len' is replaced by 'mtu', and it is always valid, independent of the jumbo frame offload.

For 'rte_eth_dev_configure()', 'dev->data->dev_conf.rxmode.mtu' is the user request; it should be used only within the configure function, and the result should be stored in '(struct rte_eth_dev)->data->mtu'. After that point both the application and the PMD use the MTU from this variable. When the application does not provide an MTU during 'rte_eth_dev_configure()', the default 'RTE_ETHER_MTU' value is used.

Additional clarification is provided on the scattered Rx configuration, in relation to the MTU and the Rx buffer size. The MTU is used to configure the device for the physical Rx/Tx size limitation; the Rx buffer is where received packets are stored, and many PMDs use the mbuf data buffer size as the Rx buffer size. PMDs compare the MTU against the Rx buffer size to decide whether to enable scattered Rx. If scattered Rx is not supported by the device, an MTU bigger than the Rx buffer size should fail.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Somnath Kotur <somnath.kotur@broadcom.com>
Acked-by: Huisong Li <lihuisong@huawei.com>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Rosen Xu <rosen.xu@intel.com>
Acked-by: Hyong Youb Kim <hyonkim@cisco.com>
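To make the unified flow above concrete, here is a minimal sketch (not part of the patch itself) that configures a port with an explicit MTU through the new 'rxmode.mtu' field and later changes it with 'rte_eth_dev_set_mtu()'; the helper name, port id and queue counts are illustrative.

#include <rte_ethdev.h>
#include <rte_ether.h>

/* Sketch only: configure a port with an explicit MTU via the new
 * 'rxmode.mtu' field, then change it at runtime with rte_eth_dev_set_mtu().
 * Port id and queue counts are placeholders.
 */
static int
configure_port_with_mtu(uint16_t port_id, uint16_t mtu)
{
	struct rte_eth_conf conf = { 0 };
	int ret;

	/* Requested MTU; leaving it 0 means the default RTE_ETHER_MTU
	 * is used, as described in the commit message.
	 */
	conf.rxmode.mtu = mtu;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;

	/* Later changes go through the same '(struct rte_eth_dev)->data->mtu'
	 * variable, so both configuration paths stay consistent.
	 */
	return rte_eth_dev_set_mtu(port_id, mtu);
}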
parent 24f1955d1e
commit 1bb4a528c4
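The scattered Rx and Rx buffer discussion above implies a calculation applications may need: deriving the device's Ethernet overhead from 'rte_eth_dev_info' and checking whether a desired frame length still fits a single mbuf. The sketch below mirrors that reasoning under stated assumptions; the helper name and the mempool parameter are hypothetical, used only to show the buffer-size check.

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_mbuf.h>

/* Sketch only: derive the device-specific Ethernet overhead from dev_info
 * and turn a desired max Rx frame length into the MTU to configure.
 * Returns 0 if the frame fits one Rx buffer, 1 if scattered Rx
 * (DEV_RX_OFFLOAD_SCATTER) would be needed, negative on error.
 */
static int
frame_len_to_mtu(uint16_t port_id, uint32_t frame_len,
		 struct rte_mempool *mp, uint16_t *mtu)
{
	struct rte_eth_dev_info dev_info;
	uint32_t overhead;
	uint16_t buf_size;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Overhead = frame size minus MTU; the PMD reports it implicitly
	 * through max_rx_pktlen and max_mtu (VLAN/QinQ aware). Fall back to
	 * plain header + CRC when the reported limits are not usable.
	 */
	if (dev_info.max_mtu != UINT16_MAX &&
	    dev_info.max_rx_pktlen > dev_info.max_mtu)
		overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
	else
		overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	if (frame_len <= overhead)
		return -1;
	*mtu = (uint16_t)(frame_len - overhead);

	/* PMDs compare the resulting frame size against the Rx buffer size
	 * (usually the mbuf data room) to decide on scattered Rx; without
	 * scatter support, a larger MTU is expected to be rejected.
	 */
	buf_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
	if (frame_len > buf_size)
		return 1;

	return 0;
}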
@@ -669,7 +669,6 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.rx_adv_conf = {

@@ -197,8 +197,9 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
return -EINVAL;
}

port_conf.rxmode.max_rx_pkt_len = opt->max_pkt_sz;
if (opt->max_pkt_sz > RTE_ETHER_MAX_LEN)
port_conf.rxmode.mtu = opt->max_pkt_sz - RTE_ETHER_HDR_LEN -
RTE_ETHER_CRC_LEN;
if (port_conf.rxmode.mtu > RTE_ETHER_MTU)
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;

t->internal_port = 1;

@@ -1880,45 +1880,38 @@ cmd_config_max_pkt_len_parsed(void *parsed_result,
__rte_unused void *data)
{
struct cmd_config_max_pkt_len_result *res = parsed_result;
uint32_t max_rx_pkt_len_backup = 0;
portid_t pid;
portid_t port_id;
int ret;

if (strcmp(res->name, "max-pkt-len") != 0) {
printf("Unknown parameter\n");
return;
}

if (!all_ports_stopped()) {
fprintf(stderr, "Please stop all ports first\n");
return;
}

RTE_ETH_FOREACH_DEV(pid) {
struct rte_port *port = &ports[pid];
RTE_ETH_FOREACH_DEV(port_id) {
struct rte_port *port = &ports[port_id];

if (!strcmp(res->name, "max-pkt-len")) {
if (res->value < RTE_ETHER_MIN_LEN) {
fprintf(stderr,
"max-pkt-len can not be less than %d\n",
RTE_ETHER_MIN_LEN);
return;
}
if (res->value == port->dev_conf.rxmode.max_rx_pkt_len)
return;

ret = eth_dev_info_get_print_err(pid, &port->dev_info);
if (ret != 0) {
fprintf(stderr,
"rte_eth_dev_info_get() failed for port %u\n",
pid);
return;
}

max_rx_pkt_len_backup = port->dev_conf.rxmode.max_rx_pkt_len;

port->dev_conf.rxmode.max_rx_pkt_len = res->value;
if (update_jumbo_frame_offload(pid) != 0)
port->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len_backup;
} else {
fprintf(stderr, "Unknown parameter\n");
if (res->value < RTE_ETHER_MIN_LEN) {
fprintf(stderr,
"max-pkt-len can not be less than %d\n",
RTE_ETHER_MIN_LEN);
return;
}

ret = eth_dev_info_get_print_err(port_id, &port->dev_info);
if (ret != 0) {
fprintf(stderr,
"rte_eth_dev_info_get() failed for port %u\n",
port_id);
return;
}

update_jumbo_frame_offload(port_id, res->value);
}

init_port_config();
@@ -1209,7 +1209,6 @@ port_mtu_set(portid_t port_id, uint16_t mtu)
int diag;
struct rte_port *rte_port = &ports[port_id];
struct rte_eth_dev_info dev_info;
uint16_t eth_overhead;
int ret;

if (port_id_is_invalid(port_id, ENABLED_WARN))

@@ -1226,21 +1225,18 @@ port_mtu_set(portid_t port_id, uint16_t mtu)
return;
}
diag = rte_eth_dev_set_mtu(port_id, mtu);
if (diag)
if (diag != 0) {
fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
/*
* Ether overhead in driver is equal to the difference of
* max_rx_pktlen and max_mtu in rte_eth_dev_info when the
* device supports jumbo frame.
*/
eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
if (mtu > RTE_ETHER_MTU) {
return;
}

rte_port->dev_conf.rxmode.mtu = mtu;

if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (mtu > RTE_ETHER_MTU)
rte_port->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
rte_port->dev_conf.rxmode.max_rx_pkt_len =
mtu + eth_overhead;
} else
else
rte_port->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
}

@@ -951,7 +951,7 @@ launch_args_parse(int argc, char** argv)
if (!strcmp(lgopts[opt_idx].name, "max-pkt-len")) {
n = atoi(optarg);
if (n >= RTE_ETHER_MIN_LEN)
rx_mode.max_rx_pkt_len = (uint32_t) n;
max_rx_pkt_len = n;
else
rte_exit(EXIT_FAILURE,
"Invalid max-pkt-len=%d - should be > %d\n",

@@ -219,6 +219,11 @@ unsigned int xstats_display_num; /**< Size of extended statistics to show */
*/
uint8_t f_quit;

/*
* Max Rx frame size, set by '--max-pkt-len' parameter.
*/
uint32_t max_rx_pkt_len;

/*
* Configuration of packet segments used to scatter received packets
* if some of split features is configured.

@@ -451,13 +456,7 @@ lcoreid_t latencystats_lcore_id = -1;
/*
* Ethernet device configuration.
*/
struct rte_eth_rxmode rx_mode = {
/* Default maximum frame length.
* Zero is converted to "RTE_ETHER_MTU + PMD Ethernet overhead"
* in init_config().
*/
.max_rx_pkt_len = 0,
};
struct rte_eth_rxmode rx_mode;

struct rte_eth_txmode tx_mode = {
.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
@@ -1542,11 +1541,24 @@ check_nb_hairpinq(queueid_t hairpinq)
return 0;
}

static int
get_eth_overhead(struct rte_eth_dev_info *dev_info)
{
uint32_t eth_overhead;

if (dev_info->max_mtu != UINT16_MAX &&
dev_info->max_rx_pktlen > dev_info->max_mtu)
eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
else
eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

return eth_overhead;
}

static void
init_config_port_offloads(portid_t pid, uint32_t socket_id)
{
struct rte_port *port = &ports[pid];
uint16_t data_size;
int ret;
int i;

@@ -1560,7 +1572,7 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
if (ret != 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");

ret = update_jumbo_frame_offload(pid);
ret = update_jumbo_frame_offload(pid, 0);
if (ret != 0)
fprintf(stderr,
"Updating jumbo frame offload failed for port %u\n",

@@ -1580,6 +1592,10 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
if (eth_link_speed)
port->dev_conf.link_speeds = eth_link_speed;

if (max_rx_pkt_len)
port->dev_conf.rxmode.mtu = max_rx_pkt_len -
get_eth_overhead(&port->dev_info);

/* set flag to initialize port/queue */
port->need_reconfig = 1;
port->need_reconfig_queues = 1;

@@ -1592,14 +1608,20 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
*/
if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
data_size = rx_mode.max_rx_pkt_len /
port->dev_info.rx_desc_lim.nb_mtu_seg_max;
uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
uint16_t mtu;

if ((data_size + RTE_PKTMBUF_HEADROOM) > mbuf_data_size[0]) {
mbuf_data_size[0] = data_size + RTE_PKTMBUF_HEADROOM;
TESTPMD_LOG(WARNING,
"Configured mbuf size of the first segment %hu\n",
mbuf_data_size[0]);
if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
uint16_t data_size = (mtu + eth_overhead) /
port->dev_info.rx_desc_lim.nb_mtu_seg_max;
uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;

if (buffer_size > mbuf_data_size[0]) {
mbuf_data_size[0] = buffer_size;
TESTPMD_LOG(WARNING,
"Configured mbuf size of the first segment %hu\n",
mbuf_data_size[0]);
}
}
}
}

@@ -2735,6 +2757,7 @@ start_port(portid_t pid)
pi);
return -1;
}

/* configure port */
diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
nb_txq + nb_hairpinq,

@@ -3669,44 +3692,45 @@ rxtx_port_config(struct rte_port *port)

/*
* Helper function to arrange max_rx_pktlen value and JUMBO_FRAME offload,
* MTU is also aligned if JUMBO_FRAME offload is not set.
* MTU is also aligned.
*
* port->dev_info should be set before calling this function.
*
* if 'max_rx_pktlen' is zero, it is set to current device value, "MTU +
* ETH_OVERHEAD". This is useful to update flags but not MTU value.
*
* return 0 on success, negative on error
*/
int
update_jumbo_frame_offload(portid_t portid)
update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen)
{
struct rte_port *port = &ports[portid];
uint32_t eth_overhead;
uint64_t rx_offloads;
int ret;
uint16_t mtu, new_mtu;
bool on;

/* Update the max_rx_pkt_len to have MTU as RTE_ETHER_MTU */
if (port->dev_info.max_mtu != UINT16_MAX &&
port->dev_info.max_rx_pktlen > port->dev_info.max_mtu)
eth_overhead = port->dev_info.max_rx_pktlen -
port->dev_info.max_mtu;
else
eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
eth_overhead = get_eth_overhead(&port->dev_info);

if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
printf("Failed to get MTU for port %u\n", portid);
return -1;
}

if (max_rx_pktlen == 0)
max_rx_pktlen = mtu + eth_overhead;

rx_offloads = port->dev_conf.rxmode.offloads;
new_mtu = max_rx_pktlen - eth_overhead;

/* Default config value is 0 to use PMD specific overhead */
if (port->dev_conf.rxmode.max_rx_pkt_len == 0)
port->dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + eth_overhead;

if (port->dev_conf.rxmode.max_rx_pkt_len <= RTE_ETHER_MTU + eth_overhead) {
if (new_mtu <= RTE_ETHER_MTU) {
rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
on = false;
} else {
if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
fprintf(stderr,
"Frame size (%u) is not supported by port %u\n",
port->dev_conf.rxmode.max_rx_pkt_len,
portid);
max_rx_pktlen, portid);
return -1;
}
rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;

@@ -3727,19 +3751,18 @@ update_jumbo_frame_offload(portid_t portid)
}
}

/* If JUMBO_FRAME is set MTU conversion done by ethdev layer,
* if unset do it here
*/
if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
ret = eth_dev_set_mtu_mp(portid,
port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead);
if (ret)
fprintf(stderr,
"Failed to set MTU to %u for port %u\n",
port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead,
portid);
if (mtu == new_mtu)
return 0;

if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
fprintf(stderr,
"Failed to set MTU to %u for port %u\n",
new_mtu, portid);
return -1;
}

port->dev_conf.rxmode.mtu = new_mtu;

return 0;
}
@@ -467,6 +467,8 @@ extern uint8_t bitrate_enabled;

extern struct rte_fdir_conf fdir_conf;

extern uint32_t max_rx_pkt_len;

/*
* Configuration of packet segments used to scatter received packets
* if some of split features is configured.

@@ -1043,7 +1045,7 @@ uint16_t tx_pkt_set_dynf(uint16_t port_id, __rte_unused uint16_t queue,
__rte_unused void *user_param);
void add_tx_dynf_callback(portid_t portid);
void remove_tx_dynf_callback(portid_t portid);
int update_jumbo_frame_offload(portid_t portid);
int update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen);

/*
* Work-around of a compilation error with ICC on invocations of the
@ -136,7 +136,6 @@ static struct rte_eth_conf default_pmd_conf = {
|
||||
.rxmode = {
|
||||
.mq_mode = ETH_MQ_RX_NONE,
|
||||
.split_hdr_size = 0,
|
||||
.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
|
||||
},
|
||||
.txmode = {
|
||||
.mq_mode = ETH_MQ_TX_NONE,
|
||||
|
@ -108,7 +108,6 @@ static struct link_bonding_unittest_params test_params = {
|
||||
static struct rte_eth_conf default_pmd_conf = {
|
||||
.rxmode = {
|
||||
.mq_mode = ETH_MQ_RX_NONE,
|
||||
.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
|
||||
.split_hdr_size = 0,
|
||||
},
|
||||
.txmode = {
|
||||
|
@ -81,7 +81,6 @@ static struct link_bonding_rssconf_unittest_params test_params = {
|
||||
static struct rte_eth_conf default_pmd_conf = {
|
||||
.rxmode = {
|
||||
.mq_mode = ETH_MQ_RX_NONE,
|
||||
.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
|
||||
.split_hdr_size = 0,
|
||||
},
|
||||
.txmode = {
|
||||
@ -93,7 +92,6 @@ static struct rte_eth_conf default_pmd_conf = {
|
||||
static struct rte_eth_conf rss_pmd_conf = {
|
||||
.rxmode = {
|
||||
.mq_mode = ETH_MQ_RX_RSS,
|
||||
.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
|
||||
.split_hdr_size = 0,
|
||||
},
|
||||
.txmode = {
|
||||
|
@ -63,7 +63,6 @@ static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
|
||||
static struct rte_eth_conf port_conf = {
|
||||
.rxmode = {
|
||||
.mq_mode = ETH_MQ_RX_NONE,
|
||||
.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
|
||||
.split_hdr_size = 0,
|
||||
},
|
||||
.txmode = {
|
||||
|
@ -335,7 +335,7 @@ Maximum packet length
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The DPAA SoC family support a maximum of a 10240 jumbo frame. The value
|
||||
is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
|
||||
is fixed and cannot be changed. So, even when the ``rxmode.mtu``
|
||||
member of ``struct rte_eth_conf`` is set to a value lower than 10240, frames
|
||||
up to 10240 bytes can still reach the host interface.
|
||||
|
||||
|
@ -545,7 +545,7 @@ Maximum packet length
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The DPAA2 SoC family support a maximum of a 10240 jumbo frame. The value
|
||||
is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
|
||||
is fixed and cannot be changed. So, even when the ``rxmode.mtu``
|
||||
member of ``struct rte_eth_conf`` is set to a value lower than 10240, frames
|
||||
up to 10240 bytes can still reach the host interface.
|
||||
|
||||
|
@ -166,7 +166,7 @@ Jumbo frame
|
||||
Supports Rx jumbo frames.
|
||||
|
||||
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_JUMBO_FRAME``.
|
||||
``dev_conf.rxmode.max_rx_pkt_len``.
|
||||
``dev_conf.rxmode.mtu``.
|
||||
* **[related] rte_eth_dev_info**: ``max_rx_pktlen``.
|
||||
* **[related] API**: ``rte_eth_dev_set_mtu()``.
|
||||
|
||||
|
@ -141,7 +141,7 @@ Maximum packet length
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The FM10000 family of NICS support a maximum of a 15K jumbo frame. The value
|
||||
is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
|
||||
is fixed and cannot be changed. So, even when the ``rxmode.mtu``
|
||||
member of ``struct rte_eth_conf`` is set to a value lower than 15364, frames
|
||||
up to 15364 bytes can still reach the host interface.
|
||||
|
||||
|
@ -606,9 +606,9 @@ Driver options
|
||||
and each stride receives one packet. MPRQ can improve throughput for
|
||||
small-packet traffic.
|
||||
|
||||
When MPRQ is enabled, max_rx_pkt_len can be larger than the size of
|
||||
When MPRQ is enabled, MTU can be larger than the size of
|
||||
user-provided mbuf even if DEV_RX_OFFLOAD_SCATTER isn't enabled. PMD will
|
||||
configure large stride size enough to accommodate max_rx_pkt_len as long as
|
||||
configure large stride size enough to accommodate MTU as long as
|
||||
device allows. Note that this can waste system memory compared to enabling Rx
|
||||
scatter and multi-segment packet.
|
||||
|
||||
|
@ -157,7 +157,7 @@ Maximum packet length
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The OCTEON TX SoC family NICs support a maximum of a 32K jumbo frame. The value
|
||||
is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
|
||||
is fixed and cannot be changed. So, even when the ``rxmode.mtu``
|
||||
member of ``struct rte_eth_conf`` is set to a value lower than 32k, frames
|
||||
up to 32k bytes can still reach the host interface.
|
||||
|
||||
|
@ -392,7 +392,7 @@ Maximum packet length
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The ThunderX SoC family NICs support a maximum of a 9K jumbo frame. The value
|
||||
is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
|
||||
is fixed and cannot be changed. So, even when the ``rxmode.mtu``
|
||||
member of ``struct rte_eth_conf`` is set to a value lower than 9200, frames
|
||||
up to 9200 bytes can still reach the host interface.
|
||||
|
||||
|
@@ -92,31 +92,6 @@ Deprecation Notices
In 19.11 PMDs will still update the field even when the offload is not
enabled.

* ethdev: ``uint32_t max_rx_pkt_len`` field of ``struct rte_eth_rxmode``, will be
replaced by a new ``uint32_t mtu`` field of ``struct rte_eth_conf`` in v21.11.
The new ``mtu`` field will be used to configure the initial device MTU via
``rte_eth_dev_configure()`` API.
Later MTU can be changed by ``rte_eth_dev_set_mtu()`` API as done now.
The existing ``(struct rte_eth_dev)->data->mtu`` variable will be used to store
the configured ``mtu`` value,
and this new ``(struct rte_eth_dev)->data->dev_conf.mtu`` variable will
be used to store the user configuration request.
Unlike ``max_rx_pkt_len``, which was valid only when ``JUMBO_FRAME`` enabled,
``mtu`` field will be always valid.
When ``mtu`` config is not provided by the application, default ``RTE_ETHER_MTU``
value will be used.
``(struct rte_eth_dev)->data->mtu`` should be updated after MTU set successfully,
either by ``rte_eth_dev_configure()`` or ``rte_eth_dev_set_mtu()``.

An application may need to configure device for a specific Rx packet size, like for
cases ``DEV_RX_OFFLOAD_SCATTER`` is not supported and device received packet size
can't be bigger than Rx buffer size.
To cover these cases an application needs to know the device packet overhead to be
able to calculate the ``mtu`` corresponding to a Rx buffer size, for this
``(struct rte_eth_dev_info).max_rx_pktlen`` will be kept,
the device packet overhead can be calculated as:
``(struct rte_eth_dev_info).max_rx_pktlen - (struct rte_eth_dev_info).max_mtu``

* ethdev: Announce moving from dedicated modify function for each field,
to using the general ``rte_flow_modify_field`` action.
@ -162,12 +162,7 @@ Forwarding application is shown below:
|
||||
:end-before: >8 End of initializing a given port.
|
||||
|
||||
The Ethernet ports are configured with default settings using the
|
||||
``rte_eth_dev_configure()`` function and the ``port_conf_default`` struct.
|
||||
|
||||
.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
|
||||
:language: c
|
||||
:start-after: Ethernet ports configured with default settings using struct. 8<
|
||||
:end-before: >8 End of configuration of Ethernet ports.
|
||||
``rte_eth_dev_configure()`` function.
|
||||
|
||||
For this example the ports are set up with 1 RX and 1 TX queue using the
|
||||
``rte_eth_rx_queue_setup()`` and ``rte_eth_tx_queue_setup()`` functions.
|
||||
|
@ -65,7 +65,7 @@ The application has a number of command line options::
|
||||
[--lookup LOOKUP_METHOD]
|
||||
--config(port,queue,lcore)[,(port,queue,lcore)]
|
||||
[--eth-dest=X,MM:MM:MM:MM:MM:MM]
|
||||
[--enable-jumbo [--max-pkt-len PKTLEN]]
|
||||
[--max-pkt-len PKTLEN]
|
||||
[--no-numa]
|
||||
[--hash-entry-num]
|
||||
[--ipv6]
|
||||
@ -95,9 +95,7 @@ Where,
|
||||
|
||||
* ``--eth-dest=X,MM:MM:MM:MM:MM:MM:`` Optional, ethernet destination for port X.
|
||||
|
||||
* ``--enable-jumbo:`` Optional, enables jumbo frames.
|
||||
|
||||
* ``--max-pkt-len:`` Optional, under the premise of enabling jumbo, maximum packet length in decimal (64-9600).
|
||||
* ``--max-pkt-len:`` Optional, maximum packet length in decimal (64-9600).
|
||||
|
||||
* ``--no-numa:`` Optional, disables numa awareness.
|
||||
|
||||
|
@ -236,7 +236,7 @@ The application has a number of command line options:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
./<build_dir>/examples/dpdk-l3fwd-acl [EAL options] -- -p PORTMASK [-P] --config(port,queue,lcore)[,(port,queue,lcore)] --rule_ipv4 FILENAME --rule_ipv6 FILENAME [--alg=<val>] [--enable-jumbo [--max-pkt-len PKTLEN]] [--no-numa] [--eth-dest=X,MM:MM:MM:MM:MM:MM]
|
||||
./<build_dir>/examples/dpdk-l3fwd-acl [EAL options] -- -p PORTMASK [-P] --config(port,queue,lcore)[,(port,queue,lcore)] --rule_ipv4 FILENAME --rule_ipv6 FILENAME [--alg=<val>] [--max-pkt-len PKTLEN] [--no-numa] [--eth-dest=X,MM:MM:MM:MM:MM:MM]
|
||||
|
||||
|
||||
where,
|
||||
@ -255,8 +255,6 @@ where,
|
||||
* --alg=<val>: optional, ACL classify method to use, one of:
|
||||
``scalar|sse|avx2|neon|altivec|avx512x16|avx512x32``
|
||||
|
||||
* --enable-jumbo: optional, enables jumbo frames
|
||||
|
||||
* --max-pkt-len: optional, maximum packet length in decimal (64-9600)
|
||||
|
||||
* --no-numa: optional, disables numa awareness
|
||||
|
@ -48,7 +48,7 @@ The application has a number of command line options similar to l3fwd::
|
||||
[-P]
|
||||
--config(port,queue,lcore)[,(port,queue,lcore)]
|
||||
[--eth-dest=X,MM:MM:MM:MM:MM:MM]
|
||||
[--enable-jumbo [--max-pkt-len PKTLEN]]
|
||||
[--max-pkt-len PKTLEN]
|
||||
[--no-numa]
|
||||
[--per-port-pool]
|
||||
|
||||
@ -63,9 +63,7 @@ Where,
|
||||
|
||||
* ``--eth-dest=X,MM:MM:MM:MM:MM:MM:`` Optional, ethernet destination for port X.
|
||||
|
||||
* ``--enable-jumbo:`` Optional, enables jumbo frames.
|
||||
|
||||
* ``--max-pkt-len:`` Optional, under the premise of enabling jumbo, maximum packet length in decimal (64-9600).
|
||||
* ``--max-pkt-len:`` Optional, maximum packet length in decimal (64-9600).
|
||||
|
||||
* ``--no-numa:`` Optional, disables numa awareness.
|
||||
|
||||
|
@ -88,7 +88,7 @@ The application has a number of command line options:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
./<build_dir>/examples/dpdk-l3fwd_power [EAL options] -- -p PORTMASK [-P] --config(port,queue,lcore)[,(port,queue,lcore)] [--enable-jumbo [--max-pkt-len PKTLEN]] [--no-numa]
|
||||
./<build_dir>/examples/dpdk-l3fwd_power [EAL options] -- -p PORTMASK [-P] --config(port,queue,lcore)[,(port,queue,lcore)] [--max-pkt-len PKTLEN] [--no-numa]
|
||||
|
||||
where,
|
||||
|
||||
@ -99,8 +99,6 @@ where,
|
||||
|
||||
* --config (port,queue,lcore)[,(port,queue,lcore)]: determines which queues from which ports are mapped to which cores.
|
||||
|
||||
* --enable-jumbo: optional, enables jumbo frames
|
||||
|
||||
* --max-pkt-len: optional, maximum packet length in decimal (64-9600)
|
||||
|
||||
* --no-numa: optional, disables numa awareness
|
||||
|
@ -59,7 +59,7 @@ The application has a number of command line options::
|
||||
-p PORTMASK [-P]
|
||||
--rx(port,queue,lcore,thread)[,(port,queue,lcore,thread)]
|
||||
--tx(lcore,thread)[,(lcore,thread)]
|
||||
[--enable-jumbo] [--max-pkt-len PKTLEN]] [--no-numa]
|
||||
[--max-pkt-len PKTLEN] [--no-numa]
|
||||
[--hash-entry-num] [--ipv6] [--no-lthreads] [--stat-lcore lcore]
|
||||
[--parse-ptype]
|
||||
|
||||
@ -80,8 +80,6 @@ Where:
|
||||
the lcore the thread runs on, and the id of RX thread with which it is
|
||||
associated. The parameters are explained below.
|
||||
|
||||
* ``--enable-jumbo``: optional, enables jumbo frames.
|
||||
|
||||
* ``--max-pkt-len``: optional, maximum packet length in decimal (64-9600).
|
||||
|
||||
* ``--no-numa``: optional, disables numa awareness.
|
||||
|
@ -106,12 +106,7 @@ Forwarding application is shown below:
|
||||
:end-before: >8 End of main functional part of port initialization.
|
||||
|
||||
The Ethernet ports are configured with default settings using the
|
||||
``rte_eth_dev_configure()`` function and the ``port_conf_default`` struct:
|
||||
|
||||
.. literalinclude:: ../../../examples/skeleton/basicfwd.c
|
||||
:language: c
|
||||
:start-after: Configuration of ethernet ports. 8<
|
||||
:end-before: >8 End of configuration of ethernet ports.
|
||||
``rte_eth_dev_configure()`` function.
|
||||
|
||||
For this example the ports are set up with 1 RX and 1 TX queue using the
|
||||
``rte_eth_rx_queue_setup()`` and ``rte_eth_tx_queue_setup()`` functions.
|
||||
|
@ -1636,9 +1636,6 @@ atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
|
||||
return -EINVAL;
|
||||
|
||||
/* update max frame size */
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1059,17 +1059,18 @@ static int
|
||||
avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
|
||||
struct avp_dev *avp)
|
||||
{
|
||||
unsigned int max_rx_pkt_len;
|
||||
unsigned int max_rx_pktlen;
|
||||
|
||||
max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
max_rx_pktlen = eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
|
||||
RTE_ETHER_CRC_LEN;
|
||||
|
||||
if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
|
||||
(max_rx_pkt_len > avp->host_mbuf_size)) {
|
||||
if (max_rx_pktlen > avp->guest_mbuf_size ||
|
||||
max_rx_pktlen > avp->host_mbuf_size) {
|
||||
/*
|
||||
* If the guest MTU is greater than either the host or guest
|
||||
* buffers then chained mbufs have to be enabled in the TX
|
||||
* direction. It is assumed that the application will not need
|
||||
* to send packets larger than their max_rx_pkt_len (MRU).
|
||||
* to send packets larger than their MTU.
|
||||
*/
|
||||
return 1;
|
||||
}
|
||||
@ -1124,7 +1125,7 @@ avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
|
||||
|
||||
PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
|
||||
avp->max_rx_pkt_len,
|
||||
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
|
||||
eth_dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN,
|
||||
avp->host_mbuf_size,
|
||||
avp->guest_mbuf_size);
|
||||
|
||||
@ -1889,8 +1890,8 @@ avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
|
||||
* function; send it truncated to avoid the performance
|
||||
* hit of having to manage returning the already
|
||||
* allocated buffer to the free list. This should not
|
||||
* happen since the application should have set the
|
||||
* max_rx_pkt_len based on its MTU and it should be
|
||||
* happen since the application should have not send
|
||||
* packages larger than its MTU and it should be
|
||||
* policing its own packet sizes.
|
||||
*/
|
||||
txq->errors++;
|
||||
|
@ -350,7 +350,7 @@ axgbe_dev_start(struct rte_eth_dev *dev)
|
||||
struct axgbe_port *pdata = dev->data->dev_private;
|
||||
int ret;
|
||||
struct rte_eth_dev_data *dev_data = dev->data;
|
||||
uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
uint16_t max_pkt_len;
|
||||
|
||||
dev->dev_ops = &axgbe_eth_dev_ops;
|
||||
|
||||
@ -383,6 +383,8 @@ axgbe_dev_start(struct rte_eth_dev *dev)
|
||||
|
||||
rte_bit_relaxed_clear32(AXGBE_STOPPED, &pdata->dev_state);
|
||||
rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
|
||||
|
||||
max_pkt_len = dev_data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
|
||||
if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
|
||||
max_pkt_len > pdata->rx_buf_size)
|
||||
dev_data->scattered_rx = 1;
|
||||
@ -1490,7 +1492,7 @@ static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
dev->data->port_id);
|
||||
return -EBUSY;
|
||||
}
|
||||
if (frame_size > AXGBE_ETH_MAX_LEN) {
|
||||
if (mtu > RTE_ETHER_MTU) {
|
||||
dev->data->dev_conf.rxmode.offloads |=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
val = 1;
|
||||
@ -1500,7 +1502,6 @@ static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
val = 0;
|
||||
}
|
||||
AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -175,16 +175,12 @@ static int
|
||||
bnx2x_dev_configure(struct rte_eth_dev *dev)
|
||||
{
|
||||
struct bnx2x_softc *sc = dev->data->dev_private;
|
||||
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
|
||||
|
||||
int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
|
||||
|
||||
PMD_INIT_FUNC_TRACE(sc);
|
||||
|
||||
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
|
||||
sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
dev->data->mtu = sc->mtu;
|
||||
}
|
||||
sc->mtu = dev->data->dev_conf.rxmode.mtu;
|
||||
|
||||
if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
|
||||
PMD_DRV_LOG(ERR, sc, "The number of TX queues is greater than number of RX queues");
|
||||
|
@ -1161,13 +1161,8 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
|
||||
rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
|
||||
eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
|
||||
|
||||
if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
|
||||
eth_dev->data->mtu =
|
||||
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
|
||||
RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
|
||||
BNXT_NUM_VLANS;
|
||||
bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
|
||||
}
|
||||
bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
|
||||
|
||||
return 0;
|
||||
|
||||
resource_error:
|
||||
@ -1205,6 +1200,7 @@ void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
|
||||
*/
|
||||
static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
|
||||
{
|
||||
uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU;
|
||||
uint16_t buf_size;
|
||||
int i;
|
||||
|
||||
@ -1219,7 +1215,7 @@ static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
|
||||
|
||||
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
|
||||
RTE_PKTMBUF_HEADROOM);
|
||||
if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
|
||||
if (eth_dev->data->mtu + overhead > buf_size)
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
@ -3030,6 +3026,7 @@ bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
|
||||
|
||||
int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
|
||||
{
|
||||
uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU;
|
||||
struct bnxt *bp = eth_dev->data->dev_private;
|
||||
uint32_t new_pkt_size;
|
||||
uint32_t rc = 0;
|
||||
@ -3043,8 +3040,7 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
|
||||
if (!eth_dev->data->nb_rx_queues)
|
||||
return rc;
|
||||
|
||||
new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
|
||||
VLAN_TAG_SIZE * BNXT_NUM_VLANS;
|
||||
new_pkt_size = new_mtu + overhead;
|
||||
|
||||
/*
|
||||
* Disallow any MTU change that would require scattered receive support
|
||||
@ -3071,7 +3067,7 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
|
||||
}
|
||||
|
||||
/* Is there a change in mtu setting? */
|
||||
if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size)
|
||||
if (eth_dev->data->mtu == new_mtu)
|
||||
return rc;
|
||||
|
||||
for (i = 0; i < bp->nr_vnics; i++) {
|
||||
@ -3093,9 +3089,6 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
|
||||
}
|
||||
}
|
||||
|
||||
if (!rc)
|
||||
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
|
||||
|
||||
if (bnxt_hwrm_config_host_mtu(bp))
|
||||
PMD_DRV_LOG(WARNING, "Failed to configure host MTU\n");
|
||||
|
||||
|
@ -1721,8 +1721,8 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
|
||||
slave_eth_dev->data->dev_conf.rxmode.offloads &=
|
||||
~DEV_RX_OFFLOAD_VLAN_FILTER;
|
||||
|
||||
slave_eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
|
||||
bonded_eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
slave_eth_dev->data->dev_conf.rxmode.mtu =
|
||||
bonded_eth_dev->data->dev_conf.rxmode.mtu;
|
||||
|
||||
if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME)
|
||||
|
@ -209,7 +209,7 @@ nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
|
||||
mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
|
||||
buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
|
||||
|
||||
if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
|
||||
if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
|
||||
dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
|
||||
dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
|
||||
}
|
||||
@ -220,18 +220,13 @@ nix_recalc_mtu(struct rte_eth_dev *eth_dev)
|
||||
{
|
||||
struct rte_eth_dev_data *data = eth_dev->data;
|
||||
struct cnxk_eth_rxq_sp *rxq;
|
||||
uint16_t mtu;
|
||||
int rc;
|
||||
|
||||
rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
|
||||
/* Setup scatter mode if needed by jumbo */
|
||||
nix_enable_mseg_on_jumbo(rxq);
|
||||
|
||||
/* Setup MTU based on max_rx_pkt_len */
|
||||
mtu = data->dev_conf.rxmode.max_rx_pkt_len - CNXK_NIX_L2_OVERHEAD +
|
||||
CNXK_NIX_MAX_VTAG_ACT_SIZE;
|
||||
|
||||
rc = cnxk_nix_mtu_set(eth_dev, mtu);
|
||||
rc = cnxk_nix_mtu_set(eth_dev, data->mtu);
|
||||
if (rc)
|
||||
plt_err("Failed to set default MTU size, rc=%d", rc);
|
||||
|
||||
|
@ -440,16 +440,10 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
|
||||
goto exit;
|
||||
}
|
||||
|
||||
frame_size += RTE_ETHER_CRC_LEN;
|
||||
|
||||
if (frame_size > RTE_ETHER_MAX_LEN)
|
||||
if (mtu > RTE_ETHER_MTU)
|
||||
dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
|
||||
/* Update max_rx_pkt_len */
|
||||
data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
|
||||
exit:
|
||||
return rc;
|
||||
}
|
||||
|
@ -310,11 +310,11 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
|
||||
return err;
|
||||
|
||||
/* Must accommodate at least RTE_ETHER_MIN_MTU */
|
||||
if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
|
||||
if (mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
|
||||
return -EINVAL;
|
||||
|
||||
/* set to jumbo mode if needed */
|
||||
if (new_mtu > CXGBE_ETH_MAX_LEN)
|
||||
if (mtu > RTE_ETHER_MTU)
|
||||
eth_dev->data->dev_conf.rxmode.offloads |=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
@ -323,9 +323,6 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
|
||||
|
||||
err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
|
||||
-1, -1, true);
|
||||
if (!err)
|
||||
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -623,7 +620,8 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
|
||||
const struct rte_eth_rxconf *rx_conf __rte_unused,
|
||||
struct rte_mempool *mp)
|
||||
{
|
||||
unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
unsigned int pkt_len = eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
|
||||
RTE_ETHER_CRC_LEN;
|
||||
struct port_info *pi = eth_dev->data->dev_private;
|
||||
struct adapter *adapter = pi->adapter;
|
||||
struct rte_eth_dev_info dev_info;
|
||||
@ -682,7 +680,7 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
|
||||
rxq->fl.size = temp_nb_desc;
|
||||
|
||||
/* Set to jumbo mode if necessary */
|
||||
if (pkt_len > CXGBE_ETH_MAX_LEN)
|
||||
if (eth_dev->data->mtu > RTE_ETHER_MTU)
|
||||
eth_dev->data->dev_conf.rxmode.offloads |=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
|
@ -1661,8 +1661,7 @@ int cxgbe_link_start(struct port_info *pi)
|
||||
unsigned int mtu;
|
||||
int ret;
|
||||
|
||||
mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
|
||||
(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
|
||||
mtu = pi->eth_dev->data->mtu;
|
||||
|
||||
conf_offloads = pi->eth_dev->data->dev_conf.rxmode.offloads;
|
||||
|
||||
|
@ -1113,7 +1113,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
|
||||
u32 wr_mid;
|
||||
u64 cntrl, *end;
|
||||
bool v6;
|
||||
u32 max_pkt_len = txq->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
u32 max_pkt_len;
|
||||
|
||||
/* Reject xmit if queue is stopped */
|
||||
if (unlikely(txq->flags & EQ_STOPPED))
|
||||
@ -1129,6 +1129,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
|
||||
return 0;
|
||||
}
|
||||
|
||||
max_pkt_len = txq->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
|
||||
if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
|
||||
(unlikely(m->pkt_len > max_pkt_len)))
|
||||
goto out_free;
|
||||
|
@ -187,15 +187,13 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (frame_size > DPAA_ETH_MAX_LEN)
|
||||
if (mtu > RTE_ETHER_MTU)
|
||||
dev->data->dev_conf.rxmode.offloads |=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
dev->data->dev_conf.rxmode.offloads &=
|
||||
~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
|
||||
fman_if_set_maxfrm(dev->process_private, frame_size);
|
||||
|
||||
return 0;
|
||||
@ -213,6 +211,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
|
||||
struct fman_if *fif = dev->process_private;
|
||||
struct __fman_if *__fif;
|
||||
struct rte_intr_handle *intr_handle;
|
||||
uint32_t max_rx_pktlen;
|
||||
int speed, duplex;
|
||||
int ret;
|
||||
|
||||
@ -238,27 +237,17 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
|
||||
tx_offloads, dev_tx_offloads_nodis);
|
||||
}
|
||||
|
||||
if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
|
||||
uint32_t max_len;
|
||||
|
||||
DPAA_PMD_DEBUG("enabling jumbo");
|
||||
|
||||
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
|
||||
DPAA_MAX_RX_PKT_LEN)
|
||||
max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
else {
|
||||
DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
|
||||
"supported is %d",
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len,
|
||||
DPAA_MAX_RX_PKT_LEN);
|
||||
max_len = DPAA_MAX_RX_PKT_LEN;
|
||||
}
|
||||
|
||||
fman_if_set_maxfrm(dev->process_private, max_len);
|
||||
dev->data->mtu = max_len
|
||||
- RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
|
||||
max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
|
||||
RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
|
||||
if (max_rx_pktlen > DPAA_MAX_RX_PKT_LEN) {
|
||||
DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
|
||||
"supported is %d",
|
||||
max_rx_pktlen, DPAA_MAX_RX_PKT_LEN);
|
||||
max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
|
||||
}
|
||||
|
||||
fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
|
||||
|
||||
if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
|
||||
DPAA_PMD_DEBUG("enabling scatter mode");
|
||||
fman_if_set_sg(dev->process_private, 1);
|
||||
@ -936,6 +925,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
|
||||
u32 flags = 0;
|
||||
int ret;
|
||||
u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
|
||||
uint32_t max_rx_pktlen;
|
||||
|
||||
PMD_INIT_FUNC_TRACE();
|
||||
|
||||
@ -977,17 +967,17 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
|
||||
VLAN_TAG_SIZE;
|
||||
/* Max packet can fit in single buffer */
|
||||
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
|
||||
if (max_rx_pktlen <= buffsz) {
|
||||
;
|
||||
} else if (dev->data->dev_conf.rxmode.offloads &
|
||||
DEV_RX_OFFLOAD_SCATTER) {
|
||||
if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
|
||||
buffsz * DPAA_SGT_MAX_ENTRIES) {
|
||||
DPAA_PMD_ERR("max RxPkt size %d too big to fit "
|
||||
if (max_rx_pktlen > buffsz * DPAA_SGT_MAX_ENTRIES) {
|
||||
DPAA_PMD_ERR("Maximum Rx packet size %d too big to fit "
|
||||
"MaxSGlist %d",
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len,
|
||||
buffsz * DPAA_SGT_MAX_ENTRIES);
|
||||
max_rx_pktlen, buffsz * DPAA_SGT_MAX_ENTRIES);
|
||||
rte_errno = EOVERFLOW;
|
||||
return -rte_errno;
|
||||
}
|
||||
@ -995,8 +985,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
|
||||
DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
|
||||
" larger than a single mbuf (%u) and scattered"
|
||||
" mode has not been requested",
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len,
|
||||
buffsz - RTE_PKTMBUF_HEADROOM);
|
||||
max_rx_pktlen, buffsz - RTE_PKTMBUF_HEADROOM);
|
||||
}
|
||||
|
||||
dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
|
||||
@ -1034,8 +1023,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
|
||||
|
||||
dpaa_intf->valid = 1;
|
||||
DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
|
||||
fman_if_get_sg_enable(fif),
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len);
|
||||
fman_if_get_sg_enable(fif), max_rx_pktlen);
|
||||
/* checking if push mode only, no error check for now */
|
||||
if (!rxq->is_static &&
|
||||
dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
|
||||
|
@ -540,6 +540,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
|
||||
int tx_l3_csum_offload = false;
|
||||
int tx_l4_csum_offload = false;
|
||||
int ret, tc_index;
|
||||
uint32_t max_rx_pktlen;
|
||||
|
||||
PMD_INIT_FUNC_TRACE();
|
||||
|
||||
@ -559,25 +560,19 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
|
||||
tx_offloads, dev_tx_offloads_nodis);
|
||||
}
|
||||
|
||||
if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
|
||||
if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
|
||||
ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
|
||||
priv->token, eth_conf->rxmode.max_rx_pkt_len
|
||||
- RTE_ETHER_CRC_LEN);
|
||||
if (ret) {
|
||||
DPAA2_PMD_ERR(
|
||||
"Unable to set mtu. check config");
|
||||
return ret;
|
||||
}
|
||||
dev->data->mtu =
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len -
|
||||
RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN -
|
||||
VLAN_TAG_SIZE;
|
||||
DPAA2_PMD_INFO("MTU configured for the device: %d",
|
||||
dev->data->mtu);
|
||||
} else {
|
||||
return -1;
|
||||
max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
|
||||
RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
|
||||
if (max_rx_pktlen <= DPAA2_MAX_RX_PKT_LEN) {
|
||||
ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
|
||||
priv->token, max_rx_pktlen - RTE_ETHER_CRC_LEN);
|
||||
if (ret != 0) {
|
||||
DPAA2_PMD_ERR("Unable to set mtu. check config");
|
||||
return ret;
|
||||
}
|
||||
DPAA2_PMD_INFO("MTU configured for the device: %d",
|
||||
dev->data->mtu);
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
|
||||
@ -1470,15 +1465,13 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)
|
||||
return -EINVAL;
|
||||
|
||||
if (frame_size > DPAA2_ETH_MAX_LEN)
|
||||
if (mtu > RTE_ETHER_MTU)
|
||||
dev->data->dev_conf.rxmode.offloads |=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
dev->data->dev_conf.rxmode.offloads &=
|
||||
~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
|
||||
/* Set the Max Rx frame length as 'mtu' +
|
||||
* Maximum Ethernet header length
|
||||
*/
|
||||
|
@ -1816,7 +1816,7 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
rctl = E1000_READ_REG(hw, E1000_RCTL);
|
||||
|
||||
/* switch to jumbo mode if needed */
|
||||
if (frame_size > E1000_ETH_MAX_LEN) {
|
||||
if (mtu > RTE_ETHER_MTU) {
|
||||
dev->data->dev_conf.rxmode.offloads |=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
rctl |= E1000_RCTL_LPE;
|
||||
@ -1827,8 +1827,6 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
}
|
||||
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
|
||||
|
||||
/* update max frame size */
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2677,9 +2677,7 @@ igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
|
||||
E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
|
||||
|
||||
/* Update maximum packet length */
|
||||
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
|
||||
E1000_WRITE_REG(hw, E1000_RLPML,
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len);
|
||||
E1000_WRITE_REG(hw, E1000_RLPML, dev->data->mtu + E1000_ETH_OVERHEAD);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -2695,10 +2693,8 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
|
||||
E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
|
||||
|
||||
/* Update maximum packet length */
|
||||
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
|
||||
E1000_WRITE_REG(hw, E1000_RLPML,
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len +
|
||||
VLAN_TAG_SIZE);
|
||||
E1000_WRITE_REG(hw, E1000_RLPML,
|
||||
dev->data->mtu + E1000_ETH_OVERHEAD + VLAN_TAG_SIZE);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -4396,7 +4392,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
rctl = E1000_READ_REG(hw, E1000_RCTL);
|
||||
|
||||
/* switch to jumbo mode if needed */
|
||||
if (frame_size > E1000_ETH_MAX_LEN) {
|
||||
if (mtu > RTE_ETHER_MTU) {
|
||||
dev->data->dev_conf.rxmode.offloads |=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
rctl |= E1000_RCTL_LPE;
|
||||
@ -4407,11 +4403,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
}
|
||||
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
|
||||
|
||||
/* update max frame size */
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
|
||||
E1000_WRITE_REG(hw, E1000_RLPML,
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len);
|
||||
E1000_WRITE_REG(hw, E1000_RLPML, frame_size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2312,6 +2312,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
|
||||
uint32_t srrctl;
|
||||
uint16_t buf_size;
|
||||
uint16_t rctl_bsize;
|
||||
uint32_t max_len;
|
||||
uint16_t i;
|
||||
int ret;
|
||||
|
||||
@ -2330,9 +2331,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
|
||||
/*
|
||||
* Configure support of jumbo frames, if any.
|
||||
*/
|
||||
max_len = dev->data->mtu + E1000_ETH_OVERHEAD;
|
||||
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
|
||||
uint32_t max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
|
||||
rctl |= E1000_RCTL_LPE;
|
||||
|
||||
/*
|
||||
@ -2410,8 +2410,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
|
||||
E1000_SRRCTL_BSIZEPKT_SHIFT);
|
||||
|
||||
/* It adds dual VLAN length for supporting dual VLAN */
|
||||
if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
|
||||
2 * VLAN_TAG_SIZE) > buf_size){
|
||||
if ((max_len + 2 * VLAN_TAG_SIZE) > buf_size) {
|
||||
if (!dev->data->scattered_rx)
|
||||
PMD_INIT_LOG(DEBUG,
|
||||
"forcing scatter mode");
|
||||
@ -2635,15 +2634,15 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
|
||||
uint32_t srrctl;
|
||||
uint16_t buf_size;
|
||||
uint16_t rctl_bsize;
|
||||
uint32_t max_len;
|
||||
uint16_t i;
|
||||
int ret;
|
||||
|
||||
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
||||
|
||||
/* setup MTU */
|
||||
e1000_rlpml_set_vf(hw,
|
||||
(uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
|
||||
VLAN_TAG_SIZE));
|
||||
max_len = dev->data->mtu + E1000_ETH_OVERHEAD;
|
||||
e1000_rlpml_set_vf(hw, (uint16_t)(max_len + VLAN_TAG_SIZE));
|
||||
|
||||
/* Configure and enable each RX queue. */
|
||||
rctl_bsize = 0;
|
||||
@ -2700,8 +2699,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
|
||||
E1000_SRRCTL_BSIZEPKT_SHIFT);
|
||||
|
||||
/* It adds dual VLAN length for supporting dual VLAN */
|
||||
if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
|
||||
2 * VLAN_TAG_SIZE) > buf_size){
|
||||
if ((max_len + 2 * VLAN_TAG_SIZE) > buf_size) {
|
||||
if (!dev->data->scattered_rx)
|
||||
PMD_INIT_LOG(DEBUG,
|
||||
"forcing scatter mode");
|
||||
|
@ -677,26 +677,14 @@ static int ena_queue_start_all(struct rte_eth_dev *dev,
|
||||
return rc;
|
||||
}
|
||||
|
||||
static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
|
||||
{
|
||||
uint32_t max_frame_len = adapter->max_mtu;
|
||||
|
||||
if (adapter->edev_data->dev_conf.rxmode.offloads &
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME)
|
||||
max_frame_len =
|
||||
adapter->edev_data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
|
||||
return max_frame_len;
|
||||
}
|
||||
|
||||
static int ena_check_valid_conf(struct ena_adapter *adapter)
|
||||
{
|
||||
uint32_t max_frame_len = ena_get_mtu_conf(adapter);
|
||||
uint32_t mtu = adapter->edev_data->mtu;
|
||||
|
||||
if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
|
||||
if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) {
|
||||
PMD_INIT_LOG(ERR,
|
||||
"Unsupported MTU of %d. Max MTU: %d, min MTU: %d\n",
|
||||
max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
|
||||
mtu, adapter->max_mtu, ENA_MIN_MTU);
|
||||
return ENA_COM_UNSUPPORTED;
|
||||
}
|
||||
|
||||
@ -869,10 +857,10 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
ena_dev = &adapter->ena_dev;
|
||||
ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
|
||||
|
||||
if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
|
||||
if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) {
|
||||
PMD_DRV_LOG(ERR,
|
||||
"Invalid MTU setting. New MTU: %d, max MTU: %d, min MTU: %d\n",
|
||||
mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
|
||||
mtu, adapter->max_mtu, ENA_MIN_MTU);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1943,7 +1931,10 @@ static int ena_infos_get(struct rte_eth_dev *dev,
|
||||
dev_info->hash_key_size = ENA_HASH_KEY_SIZE;
|
||||
|
||||
dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
|
||||
dev_info->max_rx_pktlen = adapter->max_mtu;
|
||||
dev_info->max_rx_pktlen = adapter->max_mtu + RTE_ETHER_HDR_LEN +
|
||||
RTE_ETHER_CRC_LEN;
|
||||
dev_info->min_mtu = ENA_MIN_MTU;
|
||||
dev_info->max_mtu = adapter->max_mtu;
|
||||
dev_info->max_mac_addrs = 1;
|
||||
|
||||
dev_info->max_rx_queues = adapter->max_num_io_queues;
|
||||
|
@ -681,7 +681,7 @@ enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (frame_size > ENETC_ETH_MAX_LEN)
|
||||
if (mtu > RTE_ETHER_MTU)
|
||||
dev->data->dev_conf.rxmode.offloads &=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
@ -691,8 +691,6 @@ enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
|
||||
enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
|
||||
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
|
||||
/*setting the MTU*/
|
||||
enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
|
||||
ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));
|
||||
@ -709,23 +707,15 @@ enetc_dev_configure(struct rte_eth_dev *dev)
|
||||
struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
|
||||
uint64_t rx_offloads = eth_conf->rxmode.offloads;
|
||||
uint32_t checksum = L3_CKSUM | L4_CKSUM;
|
||||
uint32_t max_len;
|
||||
|
||||
PMD_INIT_FUNC_TRACE();
|
||||
|
||||
if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
|
||||
uint32_t max_len;
|
||||
|
||||
max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
|
||||
enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
|
||||
ENETC_SET_MAXFRM(max_len));
|
||||
enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
|
||||
ENETC_MAC_MAXFRM_SIZE);
|
||||
enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
|
||||
2 * ENETC_MAC_MAXFRM_SIZE);
|
||||
dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
|
||||
RTE_ETHER_CRC_LEN;
|
||||
}
|
||||
max_len = dev->data->dev_conf.rxmode.mtu + RTE_ETHER_HDR_LEN +
|
||||
RTE_ETHER_CRC_LEN;
|
||||
enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(max_len));
|
||||
enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
|
||||
enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
|
||||
|
||||
if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
|
||||
int config;
|
||||
|
@ -459,7 +459,7 @@ static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
|
||||
* max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
|
||||
* a hint to the driver to size receive buffers accordingly so that
|
||||
* larger-than-vnic-mtu packets get truncated.. For DPDK, we let
|
||||
* the user decide the buffer size via rxmode.max_rx_pkt_len, basically
|
||||
* the user decide the buffer size via rxmode.mtu, basically
|
||||
* ignoring vNIC mtu.
|
||||
*/
|
||||
device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
|
||||
|
@ -282,7 +282,7 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
|
||||
struct rq_enet_desc *rqd = rq->ring.descs;
|
||||
unsigned i;
|
||||
dma_addr_t dma_addr;
|
||||
uint32_t max_rx_pkt_len;
|
||||
uint32_t max_rx_pktlen;
|
||||
uint16_t rq_buf_len;
|
||||
|
||||
if (!rq->in_use)
|
||||
@ -293,16 +293,16 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
|
||||
|
||||
/*
|
||||
* If *not* using scatter and the mbuf size is greater than the
|
||||
* requested max packet size (max_rx_pkt_len), then reduce the
|
||||
* posted buffer size to max_rx_pkt_len. HW still receives packets
|
||||
* larger than max_rx_pkt_len, but they will be truncated, which we
|
||||
* requested max packet size (mtu + eth overhead), then reduce the
|
||||
* posted buffer size to max packet size. HW still receives packets
|
||||
* larger than max packet size, but they will be truncated, which we
|
||||
* drop in the rx handler. Not ideal, but better than returning
|
||||
* large packets when the user is not expecting them.
|
||||
*/
|
||||
max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);
|
||||
rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM;
|
||||
if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable)
|
||||
rq_buf_len = max_rx_pkt_len;
|
||||
if (max_rx_pktlen < rq_buf_len && !rq->data_queue_enable)
|
||||
rq_buf_len = max_rx_pktlen;
|
||||
for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
|
||||
mb = rte_mbuf_raw_alloc(rq->mp);
|
||||
if (mb == NULL) {
|
||||
@ -818,7 +818,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
|
||||
unsigned int mbuf_size, mbufs_per_pkt;
|
||||
unsigned int nb_sop_desc, nb_data_desc;
|
||||
uint16_t min_sop, max_sop, min_data, max_data;
|
||||
uint32_t max_rx_pkt_len;
|
||||
uint32_t max_rx_pktlen;
|
||||
|
||||
/*
|
||||
* Representor uses a reserved PF queue. Translate representor
|
||||
@ -854,23 +854,23 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
|
||||
|
||||
mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
|
||||
RTE_PKTMBUF_HEADROOM);
|
||||
/* max_rx_pkt_len includes the ethernet header and CRC. */
|
||||
max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
/* max_rx_pktlen includes the ethernet header and CRC. */
|
||||
max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);
|
||||
|
||||
if (enic->rte_dev->data->dev_conf.rxmode.offloads &
|
||||
DEV_RX_OFFLOAD_SCATTER) {
|
||||
dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
|
||||
/* ceil((max pkt len)/mbuf_size) */
|
||||
mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size;
|
||||
mbufs_per_pkt = (max_rx_pktlen + mbuf_size - 1) / mbuf_size;
|
||||
} else {
|
||||
dev_info(enic, "Scatter rx mode disabled\n");
|
||||
mbufs_per_pkt = 1;
|
||||
if (max_rx_pkt_len > mbuf_size) {
|
||||
if (max_rx_pktlen > mbuf_size) {
|
||||
dev_warning(enic, "The maximum Rx packet size (%u) is"
|
||||
" larger than the mbuf size (%u), and"
|
||||
" scatter is disabled. Larger packets will"
|
||||
" be truncated.\n",
|
||||
max_rx_pkt_len, mbuf_size);
|
||||
max_rx_pktlen, mbuf_size);
|
||||
}
|
||||
}
|
||||
|
||||
@ -879,16 +879,15 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
|
||||
rq_sop->data_queue_enable = 1;
|
||||
rq_data->in_use = 1;
|
||||
/*
|
||||
* HW does not directly support rxmode.max_rx_pkt_len. HW always
|
||||
* HW does not directly support MTU. HW always
|
||||
* receives packet sizes up to the "max" MTU.
|
||||
* If not using scatter, we can achieve the effect of dropping
|
||||
* larger packets by reducing the size of posted buffers.
|
||||
* See enic_alloc_rx_queue_mbufs().
|
||||
*/
|
||||
if (max_rx_pkt_len <
|
||||
enic_mtu_to_max_rx_pktlen(enic->max_mtu)) {
|
||||
dev_warning(enic, "rxmode.max_rx_pkt_len is ignored"
|
||||
" when scatter rx mode is in use.\n");
|
||||
if (enic->rte_dev->data->mtu < enic->max_mtu) {
|
||||
dev_warning(enic,
|
||||
"mtu is ignored when scatter rx mode is in use.\n");
|
||||
}
|
||||
} else {
|
||||
dev_info(enic, "Rq %u Scatter rx mode not being used\n",
|
||||
@ -931,7 +930,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
|
||||
if (mbufs_per_pkt > 1) {
|
||||
dev_info(enic, "For max packet size %u and mbuf size %u valid"
|
||||
" rx descriptor range is %u to %u\n",
|
||||
max_rx_pkt_len, mbuf_size, min_sop + min_data,
|
||||
max_rx_pktlen, mbuf_size, min_sop + min_data,
|
||||
max_sop + max_data);
|
||||
}
|
||||
dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
|
||||
@ -1634,11 +1633,6 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
|
||||
"MTU (%u) is greater than value configured in NIC (%u)\n",
|
||||
new_mtu, config_mtu);
|
||||
|
||||
/* Update the MTU and maximum packet length */
|
||||
eth_dev->data->mtu = new_mtu;
|
||||
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
|
||||
enic_mtu_to_max_rx_pktlen(new_mtu);
|
||||
|
||||
/*
|
||||
* If the device has not started (enic_enable), nothing to do.
|
||||
* Later, enic_enable() will set up RQs reflecting the new maximum
|
||||
|
@ -757,7 +757,7 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
|
||||
FM10K_SRRCTL_LOOPBACK_SUPPRESS);
|
||||
|
||||
/* It adds dual VLAN length for supporting dual VLAN */
|
||||
if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
|
||||
if ((dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
|
||||
2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
|
||||
rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
|
||||
uint32_t reg;
|
||||
|
@ -315,19 +315,19 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
|
||||
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
|
||||
|
||||
/* mtu size is 256~9600 */
|
||||
if (dev->data->dev_conf.rxmode.max_rx_pkt_len < HINIC_MIN_FRAME_SIZE ||
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len >
|
||||
HINIC_MAX_JUMBO_FRAME_SIZE) {
|
||||
if (HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu) <
|
||||
HINIC_MIN_FRAME_SIZE ||
|
||||
HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu) >
|
||||
HINIC_MAX_JUMBO_FRAME_SIZE) {
|
||||
PMD_DRV_LOG(ERR,
|
||||
"Max rx pkt len out of range, get max_rx_pkt_len:%d, "
|
||||
"Packet length out of range, get packet length:%d, "
|
||||
"expect between %d and %d",
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len,
|
||||
HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu),
|
||||
HINIC_MIN_FRAME_SIZE, HINIC_MAX_JUMBO_FRAME_SIZE);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
nic_dev->mtu_size =
|
||||
HINIC_PKTLEN_TO_MTU(dev->data->dev_conf.rxmode.max_rx_pkt_len);
|
||||
nic_dev->mtu_size = dev->data->dev_conf.rxmode.mtu;
|
||||
|
||||
/* rss template */
|
||||
err = hinic_config_mq_mode(dev, TRUE);
|
||||
@ -1534,7 +1534,6 @@ static void hinic_deinit_mac_addr(struct rte_eth_dev *eth_dev)
|
||||
static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
{
|
||||
struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
|
||||
uint32_t frame_size;
|
||||
int ret = 0;
|
||||
|
||||
PMD_DRV_LOG(INFO, "Set port mtu, port_id: %d, mtu: %d, max_pkt_len: %d",
|
||||
@ -1552,16 +1551,13 @@ static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* update max frame size */
|
||||
frame_size = HINIC_MTU_TO_PKTLEN(mtu);
|
||||
if (frame_size > HINIC_ETH_MAX_LEN)
|
||||
if (mtu > RTE_ETHER_MTU)
|
||||
dev->data->dev_conf.rxmode.offloads |=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
dev->data->dev_conf.rxmode.offloads &=
|
||||
~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
nic_dev->mtu_size = mtu;
|
||||
|
||||
return ret;
|
||||
|
@ -2366,41 +2366,6 @@ hns3_init_ring_with_vector(struct hns3_hw *hw)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
hns3_refresh_mtu(struct rte_eth_dev *dev, struct rte_eth_conf *conf)
|
||||
{
|
||||
struct hns3_adapter *hns = dev->data->dev_private;
|
||||
struct hns3_hw *hw = &hns->hw;
|
||||
uint32_t max_rx_pkt_len;
|
||||
uint16_t mtu;
|
||||
int ret;
|
||||
|
||||
if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* If jumbo frames are enabled, MTU needs to be refreshed
|
||||
* according to the maximum RX packet length.
|
||||
*/
|
||||
max_rx_pkt_len = conf->rxmode.max_rx_pkt_len;
|
||||
if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN ||
|
||||
max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) {
|
||||
hns3_err(hw, "maximum Rx packet length must be greater than %u "
|
||||
"and no more than %u when jumbo frame enabled.",
|
||||
(uint16_t)HNS3_DEFAULT_FRAME_LEN,
|
||||
(uint16_t)HNS3_MAX_FRAME_LEN);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len);
|
||||
ret = hns3_dev_mtu_set(dev, mtu);
|
||||
if (ret)
|
||||
return ret;
|
||||
dev->data->mtu = mtu;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
hns3_setup_dcb(struct rte_eth_dev *dev)
|
||||
{
|
||||
@ -2515,8 +2480,8 @@ hns3_dev_configure(struct rte_eth_dev *dev)
|
||||
goto cfg_err;
|
||||
}
|
||||
|
||||
ret = hns3_refresh_mtu(dev, conf);
|
||||
if (ret)
|
||||
ret = hns3_dev_mtu_set(dev, conf->rxmode.mtu);
|
||||
if (ret != 0)
|
||||
goto cfg_err;
|
||||
|
||||
ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf);
|
||||
@ -2611,7 +2576,7 @@ hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
}
|
||||
|
||||
rte_spinlock_lock(&hw->lock);
|
||||
is_jumbo_frame = frame_size > HNS3_DEFAULT_FRAME_LEN ? true : false;
|
||||
is_jumbo_frame = mtu > RTE_ETHER_MTU ? true : false;
|
||||
frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);
|
||||
|
||||
/*
|
||||
@ -2632,7 +2597,6 @@ hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
else
|
||||
dev->data->dev_conf.rxmode.offloads &=
|
||||
~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
rte_spinlock_unlock(&hw->lock);
|
||||
|
||||
return 0;
|
||||
|
@ -784,8 +784,6 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
|
||||
uint16_t nb_rx_q = dev->data->nb_rx_queues;
|
||||
uint16_t nb_tx_q = dev->data->nb_tx_queues;
|
||||
struct rte_eth_rss_conf rss_conf;
|
||||
uint32_t max_rx_pkt_len;
|
||||
uint16_t mtu;
|
||||
bool gro_en;
|
||||
int ret;
|
||||
|
||||
@ -825,28 +823,9 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
|
||||
goto cfg_err;
|
||||
}
|
||||
|
||||
/*
|
||||
* If jumbo frames are enabled, MTU needs to be refreshed
|
||||
* according to the maximum RX packet length.
|
||||
*/
|
||||
if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
|
||||
max_rx_pkt_len = conf->rxmode.max_rx_pkt_len;
|
||||
if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN ||
|
||||
max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) {
|
||||
hns3_err(hw, "maximum Rx packet length must be greater "
|
||||
"than %u and less than %u when jumbo frame enabled.",
|
||||
(uint16_t)HNS3_DEFAULT_FRAME_LEN,
|
||||
(uint16_t)HNS3_MAX_FRAME_LEN);
|
||||
ret = -EINVAL;
|
||||
goto cfg_err;
|
||||
}
|
||||
|
||||
mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len);
|
||||
ret = hns3vf_dev_mtu_set(dev, mtu);
|
||||
if (ret)
|
||||
goto cfg_err;
|
||||
dev->data->mtu = mtu;
|
||||
}
|
||||
ret = hns3vf_dev_mtu_set(dev, conf->rxmode.mtu);
|
||||
if (ret != 0)
|
||||
goto cfg_err;
|
||||
|
||||
ret = hns3vf_dev_configure_vlan(dev);
|
||||
if (ret)
|
||||
@ -935,7 +914,6 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
else
|
||||
dev->data->dev_conf.rxmode.offloads &=
|
||||
~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
rte_spinlock_unlock(&hw->lock);
|
||||
|
||||
return 0;
|
||||
|
@ -1747,18 +1747,18 @@ hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size,
|
||||
uint16_t nb_desc)
|
||||
{
|
||||
struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
|
||||
struct rte_eth_rxmode *rxmode = &hw->data->dev_conf.rxmode;
|
||||
eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
|
||||
uint32_t frame_size = dev->data->mtu + HNS3_ETH_OVERHEAD;
|
||||
uint16_t min_vec_bds;
|
||||
|
||||
/*
|
||||
* HNS3 hardware network engine set scattered as default. If the driver
|
||||
* is not work in scattered mode and the pkts greater than buf_size
|
||||
* but smaller than max_rx_pkt_len will be distributed to multiple BDs.
|
||||
* but smaller than frame size will be distributed to multiple BDs.
|
||||
* Driver cannot handle this situation.
|
||||
*/
|
||||
if (!hw->data->scattered_rx && rxmode->max_rx_pkt_len > buf_size) {
|
||||
hns3_err(hw, "max_rx_pkt_len is not allowed to be set greater "
|
||||
if (!hw->data->scattered_rx && frame_size > buf_size) {
|
||||
hns3_err(hw, "frame size is not allowed to be set greater "
|
||||
"than rx_buf_len if scattered is off.");
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -1970,7 +1970,7 @@ hns3_rx_scattered_calc(struct rte_eth_dev *dev)
|
||||
}
|
||||
|
||||
if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
|
||||
dev_conf->rxmode.max_rx_pkt_len > hw->rx_buf_len)
|
||||
dev->data->mtu + HNS3_ETH_OVERHEAD > hw->rx_buf_len)
|
||||
dev->data->scattered_rx = true;
|
||||
}
|
||||
|
||||
|
@ -11437,14 +11437,10 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (frame_size > I40E_ETH_MAX_LEN)
|
||||
dev_data->dev_conf.rxmode.offloads |=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
if (mtu > RTE_ETHER_MTU)
|
||||
dev_data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
dev_data->dev_conf.rxmode.offloads &=
|
||||
~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
|
||||
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
dev_data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -2899,8 +2899,8 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
|
||||
}
|
||||
|
||||
rxq->max_pkt_len =
|
||||
RTE_MIN((uint32_t)(hw->func_caps.rx_buf_chain_len *
|
||||
rxq->rx_buf_len), data->dev_conf.rxmode.max_rx_pkt_len);
|
||||
RTE_MIN(hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len,
|
||||
data->mtu + I40E_ETH_OVERHEAD);
|
||||
if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
|
||||
if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
|
||||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
|
||||
|
@ -576,13 +576,14 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
|
||||
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
||||
struct rte_eth_dev_data *dev_data = dev->data;
|
||||
uint16_t buf_size, max_pkt_len;
|
||||
uint32_t frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD;
|
||||
|
||||
buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
|
||||
|
||||
/* Calculate the maximum packet length allowed */
|
||||
max_pkt_len = RTE_MIN((uint32_t)
|
||||
rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS,
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len);
|
||||
frame_size);
|
||||
|
||||
/* Check if the jumbo frame and maximum packet length are set
|
||||
* correctly.
|
||||
@ -839,7 +840,7 @@ iavf_dev_start(struct rte_eth_dev *dev)
|
||||
|
||||
adapter->stopped = 0;
|
||||
|
||||
vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
|
||||
vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
|
||||
dev->data->nb_tx_queues);
|
||||
num_queue_pairs = vf->num_queue_pairs;
|
||||
@ -1472,15 +1473,13 @@ iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (frame_size > IAVF_ETH_MAX_LEN)
|
||||
if (mtu > RTE_ETHER_MTU)
|
||||
dev->data->dev_conf.rxmode.offloads |=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
dev->data->dev_conf.rxmode.offloads &=
|
||||
~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -66,9 +66,8 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
|
||||
buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
|
||||
rxq->rx_hdr_len = 0;
|
||||
rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
|
||||
max_pkt_len = RTE_MIN((uint32_t)
|
||||
ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len);
|
||||
max_pkt_len = RTE_MIN(ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
|
||||
dev->data->mtu + ICE_ETH_OVERHEAD);
|
||||
|
||||
/* Check if the jumbo frame and maximum packet length are set
|
||||
* correctly.
|
||||
|
@ -3603,8 +3603,8 @@ ice_dev_start(struct rte_eth_dev *dev)
|
||||
pf->adapter_stopped = false;
|
||||
|
||||
/* Set the max frame size to default value*/
|
||||
max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
|
||||
pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
|
||||
max_frame_size = pf->dev_data->mtu ?
|
||||
pf->dev_data->mtu + ICE_ETH_OVERHEAD :
|
||||
ICE_FRAME_SIZE_MAX;
|
||||
|
||||
/* Set the max frame size to HW*/
|
||||
@ -3992,14 +3992,10 @@ ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (frame_size > ICE_ETH_MAX_LEN)
|
||||
dev_data->dev_conf.rxmode.offloads |=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
if (mtu > RTE_ETHER_MTU)
|
||||
dev_data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
dev_data->dev_conf.rxmode.offloads &=
|
||||
~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
|
||||
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
dev_data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -271,15 +271,16 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
|
||||
uint32_t rxdid = ICE_RXDID_COMMS_OVS;
|
||||
uint32_t regval;
|
||||
struct ice_adapter *ad = rxq->vsi->adapter;
|
||||
uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD;
|
||||
|
||||
/* Set buffer size as the head split is disabled. */
|
||||
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
|
||||
RTE_PKTMBUF_HEADROOM);
|
||||
rxq->rx_hdr_len = 0;
|
||||
rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
|
||||
rxq->max_pkt_len = RTE_MIN((uint32_t)
|
||||
ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
|
||||
dev_data->dev_conf.rxmode.max_rx_pkt_len);
|
||||
rxq->max_pkt_len =
|
||||
RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
|
||||
frame_size);
|
||||
|
||||
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
|
||||
if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
|
||||
@ -385,11 +386,8 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
|
||||
RTE_PKTMBUF_HEADROOM);
|
||||
|
||||
/* Check if scattered RX needs to be used. */
|
||||
if (rxq->max_pkt_len > buf_size)
|
||||
if (frame_size > buf_size)
|
||||
dev_data->scattered_rx = 1;
|
||||
|
||||
rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
|
||||
|
@ -20,13 +20,6 @@
|
||||
|
||||
#define IGC_INTEL_VENDOR_ID 0x8086
|
||||
|
||||
/*
|
||||
* The overhead from MTU to max frame size.
|
||||
* Considering VLAN so tag needs to be counted.
|
||||
*/
|
||||
#define IGC_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + \
|
||||
RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE)
|
||||
|
||||
#define IGC_FC_PAUSE_TIME 0x0680
|
||||
#define IGC_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
|
||||
#define IGC_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
|
||||
@ -1601,21 +1594,15 @@ eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
|
||||
/* switch to jumbo mode if needed */
|
||||
if (mtu > RTE_ETHER_MTU) {
|
||||
dev->data->dev_conf.rxmode.offloads |=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
rctl |= IGC_RCTL_LPE;
|
||||
} else {
|
||||
dev->data->dev_conf.rxmode.offloads &=
|
||||
~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
rctl &= ~IGC_RCTL_LPE;
|
||||
}
|
||||
IGC_WRITE_REG(hw, IGC_RCTL, rctl);
|
||||
|
||||
/* update max frame size */
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
|
||||
IGC_WRITE_REG(hw, IGC_RLPML,
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len);
|
||||
IGC_WRITE_REG(hw, IGC_RLPML, frame_size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -2485,6 +2472,7 @@ static int
|
||||
igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
|
||||
{
|
||||
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
|
||||
uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
|
||||
uint32_t ctrl_ext;
|
||||
|
||||
ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
|
||||
@ -2493,23 +2481,14 @@ igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
|
||||
if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
|
||||
return 0;
|
||||
|
||||
if ((dev->data->dev_conf.rxmode.offloads &
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
|
||||
goto write_ext_vlan;
|
||||
|
||||
/* Update maximum packet length */
|
||||
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <
|
||||
RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
|
||||
if (frame_size < RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
|
||||
PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len,
|
||||
VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
|
||||
frame_size, VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len -= VLAN_TAG_SIZE;
|
||||
IGC_WRITE_REG(hw, IGC_RLPML,
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len);
|
||||
IGC_WRITE_REG(hw, IGC_RLPML, frame_size - VLAN_TAG_SIZE);
|
||||
|
||||
write_ext_vlan:
|
||||
IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
|
||||
return 0;
|
||||
}
|
||||
@ -2518,6 +2497,7 @@ static int
|
||||
igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
|
||||
{
|
||||
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
|
||||
uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
|
||||
uint32_t ctrl_ext;
|
||||
|
||||
ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
|
||||
@ -2526,23 +2506,14 @@ igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
|
||||
if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
|
||||
return 0;
|
||||
|
||||
if ((dev->data->dev_conf.rxmode.offloads &
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
|
||||
goto write_ext_vlan;
|
||||
|
||||
/* Update maximum packet length */
|
||||
if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
|
||||
MAX_RX_JUMBO_FRAME_SIZE - VLAN_TAG_SIZE) {
|
||||
if (frame_size > MAX_RX_JUMBO_FRAME_SIZE) {
|
||||
PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len +
|
||||
VLAN_TAG_SIZE, MAX_RX_JUMBO_FRAME_SIZE);
|
||||
frame_size, MAX_RX_JUMBO_FRAME_SIZE);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len += VLAN_TAG_SIZE;
|
||||
IGC_WRITE_REG(hw, IGC_RLPML,
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len);
|
||||
IGC_WRITE_REG(hw, IGC_RLPML, frame_size);
|
||||
|
||||
write_ext_vlan:
|
||||
IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
|
||||
return 0;
|
||||
}
|
||||
|
@ -35,6 +35,13 @@ extern "C" {
|
||||
#define IGC_HKEY_REG_SIZE IGC_DEFAULT_REG_SIZE
|
||||
#define IGC_HKEY_SIZE (IGC_HKEY_REG_SIZE * IGC_HKEY_MAX_INDEX)
|
||||
|
||||
/*
|
||||
* The overhead from MTU to max frame size.
|
||||
* Considering VLAN so tag needs to be counted.
|
||||
*/
|
||||
#define IGC_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + \
|
||||
RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2)
|
||||
|
||||
/*
|
||||
* TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
|
||||
* multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
|
||||
|
@ -1062,7 +1062,7 @@ igc_rx_init(struct rte_eth_dev *dev)
|
||||
struct igc_rx_queue *rxq;
|
||||
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
|
||||
uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
|
||||
uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
uint32_t max_rx_pktlen;
|
||||
uint32_t rctl;
|
||||
uint32_t rxcsum;
|
||||
uint16_t buf_size;
|
||||
@ -1080,17 +1080,17 @@ igc_rx_init(struct rte_eth_dev *dev)
|
||||
IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
|
||||
|
||||
/* Configure support of jumbo frames, if any. */
|
||||
if (offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
|
||||
if ((offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) != 0)
|
||||
rctl |= IGC_RCTL_LPE;
|
||||
|
||||
/*
|
||||
* Set maximum packet length by default, and might be updated
|
||||
* together with enabling/disabling dual VLAN.
|
||||
*/
|
||||
IGC_WRITE_REG(hw, IGC_RLPML, max_rx_pkt_len);
|
||||
} else {
|
||||
else
|
||||
rctl &= ~IGC_RCTL_LPE;
|
||||
}
|
||||
|
||||
max_rx_pktlen = dev->data->mtu + IGC_ETH_OVERHEAD;
|
||||
/*
|
||||
* Set maximum packet length by default, and might be updated
|
||||
* together with enabling/disabling dual VLAN.
|
||||
*/
|
||||
IGC_WRITE_REG(hw, IGC_RLPML, max_rx_pktlen);
|
||||
|
||||
/* Configure and enable each RX queue. */
|
||||
rctl_bsize = 0;
|
||||
@ -1149,7 +1149,7 @@ igc_rx_init(struct rte_eth_dev *dev)
|
||||
IGC_SRRCTL_BSIZEPKT_SHIFT);
|
||||
|
||||
/* It adds dual VLAN length for supporting dual VLAN */
|
||||
if (max_rx_pkt_len + 2 * VLAN_TAG_SIZE > buf_size)
|
||||
if (max_rx_pktlen > buf_size)
|
||||
dev->data->scattered_rx = 1;
|
||||
} else {
|
||||
/*
|
||||
|
@ -343,25 +343,15 @@ static int
|
||||
ionic_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
|
||||
{
|
||||
struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
|
||||
uint32_t max_frame_size;
|
||||
int err;
|
||||
|
||||
IONIC_PRINT_CALL();
|
||||
|
||||
/*
|
||||
* Note: mtu check against IONIC_MIN_MTU, IONIC_MAX_MTU
|
||||
* is done by the the API.
|
||||
* is done by the API.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Max frame size is MTU + Ethernet header + VLAN + QinQ
|
||||
* (plus ETHER_CRC_LEN if the adapter is able to keep CRC)
|
||||
*/
|
||||
max_frame_size = mtu + RTE_ETHER_HDR_LEN + 4 + 4;
|
||||
|
||||
if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len < max_frame_size)
|
||||
return -EINVAL;
|
||||
|
||||
err = ionic_lif_change_mtu(lif, mtu);
|
||||
if (err)
|
||||
return err;
|
||||
|
@ -771,7 +771,7 @@ ionic_rx_clean(struct ionic_rx_qcq *rxq,
|
||||
struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
|
||||
struct rte_mbuf *rxm, *rxm_seg;
|
||||
uint32_t max_frame_size =
|
||||
rxq->qcq.lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
rxq->qcq.lif->eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
|
||||
uint64_t pkt_flags = 0;
|
||||
uint32_t pkt_type;
|
||||
struct ionic_rx_stats *stats = &rxq->stats;
|
||||
@ -1014,7 +1014,7 @@ ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
|
||||
int __rte_cold
|
||||
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
|
||||
{
|
||||
uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
uint32_t frame_size = eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
|
||||
uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
|
||||
struct ionic_rx_qcq *rxq;
|
||||
int err;
|
||||
@ -1128,7 +1128,7 @@ ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
|
||||
{
|
||||
struct ionic_rx_qcq *rxq = rx_queue;
|
||||
uint32_t frame_size =
|
||||
rxq->qcq.lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
rxq->qcq.lif->eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
|
||||
struct ionic_rx_service service_cb_arg;
|
||||
|
||||
service_cb_arg.rx_pkts = rx_pkts;
|
||||
|
@ -2791,14 +2791,10 @@ ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (frame_size > IPN3KE_ETH_MAX_LEN)
|
||||
dev_data->dev_conf.rxmode.offloads |=
|
||||
(uint64_t)(DEV_RX_OFFLOAD_JUMBO_FRAME);
|
||||
if (mtu > RTE_ETHER_MTU)
|
||||
dev_data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
dev_data->dev_conf.rxmode.offloads &=
|
||||
(uint64_t)(~DEV_RX_OFFLOAD_JUMBO_FRAME);
|
||||
|
||||
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
dev_data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
|
||||
if (rpst->i40e_pf_eth) {
|
||||
ret = rpst->i40e_pf_eth->dev_ops->mtu_set(rpst->i40e_pf_eth,
|
||||
|
@ -5165,7 +5165,6 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
struct ixgbe_hw *hw;
|
||||
struct rte_eth_dev_info dev_info;
|
||||
uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD;
|
||||
struct rte_eth_dev_data *dev_data = dev->data;
|
||||
int ret;
|
||||
|
||||
ret = ixgbe_dev_info_get(dev, &dev_info);
|
||||
@ -5179,9 +5178,9 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
/* If device is started, refuse mtu that requires the support of
|
||||
* scattered packets when this feature has not been enabled before.
|
||||
*/
|
||||
if (dev_data->dev_started && !dev_data->scattered_rx &&
|
||||
(frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
|
||||
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
|
||||
if (dev->data->dev_started && !dev->data->scattered_rx &&
|
||||
frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
|
||||
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
|
||||
PMD_INIT_LOG(ERR, "Stop port first.");
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -5190,23 +5189,18 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
|
||||
|
||||
/* switch to jumbo mode if needed */
|
||||
if (frame_size > IXGBE_ETH_MAX_LEN) {
|
||||
dev->data->dev_conf.rxmode.offloads |=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
if (mtu > RTE_ETHER_MTU) {
|
||||
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
|
||||
} else {
|
||||
dev->data->dev_conf.rxmode.offloads &=
|
||||
~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
|
||||
}
|
||||
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
|
||||
|
||||
/* update max frame size */
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
|
||||
maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
|
||||
maxfrs &= 0x0000FFFF;
|
||||
maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
|
||||
maxfrs |= (frame_size << 16);
|
||||
IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
|
||||
|
||||
return 0;
|
||||
@ -6078,12 +6072,10 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
|
||||
* set as 0x4.
|
||||
*/
|
||||
if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
|
||||
(rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
|
||||
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
|
||||
IXGBE_MMW_SIZE_JUMBO_FRAME);
|
||||
(dev->data->mtu + IXGBE_ETH_OVERHEAD >= IXGBE_MAX_JUMBO_FRAME_SIZE))
|
||||
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_JUMBO_FRAME);
|
||||
else
|
||||
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
|
||||
IXGBE_MMW_SIZE_DEFAULT);
|
||||
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_DEFAULT);
|
||||
|
||||
/* Set RTTBCNRC of queue X */
|
||||
IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
|
||||
@ -6355,8 +6347,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
|
||||
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
||||
|
||||
if (mtu < RTE_ETHER_MIN_MTU ||
|
||||
max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
|
||||
if (mtu < RTE_ETHER_MIN_MTU || max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
|
||||
return -EINVAL;
|
||||
|
||||
/* If device is started, refuse mtu that requires the support of
|
||||
@ -6364,7 +6355,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
*/
|
||||
if (dev_data->dev_started && !dev_data->scattered_rx &&
|
||||
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
|
||||
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
|
||||
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
|
||||
PMD_INIT_LOG(ERR, "Stop port first.");
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -6381,8 +6372,6 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
if (ixgbevf_rlpml_set_vf(hw, max_frame))
|
||||
return -EINVAL;
|
||||
|
||||
/* update max frame size */
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -573,8 +573,7 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
|
||||
* if PF has jumbo frames enabled which means legacy
|
||||
* VFs are disabled.
|
||||
*/
|
||||
if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
|
||||
IXGBE_ETH_MAX_LEN)
|
||||
if (dev->data->mtu > RTE_ETHER_MTU)
|
||||
break;
|
||||
/* fall through */
|
||||
default:
|
||||
@ -584,8 +583,7 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
|
||||
* legacy VFs.
|
||||
*/
|
||||
if (max_frame > IXGBE_ETH_MAX_LEN ||
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len >
|
||||
IXGBE_ETH_MAX_LEN)
|
||||
dev->data->mtu > RTE_ETHER_MTU)
|
||||
return -1;
|
||||
break;
|
||||
}
|
||||
|
@ -5047,6 +5047,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
|
||||
uint16_t buf_size;
|
||||
uint16_t i;
|
||||
struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
|
||||
uint32_t frame_size = dev->data->mtu + IXGBE_ETH_OVERHEAD;
|
||||
int rc;
|
||||
|
||||
PMD_INIT_FUNC_TRACE();
|
||||
@ -5082,7 +5083,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
|
||||
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
|
||||
maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
|
||||
maxfrs &= 0x0000FFFF;
|
||||
maxfrs |= (rx_conf->max_rx_pkt_len << 16);
|
||||
maxfrs |= (frame_size << 16);
|
||||
IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
|
||||
} else
|
||||
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
|
||||
@ -5156,8 +5157,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
|
||||
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
|
||||
|
||||
/* It adds dual VLAN length for supporting dual VLAN */
|
||||
if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
|
||||
2 * IXGBE_VLAN_TAG_SIZE > buf_size)
|
||||
if (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
|
||||
dev->data->scattered_rx = 1;
|
||||
if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
|
||||
rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
|
||||
@ -5637,6 +5637,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
|
||||
struct ixgbe_hw *hw;
|
||||
struct ixgbe_rx_queue *rxq;
|
||||
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
|
||||
uint32_t frame_size = dev->data->mtu + IXGBE_ETH_OVERHEAD;
|
||||
uint64_t bus_addr;
|
||||
uint32_t srrctl, psrtype = 0;
|
||||
uint16_t buf_size;
|
||||
@ -5673,10 +5674,9 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
|
||||
* ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
|
||||
* VF packets received can work in all cases.
|
||||
*/
|
||||
if (ixgbevf_rlpml_set_vf(hw,
|
||||
(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len)) {
|
||||
if (ixgbevf_rlpml_set_vf(hw, frame_size) != 0) {
|
||||
PMD_INIT_LOG(ERR, "Set max packet length to %d failed.",
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len);
|
||||
frame_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -5735,8 +5735,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
|
||||
|
||||
if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
|
||||
/* It adds dual VLAN length for supporting dual VLAN */
|
||||
(rxmode->max_rx_pkt_len +
|
||||
2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
|
||||
(frame_size + 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
|
||||
if (!dev->data->scattered_rx)
|
||||
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
|
||||
dev->data->scattered_rx = 1;
|
||||
|
@ -435,7 +435,6 @@ lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
|
||||
{
|
||||
struct lio_device *lio_dev = LIO_DEV(eth_dev);
|
||||
uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
|
||||
uint32_t frame_len = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
|
||||
struct lio_dev_ctrl_cmd ctrl_cmd;
|
||||
struct lio_ctrl_pkt ctrl_pkt;
|
||||
|
||||
@ -481,16 +480,13 @@ lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (frame_len > LIO_ETH_MAX_LEN)
|
||||
if (mtu > RTE_ETHER_MTU)
|
||||
eth_dev->data->dev_conf.rxmode.offloads |=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
eth_dev->data->dev_conf.rxmode.offloads &=
|
||||
~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
|
||||
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
|
||||
eth_dev->data->mtu = mtu;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1402,8 +1398,6 @@ lio_sync_link_state_check(void *eth_dev)
|
||||
static int
|
||||
lio_dev_start(struct rte_eth_dev *eth_dev)
|
||||
{
|
||||
uint16_t mtu;
|
||||
uint32_t frame_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
struct lio_device *lio_dev = LIO_DEV(eth_dev);
|
||||
uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
|
||||
int ret = 0;
|
||||
@ -1446,15 +1440,9 @@ lio_dev_start(struct rte_eth_dev *eth_dev)
|
||||
goto dev_mtu_set_error;
|
||||
}
|
||||
|
||||
mtu = (uint16_t)(frame_len - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN);
|
||||
if (mtu < RTE_ETHER_MIN_MTU)
|
||||
mtu = RTE_ETHER_MIN_MTU;
|
||||
|
||||
if (eth_dev->data->mtu != mtu) {
|
||||
ret = lio_dev_mtu_set(eth_dev, mtu);
|
||||
if (ret)
|
||||
goto dev_mtu_set_error;
|
||||
}
|
||||
ret = lio_dev_mtu_set(eth_dev, eth_dev->data->mtu);
|
||||
if (ret != 0)
|
||||
goto dev_mtu_set_error;
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -753,6 +753,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
int ret;
|
||||
uint32_t crc_present;
|
||||
uint64_t offloads;
|
||||
uint32_t max_rx_pktlen;
|
||||
|
||||
offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
|
||||
|
||||
@ -829,13 +830,11 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
dev->data->rx_queues[idx] = rxq;
|
||||
/* Enable scattered packets support for this queue if necessary. */
|
||||
MLX4_ASSERT(mb_len >= RTE_PKTMBUF_HEADROOM);
|
||||
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
|
||||
(mb_len - RTE_PKTMBUF_HEADROOM)) {
|
||||
max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
|
||||
if (max_rx_pktlen <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
|
||||
;
|
||||
} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
|
||||
uint32_t size =
|
||||
RTE_PKTMBUF_HEADROOM +
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
uint32_t size = RTE_PKTMBUF_HEADROOM + max_rx_pktlen;
|
||||
uint32_t sges_n;
|
||||
|
||||
/*
|
||||
@ -847,21 +846,19 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
/* Make sure sges_n did not overflow. */
|
||||
size = mb_len * (1 << rxq->sges_n);
|
||||
size -= RTE_PKTMBUF_HEADROOM;
|
||||
if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
|
||||
if (size < max_rx_pktlen) {
|
||||
rte_errno = EOVERFLOW;
|
||||
ERROR("%p: too many SGEs (%u) needed to handle"
|
||||
" requested maximum packet size %u",
|
||||
(void *)dev,
|
||||
1 << sges_n,
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len);
|
||||
1 << sges_n, max_rx_pktlen);
|
||||
goto error;
|
||||
}
|
||||
} else {
|
||||
WARN("%p: the requested maximum Rx packet size (%u) is"
|
||||
" larger than a single mbuf (%u) and scattered"
|
||||
" mode has not been requested",
|
||||
(void *)dev,
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len,
|
||||
(void *)dev, max_rx_pktlen,
|
||||
mb_len - RTE_PKTMBUF_HEADROOM);
|
||||
}
|
||||
DEBUG("%p: maximum number of segments per packet: %u",
|
||||
|
@ -1338,10 +1338,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
uint64_t offloads = conf->offloads |
|
||||
dev->data->dev_conf.rxmode.offloads;
|
||||
unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
|
||||
unsigned int max_rx_pkt_len = lro_on_queue ?
|
||||
unsigned int max_rx_pktlen = lro_on_queue ?
|
||||
dev->data->dev_conf.rxmode.max_lro_pkt_size :
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
|
||||
dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN +
|
||||
RTE_ETHER_CRC_LEN;
|
||||
unsigned int non_scatter_min_mbuf_size = max_rx_pktlen +
|
||||
RTE_PKTMBUF_HEADROOM;
|
||||
unsigned int max_lro_size = 0;
|
||||
unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
|
||||
@ -1380,7 +1381,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
* needed to handle max size packets, replace zero length
|
||||
* with the buffer length from the pool.
|
||||
*/
|
||||
tail_len = max_rx_pkt_len;
|
||||
tail_len = max_rx_pktlen;
|
||||
do {
|
||||
struct mlx5_eth_rxseg *hw_seg =
|
||||
&tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
|
||||
@ -1418,7 +1419,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
"port %u too many SGEs (%u) needed to handle"
|
||||
" requested maximum packet size %u, the maximum"
|
||||
" supported are %u", dev->data->port_id,
|
||||
tmpl->rxq.rxseg_n, max_rx_pkt_len,
|
||||
tmpl->rxq.rxseg_n, max_rx_pktlen,
|
||||
MLX5_MAX_RXQ_NSEG);
|
||||
rte_errno = ENOTSUP;
|
||||
goto error;
|
||||
@ -1443,7 +1444,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
|
||||
" configured and no enough mbuf space(%u) to contain "
|
||||
"the maximum RX packet length(%u) with head-room(%u)",
|
||||
dev->data->port_id, idx, mb_len, max_rx_pkt_len,
|
||||
dev->data->port_id, idx, mb_len, max_rx_pktlen,
|
||||
RTE_PKTMBUF_HEADROOM);
|
||||
rte_errno = ENOSPC;
|
||||
goto error;
|
||||
@ -1464,7 +1465,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
* following conditions are met:
|
||||
* - MPRQ is enabled.
|
||||
* - The number of descs is more than the number of strides.
|
||||
* - max_rx_pkt_len plus overhead is less than the max size
|
||||
* - max_rx_pktlen plus overhead is less than the max size
|
||||
* of a stride or mprq_stride_size is specified by a user.
|
||||
* Need to make sure that there are enough strides to encap
|
||||
* the maximum packet size in case mprq_stride_size is set.
|
||||
@ -1488,7 +1489,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
!!(offloads & DEV_RX_OFFLOAD_SCATTER);
|
||||
tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
|
||||
config->mprq.max_memcpy_len);
|
||||
max_lro_size = RTE_MIN(max_rx_pkt_len,
|
||||
max_lro_size = RTE_MIN(max_rx_pktlen,
|
||||
(1u << tmpl->rxq.strd_num_n) *
|
||||
(1u << tmpl->rxq.strd_sz_n));
|
||||
DRV_LOG(DEBUG,
|
||||
@ -1497,9 +1498,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
dev->data->port_id, idx,
|
||||
tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
|
||||
} else if (tmpl->rxq.rxseg_n == 1) {
|
||||
MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size);
|
||||
MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
|
||||
tmpl->rxq.sges_n = 0;
|
||||
max_lro_size = max_rx_pkt_len;
|
||||
max_lro_size = max_rx_pktlen;
|
||||
} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
|
||||
unsigned int sges_n;
|
||||
|
||||
@ -1521,13 +1522,13 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
"port %u too many SGEs (%u) needed to handle"
|
||||
" requested maximum packet size %u, the maximum"
|
||||
" supported are %u", dev->data->port_id,
|
||||
1 << sges_n, max_rx_pkt_len,
|
||||
1 << sges_n, max_rx_pktlen,
|
||||
1u << MLX5_MAX_LOG_RQ_SEGS);
|
||||
rte_errno = ENOTSUP;
|
||||
goto error;
|
||||
}
|
||||
tmpl->rxq.sges_n = sges_n;
|
||||
max_lro_size = max_rx_pkt_len;
|
||||
max_lro_size = max_rx_pktlen;
|
||||
}
|
||||
if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
|
||||
DRV_LOG(WARNING,
|
||||
|
@ -126,10 +126,6 @@ mvneta_dev_configure(struct rte_eth_dev *dev)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
|
||||
dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
|
||||
MRVL_NETA_ETH_HDRS_LEN;
|
||||
|
||||
if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
|
||||
priv->multiseg = 1;
|
||||
|
||||
@ -261,9 +257,6 @@ mvneta_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev->data->mtu = mtu;
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE;
|
||||
|
||||
if (!priv->ppio)
|
||||
/* It is OK. New MTU will be set later on mvneta_dev_start */
|
||||
return 0;
|
||||
|
@ -708,19 +708,18 @@ mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
struct mvneta_priv *priv = dev->data->dev_private;
|
||||
struct mvneta_rxq *rxq;
|
||||
uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
|
||||
uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
uint32_t max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
|
||||
|
||||
frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MVNETA_PKT_EFFEC_OFFS;
|
||||
|
||||
if (frame_size < max_rx_pkt_len) {
|
||||
if (frame_size < max_rx_pktlen) {
|
||||
MVNETA_LOG(ERR,
|
||||
"Mbuf size must be increased to %u bytes to hold up "
|
||||
"to %u bytes of data.",
|
||||
buf_size + max_rx_pkt_len - frame_size,
|
||||
max_rx_pkt_len);
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
MVNETA_LOG(INFO, "Setting max rx pkt len to %u",
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len);
|
||||
max_rx_pktlen + buf_size - frame_size,
|
||||
max_rx_pktlen);
|
||||
dev->data->mtu = frame_size - RTE_ETHER_HDR_LEN;
|
||||
MVNETA_LOG(INFO, "Setting MTU to %u", dev->data->mtu);
|
||||
}
|
||||
|
||||
if (dev->data->rx_queues[idx]) {
|
||||
|
@ -496,16 +496,11 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
|
||||
dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
|
||||
MRVL_PP2_ETH_HDRS_LEN;
|
||||
if (dev->data->mtu > priv->max_mtu) {
|
||||
MRVL_LOG(ERR, "inherit MTU %u from max_rx_pkt_len %u is larger than max_mtu %u\n",
|
||||
dev->data->mtu,
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len,
|
||||
priv->max_mtu);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (dev->data->dev_conf.rxmode.mtu > priv->max_mtu) {
|
||||
MRVL_LOG(ERR, "MTU %u is larger than max_mtu %u\n",
|
||||
dev->data->dev_conf.rxmode.mtu,
|
||||
priv->max_mtu);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
|
||||
@ -595,9 +590,6 @@ mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev->data->mtu = mtu;
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE;
|
||||
|
||||
if (!priv->ppio)
|
||||
return 0;
|
||||
|
||||
@ -1994,7 +1986,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
struct mrvl_priv *priv = dev->data->dev_private;
|
||||
struct mrvl_rxq *rxq;
|
||||
uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
|
||||
uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
||||
uint32_t max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
|
||||
int ret, tc, inq;
|
||||
uint64_t offloads;
|
||||
|
||||
@ -2009,17 +2001,15 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
frame_size = buf_size - RTE_PKTMBUF_HEADROOM -
|
||||
MRVL_PKT_EFFEC_OFFS + RTE_ETHER_CRC_LEN;
|
||||
if (frame_size < max_rx_pkt_len) {
|
||||
frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MRVL_PKT_EFFEC_OFFS;
|
||||
if (frame_size < max_rx_pktlen) {
|
||||
MRVL_LOG(WARNING,
|
||||
"Mbuf size must be increased to %u bytes to hold up "
|
||||
"to %u bytes of data.",
|
||||
buf_size + max_rx_pkt_len - frame_size,
|
||||
max_rx_pkt_len);
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
MRVL_LOG(INFO, "Setting max rx pkt len to %u",
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len);
|
||||
max_rx_pktlen + buf_size - frame_size,
|
||||
max_rx_pktlen);
|
||||
dev->data->mtu = frame_size - RTE_ETHER_HDR_LEN;
|
||||
MRVL_LOG(INFO, "Setting MTU to %u", dev->data->mtu);
|
||||
}
|
||||
|
||||
if (dev->data->rx_queues[idx]) {
|
||||
|
@ -370,7 +370,7 @@ nfp_check_offloads(struct rte_eth_dev *dev)
|
||||
}
|
||||
|
||||
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
|
||||
hw->mtu = rxmode->max_rx_pkt_len;
|
||||
hw->mtu = dev->data->mtu;
|
||||
|
||||
if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
|
||||
ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
|
||||
@ -963,16 +963,13 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
}
|
||||
|
||||
/* switch to jumbo mode if needed */
|
||||
if ((uint32_t)mtu > RTE_ETHER_MTU)
|
||||
if (mtu > RTE_ETHER_MTU)
|
||||
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
|
||||
/* update max frame size */
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
|
||||
|
||||
/* writing to configuration space */
|
||||
nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);
|
||||
nn_cfg_writel(hw, NFP_NET_CFG_MTU, mtu);
|
||||
|
||||
hw->mtu = mtu;
|
||||
|
||||
|
@ -552,13 +552,11 @@ octeontx_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (frame_size > OCCTX_L2_MAX_LEN)
|
||||
if (mtu > RTE_ETHER_MTU)
|
||||
nic->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
nic->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
|
||||
/* Update max_rx_pkt_len */
|
||||
data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
octeontx_log_info("Received pkt beyond maxlen %d will be dropped",
|
||||
frame_size);
|
||||
|
||||
@ -581,7 +579,7 @@ octeontx_recheck_rx_offloads(struct octeontx_rxq *rxq)
|
||||
buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
|
||||
|
||||
/* Setup scatter mode if needed by jumbo */
|
||||
if (data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
|
||||
if (data->mtu > buffsz) {
|
||||
nic->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
|
||||
nic->rx_offload_flags |= octeontx_rx_offload_flags(eth_dev);
|
||||
nic->tx_offload_flags |= octeontx_tx_offload_flags(eth_dev);
|
||||
@ -593,8 +591,8 @@ octeontx_recheck_rx_offloads(struct octeontx_rxq *rxq)
|
||||
evdev_priv->rx_offload_flags = nic->rx_offload_flags;
|
||||
evdev_priv->tx_offload_flags = nic->tx_offload_flags;
|
||||
|
||||
/* Setup MTU based on max_rx_pkt_len */
|
||||
nic->mtu = data->dev_conf.rxmode.max_rx_pkt_len - OCCTX_L2_OVERHEAD;
|
||||
/* Setup MTU */
|
||||
nic->mtu = data->mtu;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -615,7 +613,7 @@ octeontx_dev_start(struct rte_eth_dev *dev)
|
||||
octeontx_recheck_rx_offloads(rxq);
|
||||
}
|
||||
|
||||
/* Setting up the mtu based on max_rx_pkt_len */
|
||||
/* Setting up the mtu */
|
||||
ret = octeontx_dev_mtu_set(dev, nic->mtu);
|
||||
if (ret) {
|
||||
octeontx_log_err("Failed to set default MTU size %d", ret);
|
||||
|
@ -913,7 +913,7 @@ otx2_nix_enable_mseg_on_jumbo(struct otx2_eth_rxq *rxq)
|
||||
mbp_priv = rte_mempool_get_priv(rxq->pool);
|
||||
buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
|
||||
|
||||
if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
|
||||
if (eth_dev->data->mtu + (uint32_t)NIX_L2_OVERHEAD > buffsz) {
|
||||
dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
|
||||
dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
|
||||
|
||||
|
@ -59,14 +59,11 @@ otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (frame_size > NIX_L2_MAX_LEN)
|
||||
if (mtu > RTE_ETHER_MTU)
|
||||
dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
|
||||
/* Update max_rx_pkt_len */
|
||||
data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -75,7 +72,6 @@ otx2_nix_recalc_mtu(struct rte_eth_dev *eth_dev)
|
||||
{
|
||||
struct rte_eth_dev_data *data = eth_dev->data;
|
||||
struct otx2_eth_rxq *rxq;
|
||||
uint16_t mtu;
|
||||
int rc;
|
||||
|
||||
rxq = data->rx_queues[0];
|
||||
@ -83,10 +79,7 @@ otx2_nix_recalc_mtu(struct rte_eth_dev *eth_dev)
|
||||
/* Setup scatter mode if needed by jumbo */
|
||||
otx2_nix_enable_mseg_on_jumbo(rxq);
|
||||
|
||||
/* Setup MTU based on max_rx_pkt_len */
|
||||
mtu = data->dev_conf.rxmode.max_rx_pkt_len - NIX_L2_OVERHEAD;
|
||||
|
||||
rc = otx2_nix_mtu_set(eth_dev, mtu);
|
||||
rc = otx2_nix_mtu_set(eth_dev, data->mtu);
|
||||
if (rc)
|
||||
otx2_err("Failed to set default MTU size %d", rc);
|
||||
|
||||
|
@ -670,16 +670,11 @@ pfe_link_up(struct rte_eth_dev *dev)
|
||||
static int
|
||||
pfe_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
{
|
||||
int ret;
|
||||
struct pfe_eth_priv_s *priv = dev->data->dev_private;
|
||||
uint16_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
|
||||
|
||||
/*TODO Support VLAN*/
|
||||
ret = gemac_set_rx(priv->EMAC_baseaddr, frame_size);
|
||||
if (!ret)
|
||||
dev->data->mtu = mtu;
|
||||
|
||||
return ret;
|
||||
return gemac_set_rx(priv->EMAC_baseaddr, frame_size);
|
||||
}
|
||||
|
||||
/* pfe_eth_enet_addr_byte_mac
|
||||
|
@ -1312,12 +1312,6 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* If jumbo enabled adjust MTU */
|
||||
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
|
||||
eth_dev->data->mtu =
|
||||
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
|
||||
RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
|
||||
|
||||
if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
|
||||
eth_dev->data->scattered_rx = 1;
|
||||
|
||||
@ -2315,7 +2309,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
|
||||
struct rte_eth_dev_info dev_info = {0};
|
||||
struct qede_fastpath *fp;
|
||||
uint32_t max_rx_pkt_len;
|
||||
uint32_t frame_size;
|
||||
uint16_t bufsz;
|
||||
bool restart = false;
|
||||
@ -2327,8 +2320,8 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
DP_ERR(edev, "Error during getting ethernet device info\n");
|
||||
return rc;
|
||||
}
|
||||
max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
|
||||
frame_size = max_rx_pkt_len;
|
||||
|
||||
frame_size = mtu + QEDE_MAX_ETHER_HDR_LEN;
|
||||
if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
|
||||
DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
|
||||
mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN -
|
||||
@ -2368,7 +2361,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
fp->rxq->rx_buf_size = rc;
|
||||
}
|
||||
}
|
||||
if (frame_size > QEDE_ETH_MAX_LEN)
|
||||
if (mtu > RTE_ETHER_MTU)
|
||||
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
@ -2378,9 +2371,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
dev->data->dev_started = 1;
|
||||
}
|
||||
|
||||
/* update max frame size */
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -224,7 +224,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
|
||||
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
|
||||
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
|
||||
struct qede_rx_queue *rxq;
|
||||
uint16_t max_rx_pkt_len;
|
||||
uint16_t max_rx_pktlen;
|
||||
uint16_t bufsz;
|
||||
int rc;
|
||||
|
||||
@ -243,21 +243,21 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
|
||||
dev->data->rx_queues[qid] = NULL;
|
||||
}
|
||||
|
||||
max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
|
||||
max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
|
||||
|
||||
/* Fix up RX buffer size */
|
||||
bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
|
||||
/* cache align the mbuf size to simplfy rx_buf_size calculation */
|
||||
bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
|
||||
if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
|
||||
(max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
|
||||
(max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {
|
||||
if (!dev->data->scattered_rx) {
|
||||
DP_INFO(edev, "Forcing scatter-gather mode\n");
|
||||
dev->data->scattered_rx = 1;
|
||||
}
|
||||
}
|
||||
|
||||
rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
|
||||
rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pktlen);
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
|
||||
|
@ -1142,15 +1142,13 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
|
||||
/*
|
||||
* The driver does not use it, but other PMDs update jumbo frame
|
||||
* flag and max_rx_pkt_len when MTU is set.
|
||||
* flag when MTU is set.
|
||||
*/
|
||||
if (mtu > RTE_ETHER_MTU) {
|
||||
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
|
||||
rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
}
|
||||
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
|
||||
|
||||
sfc_adapter_unlock(sa);
|
||||
|
||||
sfc_log_init(sa, "done");
|
||||
|
@ -383,14 +383,10 @@ sfc_port_configure(struct sfc_adapter *sa)
|
||||
{
|
||||
const struct rte_eth_dev_data *dev_data = sa->eth_dev->data;
|
||||
struct sfc_port *port = &sa->port;
|
||||
const struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
|
||||
|
||||
sfc_log_init(sa, "entry");
|
||||
|
||||
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
|
||||
port->pdu = rxmode->max_rx_pkt_len;
|
||||
else
|
||||
port->pdu = EFX_MAC_PDU(dev_data->mtu);
|
||||
port->pdu = EFX_MAC_PDU(dev_data->mtu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1627,13 +1627,8 @@ tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
{
|
||||
struct pmd_internals *pmd = dev->data->dev_private;
|
||||
struct ifreq ifr = { .ifr_mtu = mtu };
|
||||
int err = 0;
|
||||
|
||||
err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
|
||||
if (!err)
|
||||
dev->data->mtu = mtu;
|
||||
|
||||
return err;
|
||||
return tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -176,7 +176,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
(frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
|
||||
return -EINVAL;
|
||||
|
||||
if (frame_size > NIC_HW_L2_MAX_LEN)
|
||||
if (mtu > RTE_ETHER_MTU)
|
||||
rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
@ -184,8 +184,6 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
if (nicvf_mbox_update_hw_max_frs(nic, mtu))
return -EINVAL;

/* Update max_rx_pkt_len */
rxmode->max_rx_pkt_len = mtu + RTE_ETHER_HDR_LEN;
nic->mtu = mtu;

for (i = 0; i < nic->sqs_count; i++)
@ -1723,16 +1721,13 @@ nicvf_dev_start(struct rte_eth_dev *dev)
}

/* Setup scatter mode if needed by jumbo */
if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * VLAN_TAG_SIZE > buffsz)
if (dev->data->mtu + (uint32_t)NIC_HW_L2_OVERHEAD + 2 * VLAN_TAG_SIZE > buffsz)
dev->data->scattered_rx = 1;
if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
dev->data->scattered_rx = 1;

/* Setup MTU based on max_rx_pkt_len or default */
mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
dev->data->dev_conf.rxmode.max_rx_pkt_len
- RTE_ETHER_HDR_LEN : RTE_ETHER_MTU;
/* Setup MTU */
mtu = dev->data->mtu;

if (nicvf_dev_set_mtu(dev, mtu)) {
PMD_INIT_LOG(ERR, "Failed to set default mtu size");
@ -3482,8 +3482,11 @@ txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return -EINVAL;
}

/* update max frame size */
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
/* switch to jumbo mode if needed */
if (mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

if (hw->mode)
wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
@ -55,6 +55,10 @@
#define TXGBE_5TUPLE_MAX_PRI 7
#define TXGBE_5TUPLE_MIN_PRI 1

/* The overhead from MTU to max frame size. */
#define TXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)

#define TXGBE_RSS_OFFLOAD_ALL ( \
ETH_RSS_IPV4 | \
ETH_RSS_NONFRAG_IPV4_TCP | \
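As a quick sanity check on the overhead definition added above (header plus CRC only, no VLAN tags): the standard 1500-byte MTU maps to the classic 1518-byte maximum frame. A small verification sketch, not part of the patch:

/* Hedged arithmetic sketch: MTU plus header and CRC equals the classic max frame. */
#include <assert.h>
#include <rte_ether.h>

static void
check_overhead(void)
{
	/* 1500 + 14 + 4 == 1518, i.e. RTE_ETHER_MTU maps to RTE_ETHER_MAX_LEN. */
	assert(RTE_ETHER_MTU + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN ==
	       RTE_ETHER_MAX_LEN);
}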
@ -1128,8 +1128,6 @@ txgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
if (txgbevf_rlpml_set_vf(hw, max_frame))
return -EINVAL;

/* update max frame size */
dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
return 0;
}
@ -4326,13 +4326,8 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
/*
* Configure jumbo frame support, if any.
*/
if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
TXGBE_FRMSZ_MAX(rx_conf->max_rx_pkt_len));
} else {
wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
TXGBE_FRMSZ_MAX(TXGBE_FRAME_SIZE_DFT));
}
wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
TXGBE_FRMSZ_MAX(dev->data->mtu + TXGBE_ETH_OVERHEAD));

/*
* If loopback mode is configured, set LPBK bit.
@ -4394,8 +4389,8 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);

/* It adds dual VLAN length for supporting dual VLAN */
if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * TXGBE_VLAN_TAG_SIZE > buf_size)
if (dev->data->mtu + TXGBE_ETH_OVERHEAD +
2 * TXGBE_VLAN_TAG_SIZE > buf_size)
dev->data->scattered_rx = 1;
if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
@ -4847,9 +4842,9 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
* VF packets received can work in all cases.
*/
if (txgbevf_rlpml_set_vf(hw,
(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len)) {
(uint16_t)dev->data->mtu + TXGBE_ETH_OVERHEAD)) {
PMD_INIT_LOG(ERR, "Set max packet length to %d failed.",
dev->data->dev_conf.rxmode.max_rx_pkt_len);
dev->data->mtu + TXGBE_ETH_OVERHEAD);
return -EINVAL;
}
@ -4911,7 +4906,7 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
/* It adds dual VLAN length for supporting dual VLAN */
(rxmode->max_rx_pkt_len +
(dev->data->mtu + TXGBE_ETH_OVERHEAD +
2 * TXGBE_VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
@ -924,7 +924,6 @@ virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}

hw->max_rx_pkt_len = frame_size;
dev->data->dev_conf.rxmode.max_rx_pkt_len = hw->max_rx_pkt_len;

return 0;
}
@ -2107,14 +2106,10 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return ret;
}

if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
(rxmode->max_rx_pkt_len > hw->max_mtu + ether_hdr_len))
if (rxmode->mtu > hw->max_mtu)
req_features &= ~(1ULL << VIRTIO_NET_F_MTU);

if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
hw->max_rx_pkt_len = rxmode->max_rx_pkt_len;
else
hw->max_rx_pkt_len = ether_hdr_len + dev->data->mtu;
hw->max_rx_pkt_len = ether_hdr_len + rxmode->mtu;

if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM))
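The virtio configure hunk above derives its internal frame limit straight from 'rxmode.mtu', with no branch on the jumbo-frame flag. A hedged sketch of that configure-time pattern, not virtio's code; 'DRV_L2_OVERHEAD' is an assumed per-device constant:

/* Hedged sketch: derive a frame limit from the requested MTU at configure time. */
#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_ether.h>

#define DRV_L2_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) /* assumption */

static uint32_t
configure_frame_limit(const struct rte_eth_conf *conf)
{
	/* rxmode.mtu is always valid now, jumbo flag or not. */
	return conf->rxmode.mtu + DRV_L2_OVERHEAD;
}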
@ -71,7 +71,6 @@ mbuf_input(struct rte_mbuf *mbuf)
static const struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.txmode = {
@ -115,7 +115,6 @@ static struct rte_mempool *mbuf_pool;
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.rx_adv_conf = {
@ -81,7 +81,6 @@ struct app_stats prev_app_stats;
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@ -284,7 +284,6 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
.rx_adv_conf = {
.rss_conf = {
@ -615,7 +615,6 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
.rx_adv_conf = {
.rss_conf = {
@ -59,14 +59,6 @@ static struct{
} parm_config;
const char cb_port_delim[] = ":";

/* Ethernet ports configured with default settings using struct. 8< */
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
};
/* >8 End of configuration of Ethernet ports. */

/* Creation of flow classifier object. 8< */
struct flow_classifier {
struct rte_flow_classifier *cls;
@ -200,7 +192,7 @@ static struct rte_flow_attr attr;
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
struct rte_eth_conf port_conf;
struct rte_ether_addr addr;
const uint16_t rx_rings = 1, tx_rings = 1;
int retval;
@ -211,6 +203,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
if (!rte_eth_dev_is_valid_port(port))
return -1;

memset(&port_conf, 0, sizeof(struct rte_eth_conf));

retval = rte_eth_dev_info_get(port, &dev_info);
if (retval != 0) {
printf("Error during getting device (port %u) info: %s\n",
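The flow_classify hunks above drop the static rxmode initializer and simply zero the whole config, leaving the MTU to the ethdev default. A hedged application sketch of that pattern (port id and the 1/1 queue split are illustrative only, and the default-MTU fallback is assumed from the example's behaviour here):

/* Hedged sketch: configure a port without naming any frame length. */
#include <string.h>
#include <rte_ethdev.h>

static int
configure_port_with_default_mtu(uint16_t port_id)
{
	struct rte_eth_conf port_conf;

	memset(&port_conf, 0, sizeof(port_conf));
	/* rxmode.mtu left at 0: the ethdev layer applies its standard default. */
	return rte_eth_dev_configure(port_id, 1, 1, &port_conf);
}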
@ -820,7 +820,6 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
static const struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = RTE_ETHER_MAX_LEN
},
.rx_adv_conf = {
.rss_conf = {
@ -145,7 +145,8 @@ struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
.rxmode = {
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
RTE_ETHER_CRC_LEN,
.split_hdr_size = 0,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_SCATTER |
@ -917,9 +918,9 @@ main(int argc, char **argv)
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));

local_port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
dev_info.max_rx_pktlen,
local_port_conf.rxmode.max_rx_pkt_len);
local_port_conf.rxmode.mtu = RTE_MIN(
dev_info.max_mtu,
local_port_conf.rxmode.mtu);

/* get the lcore_id for this port */
while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
@ -962,8 +963,7 @@ main(int argc, char **argv)
}

/* set the mtu to the maximum received packet size */
ret = rte_eth_dev_set_mtu(portid,
local_port_conf.rxmode.max_rx_pkt_len - MTU_OVERHEAD);
ret = rte_eth_dev_set_mtu(portid, local_port_conf.rxmode.mtu);
if (ret < 0) {
printf("\n");
rte_exit(EXIT_FAILURE, "Set MTU failed: "
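The two ip_fragmentation hunks above clamp the requested MTU against the device-reported 'max_mtu' and then apply it directly, instead of juggling a frame length minus an overhead. A hedged sketch of that flow, not the example's exact code; the 9000-byte request is only an illustrative value:

/* Hedged sketch: clamp a requested MTU to the device limit, then apply it. */
#include <stdint.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static int
apply_clamped_mtu(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Never ask for more than the device reports it can handle. */
	conf->rxmode.mtu = RTE_MIN(dev_info.max_mtu, (uint16_t)9000);
	return rte_eth_dev_set_mtu(port_id, (uint16_t)conf->rxmode.mtu);
}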
@ -46,7 +46,7 @@ static struct rte_eth_conf port_conf_default = {
.link_speeds = 0,
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
.mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU */
.split_hdr_size = 0, /* Header split buffer size */
},
.rx_adv_conf = {
@ -161,7 +161,8 @@ static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
RTE_ETHER_CRC_LEN,
.split_hdr_size = 0,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME),
@ -881,7 +882,8 @@ setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)
/* mbufs stored in the fragment table. 8< */
nb_mbuf = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * MAX_FRAG_NUM;
nb_mbuf *= (port_conf.rxmode.max_rx_pkt_len + BUF_SIZE - 1) / BUF_SIZE;
nb_mbuf *= (port_conf.rxmode.mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
+ BUF_SIZE - 1) / BUF_SIZE;
nb_mbuf *= 2; /* ipv4 and ipv6 */
nb_mbuf += nb_rxd + nb_txd;
@ -1053,9 +1055,9 @@ main(int argc, char **argv)
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));

local_port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
dev_info.max_rx_pktlen,
local_port_conf.rxmode.max_rx_pkt_len);
local_port_conf.rxmode.mtu = RTE_MIN(
dev_info.max_mtu,
local_port_conf.rxmode.mtu);

/* get the lcore_id for this port */
while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
@ -234,7 +234,6 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
@ -2152,7 +2151,6 @@ cryptodevs_init(uint16_t req_queue_num)
static void
port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
{
uint32_t frame_size;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
uint16_t nb_tx_queue, nb_rx_queue;
@ -2200,10 +2198,9 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
nb_rx_queue, nb_tx_queue);

frame_size = MTU_TO_FRAMELEN(mtu_size);
if (frame_size > local_port_conf.rxmode.max_rx_pkt_len)
if (mtu_size > RTE_ETHER_MTU)
local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
local_port_conf.rxmode.max_rx_pkt_len = frame_size;
local_port_conf.rxmode.mtu = mtu_size;

if (multi_seg_required()) {
local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
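The ipsec-secgw hunk above keys the jumbo-frame offload off the MTU value itself rather than a recomputed frame length. A hedged sketch of that check, not the example's full port_init:

/* Hedged sketch: request jumbo frames purely from the configured MTU. */
#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_ether.h>

static void
request_jumbo_if_needed(struct rte_eth_conf *conf, uint16_t mtu)
{
	conf->rxmode.mtu = mtu;
	/* Anything above the standard 1500-byte MTU needs the jumbo offload. */
	if (mtu > RTE_ETHER_MTU)
		conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
}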
@ -109,7 +109,8 @@ static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static struct rte_eth_conf port_conf = {
.rxmode = {
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
RTE_ETHER_CRC_LEN,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME,
},
@ -714,9 +715,9 @@ main(int argc, char **argv)
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));

local_port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
dev_info.max_rx_pktlen,
local_port_conf.rxmode.max_rx_pkt_len);
local_port_conf.rxmode.mtu = RTE_MIN(
dev_info.max_mtu,
local_port_conf.rxmode.mtu);

/* get the lcore_id for this port */
while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||