net/dpaa: fix jumbo packet Rx in case of VSP

For packets larger than 2K bytes, segmented packets were being
received in DPDK even when the mbuf size was greater than the packet
length. This was caused by the buffer size configured in the VSP.

This patch fixes the issue by configuring the VSP according to the
mbuf size configured during mempool configuration.

Fixes: e4abd4ff18 ("net/dpaa: support virtual storage profile")
Cc: stable@dpdk.org

Signed-off-by: Rohit Raj <rohit.raj@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
This commit is contained in:
Rohit Raj 2022-10-07 08:57:37 +05:30 committed by Ferruh Yigit
parent 79711846f6
commit 65afdda04b
3 changed files with 11 additions and 12 deletions

View File

@ -989,8 +989,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
} else {
DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
" larger than a single mbuf (%u) and scattered"
" mode has not been requested",
max_rx_pktlen, buffsz - RTE_PKTMBUF_HEADROOM);
" mode has not been requested", max_rx_pktlen, buffsz);
}
dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
@ -1005,7 +1004,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (vsp_id >= 0) {
ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id,
DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid,
fif);
fif, buffsz + RTE_PKTMBUF_HEADROOM);
if (ret) {
DPAA_PMD_ERR("dpaa_port_vsp_update failed");
return ret;

View File

@ -939,7 +939,7 @@ int dpaa_fm_term(void)
static int dpaa_port_vsp_configure(struct dpaa_if *dpaa_intf,
uint8_t vsp_id, t_handle fman_handle,
struct fman_if *fif)
struct fman_if *fif, u32 mbuf_data_room_size)
{
t_fm_vsp_params vsp_params;
t_fm_buffer_prefix_content buf_prefix_cont;
@ -976,10 +976,8 @@ static int dpaa_port_vsp_configure(struct dpaa_if *dpaa_intf,
return -1;
}
vsp_params.ext_buf_pools.num_of_pools_used = 1;
vsp_params.ext_buf_pools.ext_buf_pool[0].id =
dpaa_intf->vsp_bpid[vsp_id];
vsp_params.ext_buf_pools.ext_buf_pool[0].size =
RTE_MBUF_DEFAULT_BUF_SIZE;
vsp_params.ext_buf_pools.ext_buf_pool[0].id = dpaa_intf->vsp_bpid[vsp_id];
vsp_params.ext_buf_pools.ext_buf_pool[0].size = mbuf_data_room_size;
dpaa_intf->vsp_handle[vsp_id] = fm_vsp_config(&vsp_params);
if (!dpaa_intf->vsp_handle[vsp_id]) {
@ -1023,7 +1021,7 @@ static int dpaa_port_vsp_configure(struct dpaa_if *dpaa_intf,
int dpaa_port_vsp_update(struct dpaa_if *dpaa_intf,
bool fmc_mode, uint8_t vsp_id, uint32_t bpid,
struct fman_if *fif)
struct fman_if *fif, u32 mbuf_data_room_size)
{
int ret = 0;
t_handle fman_handle;
@ -1054,7 +1052,8 @@ int dpaa_port_vsp_update(struct dpaa_if *dpaa_intf,
dpaa_intf->vsp_bpid[vsp_id] = bpid;
return dpaa_port_vsp_configure(dpaa_intf, vsp_id, fman_handle, fif);
return dpaa_port_vsp_configure(dpaa_intf, vsp_id, fman_handle, fif,
mbuf_data_room_size);
}
int dpaa_port_vsp_cleanup(struct dpaa_if *dpaa_intf, struct fman_if *fif)

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017,2019 NXP
* Copyright 2017,2019,2022 NXP
*/
#ifndef __DPAA_FLOW_H__
@ -11,7 +11,8 @@ int dpaa_fm_config(struct rte_eth_dev *dev, uint64_t req_dist_set);
int dpaa_fm_deconfig(struct dpaa_if *dpaa_intf, struct fman_if *fif);
void dpaa_write_fm_config_to_file(void);
int dpaa_port_vsp_update(struct dpaa_if *dpaa_intf,
bool fmc_mode, uint8_t vsp_id, uint32_t bpid, struct fman_if *fif);
bool fmc_mode, uint8_t vsp_id, uint32_t bpid, struct fman_if *fif,
u32 mbuf_data_room_size);
int dpaa_port_vsp_cleanup(struct dpaa_if *dpaa_intf, struct fman_if *fif);
int dpaa_port_fmc_init(struct fman_if *fif,
uint32_t *fqids, int8_t *vspids, int max_nb_rxq);