/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2020 NXP
 *
 */
/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>

#include <bus_dpaa_driver.h>
#include <rte_dpaa_logs.h>
#include <dpaa_mempool.h>

#include <dpaa_ethdev.h>
#include <dpaa_rxtx.h>
#include <dpaa_flow.h>
#include <rte_pmd_dpaa.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <fsl_fman.h>
#include <process.h>
#include <fmlib/fm_ext.h>

#define CHECK_INTERVAL		100  /* 100ms */
#define MAX_REPEAT_TIME		90   /* 9s (90 * 100ms) in total */

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		RTE_ETH_RX_OFFLOAD_SCATTER;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_RSS_HASH;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
static int fmc_q = 1;	/* Indicates the use of static fmc for distribution */
static int default_q;	/* use the default queue - FMC is not executed */
/* At present we only allow up to 4 push mode queues by default, as each of
 * these queues needs a dedicated portal and we are short of portals.
 */
#define DPAA_MAX_PUSH_MODE_QUEUE	8
#define DPAA_DEFAULT_PUSH_MODE_QUEUE	4

static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
static int dpaa_push_queue_idx;	/* Queue indexes which are in push mode */

/* Per RX FQ Taildrop in frame count */
static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;

/* Per TX FQ Taildrop in frame count, disabled by default */
static unsigned int td_tx_threshold;

struct rte_dpaa_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t offset;
};

static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
	{"rx_align_err",
		offsetof(struct dpaa_if_stats, raln)},
	{"rx_valid_pause",
		offsetof(struct dpaa_if_stats, rxpf)},
	{"rx_fcs_err",
		offsetof(struct dpaa_if_stats, rfcs)},
	{"rx_vlan_frame",
		offsetof(struct dpaa_if_stats, rvlan)},
	{"rx_frame_err",
		offsetof(struct dpaa_if_stats, rerr)},
	{"rx_drop_err",
		offsetof(struct dpaa_if_stats, rdrp)},
	{"rx_undersized",
		offsetof(struct dpaa_if_stats, rund)},
	{"rx_oversize_err",
		offsetof(struct dpaa_if_stats, rovr)},
	{"rx_fragment_pkt",
		offsetof(struct dpaa_if_stats, rfrg)},
	{"tx_valid_pause",
		offsetof(struct dpaa_if_stats, txpf)},
	{"tx_fcs_err",
		offsetof(struct dpaa_if_stats, terr)},
	{"tx_vlan_frame",
		offsetof(struct dpaa_if_stats, tvlan)},
	{"tx_undersized",	/* fixed duplicate "rx_undersized" name; maps to the Tx counter (tund) */
		offsetof(struct dpaa_if_stats, tund)},
};

static struct rte_dpaa_driver rte_dpaa_pmd;

static int
dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);

static int dpaa_eth_link_update(struct rte_eth_dev *dev,
				int wait_to_complete __rte_unused);

static void dpaa_interrupt_handler(void *param);

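/* Fill the default initfq options used for Rx frame queues serviced in poll
 * mode: enable AVOIDBLOCK, prefer-in-cache and context-A stashing (annotation
 * stashing is skipped on LS1046A parts).
 */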
static inline void
dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
{
	memset(opts, 0, sizeof(struct qm_mcc_initfq));
	opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
	opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
			    QM_FQCTRL_PREFERINCACHE;
	opts->fqd.context_a.stashing.exclusive = 0;
	if (dpaa_svr_family != SVR_LS1046A_FAMILY)
		opts->fqd.context_a.stashing.annotation_cl =
			DPAA_IF_RX_ANNOTATION_STASH;
	opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
	opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
}

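/* Validate and apply a new MTU: the resulting frame must fit either in a
 * single buffer (when scattered Rx is off) or in at most DPAA_SGT_MAX_ENTRIES
 * buffers, then program the FMAN maximum frame length accordingly.
 */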
static int
dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
				+ VLAN_TAG_SIZE;
	uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (dev->data->min_rx_buf_size &&
	    !dev->data->scattered_rx && frame_size > buffsz) {
		DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
		return -EINVAL;
	}

	/* check <seg size> * <max_seg> >= max_frame */
	if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
	    (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
		DPAA_PMD_ERR("Too big to fit for Max SG list %d",
			     buffsz * DPAA_SGT_MAX_ENTRIES);
		return -EINVAL;
	}

	fman_if_set_maxfrm(dev->process_private, frame_size);

	return 0;
}

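/* Ethdev configure callback: validates requested offloads, programs the
 * maximum frame length, enables scattered Rx if requested, applies the
 * FMAN/FMC configuration, hooks up the link-status interrupt and, when the
 * link is up, applies a requested fixed speed or restarts autonegotiation.
 */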
static int
dpaa_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_device *rdev = dev->device;
	struct rte_eth_link *link = &dev->data->dev_link;
	struct rte_dpaa_device *dpaa_dev;
	struct fman_if *fif = dev->process_private;
	struct __fman_if *__fif;
	struct rte_intr_handle *intr_handle;
	uint32_t max_rx_pktlen;
	int speed, duplex;
	int ret, rx_status;

	PMD_INIT_FUNC_TRACE();

	dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
	intr_handle = dpaa_dev->intr_handle;
	__fif = container_of(fif, struct __fman_if, __if);

	/* Check if interface is enabled in case of shared MAC */
	if (fif->is_shared_mac) {
		rx_status = fman_if_get_rx_status(fif);
		if (!rx_status) {
			DPAA_PMD_ERR("%s Interface not enabled in kernel!",
				     dpaa_intf->name);
			return -EHOSTDOWN;
		}
	}

	/* Rx offloads which are enabled by default */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA_PMD_INFO(
		"Some of rx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads which are enabled by default */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA_PMD_INFO(
		"Some of tx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		tx_offloads, dev_tx_offloads_nodis);
	}

	max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
			RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
	if (max_rx_pktlen > DPAA_MAX_RX_PKT_LEN) {
		DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
			"supported is %d",
			max_rx_pktlen, DPAA_MAX_RX_PKT_LEN);
		max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
	}
	fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
		DPAA_PMD_DEBUG("enabling scatter mode");
		fman_if_set_sg(dev->process_private, 1);
		dev->data->scattered_rx = 1;
	}

	if (!(default_q || fmc_q)) {
		if (dpaa_fm_config(dev,
			eth_conf->rx_adv_conf.rss_conf.rss_hf)) {
			dpaa_write_fm_config_to_file();
			DPAA_PMD_ERR("FM port configuration: Failed\n");
			return -1;
		}
		dpaa_write_fm_config_to_file();
	}

	/* if the interrupts were configured on this device */
	if (intr_handle && rte_intr_fd_get(intr_handle)) {
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			rte_intr_callback_register(intr_handle,
						   dpaa_interrupt_handler,
						   (void *)dev);

		ret = dpaa_intr_enable(__fif->node_name,
				       rte_intr_fd_get(intr_handle));
		if (ret) {
			if (dev->data->dev_conf.intr_conf.lsc != 0) {
				rte_intr_callback_unregister(intr_handle,
							     dpaa_interrupt_handler,
							     (void *)dev);
				if (ret == EINVAL)
					printf("Failed to enable interrupt: Not Supported\n");
				else
					printf("Failed to enable interrupt\n");
			}
			dev->data->dev_conf.intr_conf.lsc = 0;
			dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
		}
	}

	/* Wait for link status to get updated */
	if (!link->link_status)
		sleep(1);

	/* Configure link only if link is UP */
	if (link->link_status) {
		if (eth_conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
			/* Start autoneg only if link is not in autoneg mode */
			if (!link->link_autoneg)
				dpaa_restart_link_autoneg(__fif->node_name);
		} else if (eth_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
			switch (eth_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
			case RTE_ETH_LINK_SPEED_10M_HD:
				speed = RTE_ETH_SPEED_NUM_10M;
				duplex = RTE_ETH_LINK_HALF_DUPLEX;
				break;
			case RTE_ETH_LINK_SPEED_10M:
				speed = RTE_ETH_SPEED_NUM_10M;
				duplex = RTE_ETH_LINK_FULL_DUPLEX;
				break;
			case RTE_ETH_LINK_SPEED_100M_HD:
				speed = RTE_ETH_SPEED_NUM_100M;
				duplex = RTE_ETH_LINK_HALF_DUPLEX;
				break;
			case RTE_ETH_LINK_SPEED_100M:
				speed = RTE_ETH_SPEED_NUM_100M;
				duplex = RTE_ETH_LINK_FULL_DUPLEX;
				break;
			case RTE_ETH_LINK_SPEED_1G:
				speed = RTE_ETH_SPEED_NUM_1G;
				duplex = RTE_ETH_LINK_FULL_DUPLEX;
				break;
			case RTE_ETH_LINK_SPEED_2_5G:
				speed = RTE_ETH_SPEED_NUM_2_5G;
				duplex = RTE_ETH_LINK_FULL_DUPLEX;
				break;
			case RTE_ETH_LINK_SPEED_10G:
				speed = RTE_ETH_SPEED_NUM_10G;
				duplex = RTE_ETH_LINK_FULL_DUPLEX;
				break;
			default:
				speed = RTE_ETH_SPEED_NUM_NONE;
				duplex = RTE_ETH_LINK_FULL_DUPLEX;
				break;
			}
			/* Set link speed */
			dpaa_update_link_speed(__fif->node_name, speed, duplex);
		} else {
			/* Manual autoneg - custom advertisement speed. */
			printf("Custom Advertisement speeds not supported\n");
		}
	}

	return 0;
}

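/* Report the packet types recognised by the Rx parser when the default Rx
 * burst routine is in use.
 */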
static const uint32_t *
dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_TUNNEL_ESP
	};

	PMD_INIT_FUNC_TRACE();

	if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
		return ptypes;
	return NULL;
}

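/* LSC interrupt handler: drain the eventfd, refresh the link status and
 * notify applications registered for RTE_ETH_EVENT_INTR_LSC.
 */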
static void dpaa_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct rte_device *rdev = dev->device;
	struct rte_dpaa_device *dpaa_dev;
	struct rte_intr_handle *intr_handle;
	uint64_t buf;
	int bytes_read;

	dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
	intr_handle = dpaa_dev->intr_handle;

	if (rte_intr_fd_get(intr_handle) < 0)
		return;

	bytes_read = read(rte_intr_fd_get(intr_handle), &buf,
			  sizeof(uint64_t));
	if (bytes_read < 0)
		DPAA_PMD_ERR("Error reading eventfd\n");
	dpaa_eth_link_update(dev, 0);
	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

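/* Start the port: write the FM configuration if needed, switch the Tx burst
 * callback from the drop-all stub to the real transmit routine and enable Rx
 * on the FMAN interface.
 */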
static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (!(default_q || fmc_q))
		dpaa_write_fm_config_to_file();

	/* Change tx callback to the real one */
	if (dpaa_intf->cgr_tx)
		dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
	else
		dev->tx_pkt_burst = dpaa_eth_queue_tx;

	fman_if_enable_rx(dev->process_private);

	return 0;
}

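/* Stop the port: disable Rx (unless the MAC is shared with the kernel) and
 * point the Tx burst callback back at the drop-all stub.
 */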
static int dpaa_eth_dev_stop(struct rte_eth_dev *dev)
{
	struct fman_if *fif = dev->process_private;

	PMD_INIT_FUNC_TRACE();
	dev->data->dev_started = 0;

	if (!fif->is_shared_mac)
		fman_if_disable_rx(fif);
	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;

	return 0;
}

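/* Close the port: stop it, undo the FM configuration, restore autonegotiation,
 * tear down the LSC interrupt and release the queues and congestion groups
 * allocated at init time.
 */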
static int dpaa_eth_dev_close(struct rte_eth_dev *dev)
{
	struct fman_if *fif = dev->process_private;
	struct __fman_if *__fif;
	struct rte_device *rdev = dev->device;
	struct rte_dpaa_device *dpaa_dev;
	struct rte_intr_handle *intr_handle;
	struct rte_eth_link *link = &dev->data->dev_link;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	int loop;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!dpaa_intf) {
		DPAA_PMD_WARN("Already closed or not started");
		return -1;
	}

	/* DPAA FM deconfig */
	if (!(default_q || fmc_q)) {
		if (dpaa_fm_deconfig(dpaa_intf, dev->process_private))
			DPAA_PMD_WARN("DPAA FM deconfig failed\n");
	}

	dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
	intr_handle = dpaa_dev->intr_handle;
	__fif = container_of(fif, struct __fman_if, __if);

	ret = dpaa_eth_dev_stop(dev);

	/* Reset link to autoneg */
	if (link->link_status && !link->link_autoneg)
		dpaa_restart_link_autoneg(__fif->node_name);

	if (intr_handle && rte_intr_fd_get(intr_handle) &&
	    dev->data->dev_conf.intr_conf.lsc != 0) {
		dpaa_intr_disable(__fif->node_name);
		rte_intr_callback_unregister(intr_handle,
					     dpaa_interrupt_handler,
					     (void *)dev);
	}

	/* release configuration memory */
	rte_free(dpaa_intf->fc_conf);

	/* Release RX congestion Groups */
	if (dpaa_intf->cgr_rx) {
		for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
			qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
	}

	rte_free(dpaa_intf->cgr_rx);
	dpaa_intf->cgr_rx = NULL;
	/* Release TX congestion Groups */
	if (dpaa_intf->cgr_tx) {
		for (loop = 0; loop < MAX_DPAA_CORES; loop++)
			qman_delete_cgr(&dpaa_intf->cgr_tx[loop]);
		rte_free(dpaa_intf->cgr_tx);
		dpaa_intf->cgr_tx = NULL;
	}

	rte_free(dpaa_intf->rx_queues);
	dpaa_intf->rx_queues = NULL;

	rte_free(dpaa_intf->tx_queues);
	dpaa_intf->tx_queues = NULL;

	return ret;
}

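/* Report a firmware/hardware version string of the form
 * "SVR:<svr>-fman-v<rev>", with the SVR read from the SoC identification file.
 */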
static int
dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
		    char *fw_version,
		    size_t fw_size)
{
	int ret;
	FILE *svr_file = NULL;
	unsigned int svr_ver = 0;

	PMD_INIT_FUNC_TRACE();

	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
	if (!svr_file) {
		DPAA_PMD_ERR("Unable to open SoC device");
		return -ENOTSUP; /* Not supported on this infra */
	}
	if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
		dpaa_svr_family = svr_ver & SVR_MASK;
	else
		DPAA_PMD_ERR("Unable to read SoC device");

	fclose(svr_file);

	ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
		       svr_ver, fman_ip_rev);
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}

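/* Fill in device capabilities: queue counts, MAC filter limits, RSS offloads,
 * speed capabilities derived from the FMAN MAC type, and the default burst and
 * ring-size preferences.
 */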
static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct fman_if *fif = dev->process_private;

	DPAA_PMD_DEBUG(": %s", dpaa_intf->name);

	dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
	dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
	dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;

	if (fif->mac_type == fman_mac_1g) {
		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
					| RTE_ETH_LINK_SPEED_10M
					| RTE_ETH_LINK_SPEED_100M_HD
					| RTE_ETH_LINK_SPEED_100M
					| RTE_ETH_LINK_SPEED_1G;
	} else if (fif->mac_type == fman_mac_2_5g) {
		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
					| RTE_ETH_LINK_SPEED_10M
					| RTE_ETH_LINK_SPEED_100M_HD
					| RTE_ETH_LINK_SPEED_100M
					| RTE_ETH_LINK_SPEED_1G
					| RTE_ETH_LINK_SPEED_2_5G;
	} else if (fif->mac_type == fman_mac_10g) {
		dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
					| RTE_ETH_LINK_SPEED_10M
					| RTE_ETH_LINK_SPEED_100M_HD
					| RTE_ETH_LINK_SPEED_100M
					| RTE_ETH_LINK_SPEED_1G
					| RTE_ETH_LINK_SPEED_2_5G
					| RTE_ETH_LINK_SPEED_10G;
	} else {
		DPAA_PMD_ERR("invalid link_speed: %s, %d",
			     dpaa_intf->name, fif->mac_type);
		return -EINVAL;
	}

	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
	dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_txportconf.ring_size = CGR_TX_CGR_THRESH;
	dev_info->default_rxportconf.ring_size = CGR_RX_PERFQ_THRESH;

	return 0;
}

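/* Burst-mode reporting: translate the first matching Rx/Tx offload flag in the
 * current configuration into a human-readable string for
 * rte_eth_rx/tx_burst_mode_get().
 */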
static int
dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
			__rte_unused uint16_t queue_id,
			struct rte_eth_burst_mode *mode)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int ret = -EINVAL;
	unsigned int i;
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} rx_offload_map[] = {
		{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
		{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
		{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
		{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
		{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
		{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
	};

	/* Update Rx offload info */
	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
		if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				 rx_offload_map[i].output);
			ret = 0;
			break;
		}
	}
	return ret;
}

static int
dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
			__rte_unused uint16_t queue_id,
			struct rte_eth_burst_mode *mode)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int ret = -EINVAL;
	unsigned int i;
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} tx_offload_map[] = {
		{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
		{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
		{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
		{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
		{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
		{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
		{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
		{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
	};

	/* Update Tx offload info */
	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
		if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				 tx_offload_map[i].output);
			ret = 0;
			break;
		}
	}
	return ret;
}

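/* Link status callback: when the LSC interrupt is enabled the status is read
 * through the DPAA ioctl interface (polling up to MAX_REPEAT_TIME while the
 * link is down and wait_to_complete is set); otherwise the cached interface
 * state is reported. Older ioctl versions cannot report speed/duplex, so
 * defaults are derived from the MAC type.
 */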
static int dpaa_eth_link_update(struct rte_eth_dev *dev,
				int wait_to_complete)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_link *link = &dev->data->dev_link;
	struct fman_if *fif = dev->process_private;
	struct __fman_if *__fif = container_of(fif, struct __fman_if, __if);
	int ret, ioctl_version;
	uint8_t count;

	PMD_INIT_FUNC_TRACE();

	ioctl_version = dpaa_get_ioctl_version_number();

	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		for (count = 0; count <= MAX_REPEAT_TIME; count++) {
			ret = dpaa_get_link_status(__fif->node_name, link);
			if (ret)
				return ret;
			if (link->link_status == RTE_ETH_LINK_DOWN &&
			    wait_to_complete)
				rte_delay_ms(CHECK_INTERVAL);
			else
				break;
		}
	} else {
		link->link_status = dpaa_intf->valid;
	}

	if (ioctl_version < 2) {
		link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
		link->link_autoneg = RTE_ETH_LINK_AUTONEG;

		if (fif->mac_type == fman_mac_1g)
			link->link_speed = RTE_ETH_SPEED_NUM_1G;
		else if (fif->mac_type == fman_mac_2_5g)
			link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
		else if (fif->mac_type == fman_mac_10g)
			link->link_speed = RTE_ETH_SPEED_NUM_10G;
		else
			DPAA_PMD_ERR("invalid link_speed: %s, %d",
				     dpaa_intf->name, fif->mac_type);
	}

	DPAA_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
		      link->link_status ? "Up" : "Down");
	return 0;
}

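/* Basic statistics are read from (or cleared in) the FMAN MAC counters. */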
static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
			      struct rte_eth_stats *stats)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_stats_get(dev->process_private, stats);
	return 0;
}

static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_stats_reset(dev->process_private);

	return 0;
}

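/* Extended statistics: dpaa_xstats_strings maps each xstat name to an offset
 * inside struct dpaa_if_stats, so every value is picked out of the full FMAN
 * counter dump at offset/8.
 */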
static int
dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		    unsigned int n)
{
	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
	uint64_t values[sizeof(struct dpaa_if_stats) / 8];

	if (n < num)
		return num;

	if (xstats == NULL)
		return 0;

	fman_if_stats_get_all(dev->process_private, values,
			      sizeof(struct dpaa_if_stats) / 8);

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
	}
	return i;
}

static int
dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);

	if (limit < stat_cnt)
		return stat_cnt;

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			strlcpy(xstats_names[i].name,
				dpaa_xstats_strings[i].name,
				sizeof(xstats_names[i].name));

	return stat_cnt;
}

static int
dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		      uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];

	if (!ids) {
		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		fman_if_stats_get_all(dev->process_private, values_copy,
				      sizeof(struct dpaa_if_stats) / 8);

		for (i = 0; i < stat_cnt; i++)
			values[i] =
				values_copy[dpaa_xstats_strings[i].offset / 8];

		return stat_cnt;
	}

	dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA_PMD_ERR("id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
dpaa_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	const uint64_t *ids,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa_xstats_get_names(dev, xstats_names, limit);

	dpaa_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA_PMD_ERR("id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}

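/* Promiscuous mode and the multicast filter table are controlled directly
 * through the FMAN interface.
 */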
static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_promiscuous_enable(dev->process_private);

	return 0;
}

static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_promiscuous_disable(dev->process_private);

	return 0;
}

static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_set_mcast_filter_table(dev->process_private);

	return 0;
}

static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_reset_mcast_filter_table(dev->process_private);

	return 0;
}

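/* Program the FMAN port for the attached mempool: internal-context stashing
 * parameters, frame-descriptor offset (mbuf headroom plus hardware reserve)
 * and the buffer pool whose size matches the mbuf data room.
 */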
static void dpaa_fman_if_pool_setup(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct fman_if_ic_params icp;
	uint32_t fd_offset;
	uint32_t bp_size;

	memset(&icp, 0, sizeof(icp));
	/* set ICEOF to the default value, which is 0 */
	icp.iciof = DEFAULT_ICIOF;
	icp.iceof = DEFAULT_RX_ICEOF;
	icp.icsz = DEFAULT_ICSZ;
	fman_if_set_ic_params(dev->process_private, &icp);

	fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
	fman_if_set_fdoff(dev->process_private, fd_offset);

	/* Buffer pool size should be equal to Dataroom Size */
	bp_size = rte_pktmbuf_data_room_size(dpaa_intf->bp_info->mp);

	fman_if_set_bp(dev->process_private,
		       dpaa_intf->bp_info->mp->size,
		       dpaa_intf->bp_info->bpid, bp_size);
}

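/* When the port uses VSP profiles, make sure every Rx queue mapped to the same
 * profile uses the same buffer pool; return -1 on a mismatch.
 */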
static inline int dpaa_eth_rx_queue_bp_check(struct rte_eth_dev *dev,
					     int8_t vsp_id, uint32_t bpid)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct fman_if *fif = dev->process_private;

	if (fif->num_profiles) {
		if (vsp_id < 0)
			vsp_id = fif->base_profile_id;
	} else {
		if (vsp_id < 0)
			vsp_id = 0;
	}

	if (dpaa_intf->vsp_bpid[vsp_id] &&
	    bpid != dpaa_intf->vsp_bpid[vsp_id]) {
		DPAA_PMD_ERR("Various MPs are assigned to RXQs with same VSP");

		return -1;
	}

	return 0;
}

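/* Rx queue setup: validate the queue index, deferred-start flag and mempool
 * (one pool per interface, or per VSP profile), record the queue's offloads,
 * and compare the maximum frame length against the buffer size to decide
 * between single-buffer and scattered reception.
 */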
static
int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_rxconf *rx_conf,
			    struct rte_mempool *mp)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct fman_if *fif = dev->process_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
	struct qm_mcc_initfq opts = {0};
	u32 flags = 0;
	int ret;
	u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
	uint32_t max_rx_pktlen;

	PMD_INIT_FUNC_TRACE();

	if (queue_idx >= dev->data->nb_rx_queues) {
		rte_errno = EOVERFLOW;
		DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
			     (void *)dev, queue_idx, dev->data->nb_rx_queues);
		return -rte_errno;
	}

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		DPAA_PMD_ERR("%p:Rx deferred start not supported", (void *)dev);
		return -EINVAL;
	}
	rxq->nb_desc = UINT16_MAX;
	rxq->offloads = rx_conf->offloads;

	DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
		      queue_idx, rxq->fqid);

	if (!fif->num_profiles) {
		if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
		    dpaa_intf->bp_info->mp != mp) {
			DPAA_PMD_WARN("Multiple pools on same interface not"
				      " supported");
			return -EINVAL;
		}
	} else {
		if (dpaa_eth_rx_queue_bp_check(dev, rxq->vsp_id,
			DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid)) {
			return -EINVAL;
		}
	}

	if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
	    dpaa_intf->bp_info->mp != mp) {
		DPAA_PMD_WARN("Multiple pools on same interface not supported");
		return -EINVAL;
	}

	max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
		VLAN_TAG_SIZE;
	/* Max packet can fit in single buffer */
if (max_rx_pktlen <= buffsz) {
|
2018-09-21 11:05:52 +00:00
|
|
|
;
|
|
|
|
} else if (dev->data->dev_conf.rxmode.offloads &
|
2021-10-22 11:03:12 +00:00
|
|
|
RTE_ETH_RX_OFFLOAD_SCATTER) {
|
2021-10-18 13:48:48 +00:00
|
|
|
if (max_rx_pktlen > buffsz * DPAA_SGT_MAX_ENTRIES) {
|
|
|
|
DPAA_PMD_ERR("Maximum Rx packet size %d too big to fit "
|
2018-09-21 11:05:52 +00:00
|
|
|
"MaxSGlist %d",
|
2021-10-18 13:48:48 +00:00
|
|
|
max_rx_pktlen, buffsz * DPAA_SGT_MAX_ENTRIES);
|
2018-09-21 11:05:52 +00:00
|
|
|
rte_errno = EOVERFLOW;
|
|
|
|
return -rte_errno;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
|
|
|
|
" larger than a single mbuf (%u) and scattered"
|
|
|
|
" mode has not been requested",
|
2021-10-18 13:48:48 +00:00
|
|
|
max_rx_pktlen, buffsz - RTE_PKTMBUF_HEADROOM);
|
2018-09-21 11:05:52 +00:00
|
|
|
}
|
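The branch above chooses between single-buffer Rx, scattered Rx across at most DPAA_SGT_MAX_ENTRIES buffers, and a warning when the frame fits neither. As a hedged, minimal application-side sketch (the function name and port_id are placeholders, not part of this driver), this is how a caller would request scattered Rx so a jumbo MTU can be received into a chain of small mbufs:

#include <rte_ethdev.h>

/* Hypothetical sketch: request scattered Rx so frames larger than one mbuf
 * data buffer are split across an mbuf chain instead of hitting the warning
 * path above.
 */
static int enable_jumbo_with_scatter(uint16_t port_id)
{
	struct rte_eth_conf conf = {0};

	conf.rxmode.mtu = 9000;				/* jumbo payload size */
	conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}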
|
|
|
|
2020-09-04 08:39:28 +00:00
|
|
|
dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
|
2017-09-28 12:29:44 +00:00
|
|
|
|
2020-09-04 08:39:28 +00:00
|
|
|
/* For shared interface, it's done in kernel, skip.*/
|
|
|
|
if (!fif->is_shared_mac)
|
|
|
|
dpaa_fman_if_pool_setup(dev);
|
|
|
|
|
|
|
|
if (fif->num_profiles) {
|
|
|
|
int8_t vsp_id = rxq->vsp_id;
|
|
|
|
|
|
|
|
if (vsp_id >= 0) {
|
|
|
|
ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id,
|
|
|
|
DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid,
|
|
|
|
fif);
|
|
|
|
if (ret) {
|
|
|
|
DPAA_PMD_ERR("dpaa_port_vsp_update failed");
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
DPAA_PMD_INFO("Base profile is associated to"
|
|
|
|
" RXQ fqid:%d\r\n", rxq->fqid);
|
|
|
|
if (fif->is_shared_mac) {
|
|
|
|
DPAA_PMD_ERR("Fatal: Base profile is associated"
|
|
|
|
" to shared interface on DPDK.");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
dpaa_intf->vsp_bpid[fif->base_profile_id] =
|
|
|
|
DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
|
2017-09-28 12:29:44 +00:00
|
|
|
}
|
2020-09-04 08:39:28 +00:00
|
|
|
} else {
|
|
|
|
dpaa_intf->vsp_bpid[0] =
|
|
|
|
DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
|
2017-09-28 12:29:44 +00:00
|
|
|
}
|
2020-09-04 08:39:28 +00:00
|
|
|
|
|
|
|
dpaa_intf->valid = 1;
|
2018-09-21 11:05:52 +00:00
|
|
|
DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
|
2021-10-18 13:48:48 +00:00
|
|
|
fman_if_get_sg_enable(fif), max_rx_pktlen);
|
2018-01-10 10:46:40 +00:00
|
|
|
/* checking if push mode only, no error check for now */
|
2019-08-29 10:27:12 +00:00
|
|
|
if (!rxq->is_static &&
|
|
|
|
dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
|
2019-08-29 10:27:11 +00:00
|
|
|
struct qman_portal *qp;
|
2019-08-29 10:27:12 +00:00
|
|
|
int q_fd;
|
2019-08-29 10:27:11 +00:00
|
|
|
|
2018-01-10 10:46:40 +00:00
|
|
|
dpaa_push_queue_idx++;
|
|
|
|
opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
|
|
|
|
opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
|
|
|
|
QM_FQCTRL_CTXASTASHING |
|
|
|
|
QM_FQCTRL_PREFERINCACHE;
|
|
|
|
opts.fqd.context_a.stashing.exclusive = 0;
|
2021-11-29 16:08:02 +00:00
|
|
|
/* In multicore scenario stashing becomes a bottleneck on LS1046.
|
2018-01-23 12:27:07 +00:00
|
|
|
* So do not enable stashing in this case
|
|
|
|
*/
|
|
|
|
if (dpaa_svr_family != SVR_LS1046A_FAMILY)
|
|
|
|
opts.fqd.context_a.stashing.annotation_cl =
|
2018-01-10 10:46:40 +00:00
|
|
|
DPAA_IF_RX_ANNOTATION_STASH;
|
|
|
|
opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
|
|
|
|
opts.fqd.context_a.stashing.context_cl =
|
|
|
|
DPAA_IF_RX_CONTEXT_STASH;
|
|
|
|
|
|
|
|
/*Create a channel and associate given queue with the channel*/
|
|
|
|
qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
|
|
|
|
opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
|
|
|
|
opts.fqd.dest.channel = rxq->ch_id;
|
|
|
|
opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
|
|
|
|
flags = QMAN_INITFQ_FLAG_SCHED;
|
|
|
|
|
|
|
|
/* Configure tail drop */
|
|
|
|
if (dpaa_intf->cgr_rx) {
|
|
|
|
opts.we_mask |= QM_INITFQ_WE_CGID;
|
|
|
|
opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
|
|
|
|
opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
|
|
|
|
}
|
|
|
|
ret = qman_init_fq(rxq, flags, &opts);
|
2018-07-06 08:10:01 +00:00
|
|
|
if (ret) {
|
|
|
|
DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
|
|
|
|
"ret:%d(%s)", rxq->fqid, ret, strerror(ret));
|
|
|
|
return ret;
|
|
|
|
}
|
2018-09-21 11:05:59 +00:00
|
|
|
if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
|
|
|
|
rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
|
|
|
|
} else {
|
|
|
|
rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
|
|
|
|
rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
|
|
|
|
}
|
|
|
|
|
2018-01-10 10:46:40 +00:00
|
|
|
rxq->is_static = true;
|
2019-08-29 10:27:11 +00:00
|
|
|
|
|
|
|
/* Allocate qman specific portals */
|
2019-08-29 10:27:12 +00:00
|
|
|
qp = fsl_qman_fq_portal_create(&q_fd);
|
2019-08-29 10:27:11 +00:00
|
|
|
if (!qp) {
|
|
|
|
DPAA_PMD_ERR("Unable to alloc fq portal");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
rxq->qp = qp;
|
2019-08-29 10:27:12 +00:00
|
|
|
|
|
|
|
/* Set up the device interrupt handler */
|
2021-10-22 20:49:32 +00:00
|
|
|
if (dev->intr_handle == NULL) {
|
2019-08-29 10:27:12 +00:00
|
|
|
struct rte_dpaa_device *dpaa_dev;
|
|
|
|
struct rte_device *rdev = dev->device;
|
|
|
|
|
|
|
|
dpaa_dev = container_of(rdev, struct rte_dpaa_device,
|
|
|
|
device);
|
2021-10-22 20:49:32 +00:00
|
|
|
dev->intr_handle = dpaa_dev->intr_handle;
|
|
|
|
if (rte_intr_vec_list_alloc(dev->intr_handle,
|
|
|
|
NULL, dpaa_push_mode_max_queue)) {
|
2019-08-29 10:27:12 +00:00
|
|
|
DPAA_PMD_ERR("intr_vec alloc failed");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2021-10-22 20:49:32 +00:00
|
|
|
if (rte_intr_nb_efd_set(dev->intr_handle,
|
|
|
|
dpaa_push_mode_max_queue))
|
|
|
|
return -rte_errno;
|
|
|
|
|
|
|
|
if (rte_intr_max_intr_set(dev->intr_handle,
|
|
|
|
dpaa_push_mode_max_queue))
|
|
|
|
return -rte_errno;
|
2019-08-29 10:27:12 +00:00
|
|
|
}
|
|
|
|
|
2021-10-22 20:49:32 +00:00
|
|
|
if (rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_EXT))
|
|
|
|
return -rte_errno;
|
|
|
|
|
|
|
|
if (rte_intr_vec_list_index_set(dev->intr_handle,
|
|
|
|
queue_idx, queue_idx + 1))
|
|
|
|
return -rte_errno;
|
|
|
|
|
|
|
|
if (rte_intr_efds_index_set(dev->intr_handle, queue_idx,
|
|
|
|
q_fd))
|
|
|
|
return -rte_errno;
|
|
|
|
|
2019-08-29 10:27:12 +00:00
|
|
|
rxq->q_fd = q_fd;
|
2018-01-10 10:46:40 +00:00
|
|
|
}
|
2019-03-26 12:01:45 +00:00
|
|
|
rxq->bp_array = rte_dpaa_bpid_info;
|
2018-01-10 10:46:28 +00:00
|
|
|
dev->data->rx_queues[queue_idx] = rxq;
|
|
|
|
|
|
|
|
/* configure the CGR size as per the desc size */
|
|
|
|
if (dpaa_intf->cgr_rx) {
|
|
|
|
struct qm_mcc_initcgr cgr_opts = {0};
|
|
|
|
|
2020-07-10 16:21:37 +00:00
|
|
|
rxq->nb_desc = nb_desc;
|
2018-01-10 10:46:28 +00:00
|
|
|
/* Enable tail drop with cgr on this queue */
|
|
|
|
qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
|
|
|
|
ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
|
|
|
|
if (ret) {
|
|
|
|
DPAA_PMD_WARN(
|
|
|
|
"rx taildrop modify fail on fqid %d (ret=%d)",
|
|
|
|
rxq->fqid, ret);
|
|
|
|
}
|
|
|
|
}
|
2020-09-24 04:02:08 +00:00
|
|
|
/* Enable main queue to receive error packets also by default */
|
|
|
|
fman_if_set_err_fqid(fif, rxq->fqid);
|
2017-09-28 12:29:44 +00:00
|
|
|
return 0;
|
2017-09-28 12:29:42 +00:00
|
|
|
}
|
|
|
|
|
2018-07-06 08:10:07 +00:00
|
|
|
int
|
2018-01-22 01:48:06 +00:00
|
|
|
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
|
|
|
|
int eth_rx_queue_id,
|
2018-01-16 20:43:57 +00:00
|
|
|
u16 ch_id,
|
|
|
|
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
u32 flags = 0;
|
|
|
|
struct dpaa_if *dpaa_intf = dev->data->dev_private;
|
|
|
|
struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
|
|
|
|
struct qm_mcc_initfq opts = {0};
|
|
|
|
|
|
|
|
if (dpaa_push_mode_max_queue)
|
2018-09-21 11:05:54 +00:00
|
|
|
DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n"
|
|
|
|
"PUSH mode already enabled for first %d queues.\n"
|
2018-01-16 20:43:57 +00:00
|
|
|
"To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
|
|
|
|
dpaa_push_mode_max_queue);
|
|
|
|
|
|
|
|
dpaa_poll_queue_default_config(&opts);
|
|
|
|
|
|
|
|
switch (queue_conf->ev.sched_type) {
|
|
|
|
case RTE_SCHED_TYPE_ATOMIC:
|
|
|
|
opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
|
|
|
|
/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
|
|
|
|
* configuration with HOLD_ACTIVE setting
|
|
|
|
*/
|
|
|
|
opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
|
|
|
|
rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
|
|
|
|
break;
|
|
|
|
case RTE_SCHED_TYPE_ORDERED:
|
|
|
|
DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
|
|
|
|
return -1;
|
|
|
|
default:
|
|
|
|
opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
|
|
|
|
rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
|
|
|
|
opts.fqd.dest.channel = ch_id;
|
|
|
|
opts.fqd.dest.wq = queue_conf->ev.priority;
|
|
|
|
|
|
|
|
if (dpaa_intf->cgr_rx) {
|
|
|
|
opts.we_mask |= QM_INITFQ_WE_CGID;
|
|
|
|
opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
|
|
|
|
opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
|
|
|
|
}
|
|
|
|
|
|
|
|
flags = QMAN_INITFQ_FLAG_SCHED;
|
|
|
|
|
|
|
|
ret = qman_init_fq(rxq, flags, &opts);
|
|
|
|
if (ret) {
|
2018-07-06 08:10:01 +00:00
|
|
|
DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
|
|
|
|
"ret:%d(%s)", rxq->fqid, ret, strerror(ret));
|
2018-01-16 20:43:57 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* copy configuration which needs to be filled during dequeue */
|
|
|
|
memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
|
|
|
|
dev->data->rx_queues[eth_rx_queue_id] = rxq;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
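dpaa_eth_eventq_attach() above is normally reached through the event Rx adapter rather than being called directly. A hedged sketch of that application-side path follows (adapter, port, and queue ids are placeholders); the ATOMIC schedule type maps to the HOLDACTIVE branch above:

#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>

/* Hypothetical sketch: bind one ethdev Rx queue to an event device via the
 * Rx adapter; the adapter eventually invokes the PMD attach callback above.
 */
static int bind_rxq_to_eventdev(uint8_t adapter_id, uint16_t port_id,
				int32_t rx_queue_id, uint8_t ev_queue_id)
{
	struct rte_event_eth_rx_adapter_queue_conf qconf = {0};

	qconf.ev.queue_id = ev_queue_id;
	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	qconf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	return rte_event_eth_rx_adapter_queue_add(adapter_id, port_id,
						  rx_queue_id, &qconf);
}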
|
|
|
|
2018-07-06 08:10:07 +00:00
|
|
|
int
|
2018-01-22 01:48:06 +00:00
|
|
|
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
|
|
|
|
int eth_rx_queue_id)
|
2018-01-16 20:43:57 +00:00
|
|
|
{
|
2022-04-22 04:57:29 +00:00
|
|
|
struct qm_mcc_initfq opts = {0};
|
2018-01-16 20:43:57 +00:00
|
|
|
int ret;
|
|
|
|
u32 flags = 0;
|
|
|
|
struct dpaa_if *dpaa_intf = dev->data->dev_private;
|
|
|
|
struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
|
|
|
|
|
2022-04-22 04:57:29 +00:00
|
|
|
qman_retire_fq(rxq, NULL);
|
|
|
|
qman_oos_fq(rxq);
|
2018-01-16 20:43:57 +00:00
|
|
|
ret = qman_init_fq(rxq, flags, &opts);
|
|
|
|
if (ret) {
|
2022-04-22 04:57:29 +00:00
|
|
|
DPAA_PMD_ERR("detach rx fqid %d failed with ret: %d",
|
2018-01-16 20:43:57 +00:00
|
|
|
rxq->fqid, ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
rxq->cb.dqrr_dpdk_cb = NULL;
|
|
|
|
dev->data->rx_queues[eth_rx_queue_id] = NULL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-09-28 12:29:44 +00:00
|
|
|
static
|
|
|
|
int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
|
|
|
|
uint16_t nb_desc __rte_unused,
|
|
|
|
unsigned int socket_id __rte_unused,
|
2020-07-10 16:21:33 +00:00
|
|
|
const struct rte_eth_txconf *tx_conf)
|
2017-09-28 12:29:44 +00:00
|
|
|
{
|
|
|
|
struct dpaa_if *dpaa_intf = dev->data->dev_private;
|
2020-07-10 16:21:37 +00:00
|
|
|
struct qman_fq *txq = &dpaa_intf->tx_queues[queue_idx];
|
2017-09-28 12:29:44 +00:00
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
2020-07-10 16:21:33 +00:00
|
|
|
/* Tx deferred start is not supported */
|
|
|
|
if (tx_conf->tx_deferred_start) {
|
|
|
|
DPAA_PMD_ERR("%p:Tx deferred start not supported", (void *)dev);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2020-07-10 16:21:37 +00:00
|
|
|
txq->nb_desc = UINT16_MAX;
|
|
|
|
txq->offloads = tx_conf->offloads;
|
|
|
|
|
2018-07-06 08:10:01 +00:00
|
|
|
if (queue_idx >= dev->data->nb_tx_queues) {
|
|
|
|
rte_errno = EOVERFLOW;
|
|
|
|
DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
|
|
|
|
(void *)dev, queue_idx, dev->data->nb_tx_queues);
|
|
|
|
return -rte_errno;
|
|
|
|
}
|
|
|
|
|
|
|
|
DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
|
2020-07-10 16:21:37 +00:00
|
|
|
queue_idx, txq->fqid);
|
|
|
|
dev->data->tx_queues[queue_idx] = txq;
|
2020-07-07 09:22:24 +00:00
|
|
|
|
2017-09-28 12:29:44 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-01-10 10:46:36 +00:00
|
|
|
static uint32_t
|
2021-10-13 13:37:00 +00:00
|
|
|
dpaa_dev_rx_queue_count(void *rx_queue)
|
2018-01-10 10:46:36 +00:00
|
|
|
{
|
2021-10-13 13:37:00 +00:00
|
|
|
struct qman_fq *rxq = rx_queue;
|
2018-01-10 10:46:36 +00:00
|
|
|
u32 frm_cnt = 0;
|
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
|
|
|
if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
|
2021-10-13 13:37:00 +00:00
|
|
|
DPAA_PMD_DEBUG("RX frame count for q(%p) is %u",
|
|
|
|
rx_queue, frm_cnt);
|
2018-01-10 10:46:36 +00:00
|
|
|
}
|
|
|
|
return frm_cnt;
|
|
|
|
}
|
|
|
|
|
2017-09-28 12:29:47 +00:00
|
|
|
static int dpaa_link_down(struct rte_eth_dev *dev)
|
|
|
|
{
|
2020-07-07 09:22:29 +00:00
|
|
|
struct fman_if *fif = dev->process_private;
|
|
|
|
struct __fman_if *__fif;
|
|
|
|
|
2017-09-28 12:29:47 +00:00
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
2020-07-07 09:22:29 +00:00
|
|
|
__fif = container_of(fif, struct __fman_if, __if);
|
|
|
|
|
|
|
|
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
|
2021-10-22 11:03:12 +00:00
|
|
|
dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_DOWN);
|
2020-07-07 09:22:29 +00:00
|
|
|
else
|
2020-10-15 13:30:45 +00:00
|
|
|
return dpaa_eth_dev_stop(dev);
|
2017-09-28 12:29:47 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int dpaa_link_up(struct rte_eth_dev *dev)
|
|
|
|
{
|
2020-07-07 09:22:29 +00:00
|
|
|
struct fman_if *fif = dev->process_private;
|
|
|
|
struct __fman_if *__fif;
|
|
|
|
|
2017-09-28 12:29:47 +00:00
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
2020-07-07 09:22:29 +00:00
|
|
|
__fif = container_of(fif, struct __fman_if, __if);
|
|
|
|
|
|
|
|
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
|
2021-10-22 11:03:12 +00:00
|
|
|
dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_UP);
|
2020-07-07 09:22:29 +00:00
|
|
|
else
|
|
|
|
dpaa_eth_dev_start(dev);
|
2017-09-28 12:29:47 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-09-28 12:29:53 +00:00
|
|
|
static int
|
|
|
|
dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
|
|
|
|
struct rte_eth_fc_conf *fc_conf)
|
|
|
|
{
|
|
|
|
struct dpaa_if *dpaa_intf = dev->data->dev_private;
|
|
|
|
struct rte_eth_fc_conf *net_fc;
|
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
|
|
|
if (!(dpaa_intf->fc_conf)) {
|
|
|
|
dpaa_intf->fc_conf = rte_zmalloc(NULL,
|
|
|
|
sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
|
|
|
|
if (!dpaa_intf->fc_conf) {
|
|
|
|
DPAA_PMD_ERR("unable to save flow control info");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
net_fc = dpaa_intf->fc_conf;
|
|
|
|
|
|
|
|
if (fc_conf->high_water < fc_conf->low_water) {
|
|
|
|
DPAA_PMD_ERR("Incorrect Flow Control Configuration");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2021-10-22 11:03:12 +00:00
|
|
|
if (fc_conf->mode == RTE_ETH_FC_NONE) {
|
2017-09-28 12:29:53 +00:00
|
|
|
return 0;
|
2021-10-22 11:03:12 +00:00
|
|
|
} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE ||
|
|
|
|
fc_conf->mode == RTE_ETH_FC_FULL) {
|
2020-07-07 09:22:26 +00:00
|
|
|
fman_if_set_fc_threshold(dev->process_private,
|
|
|
|
fc_conf->high_water,
|
2017-09-28 12:29:53 +00:00
|
|
|
fc_conf->low_water,
|
2020-07-07 09:22:26 +00:00
|
|
|
dpaa_intf->bp_info->bpid);
|
2017-09-28 12:29:53 +00:00
|
|
|
if (fc_conf->pause_time)
|
2020-07-07 09:22:26 +00:00
|
|
|
fman_if_set_fc_quanta(dev->process_private,
|
2017-09-28 12:29:53 +00:00
|
|
|
fc_conf->pause_time);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Save the information in dpaa device */
|
|
|
|
net_fc->pause_time = fc_conf->pause_time;
|
|
|
|
net_fc->high_water = fc_conf->high_water;
|
|
|
|
net_fc->low_water = fc_conf->low_water;
|
|
|
|
net_fc->send_xon = fc_conf->send_xon;
|
|
|
|
net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
|
|
|
|
net_fc->mode = fc_conf->mode;
|
|
|
|
net_fc->autoneg = fc_conf->autoneg;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
|
|
|
|
struct rte_eth_fc_conf *fc_conf)
|
|
|
|
{
|
|
|
|
struct dpaa_if *dpaa_intf = dev->data->dev_private;
|
|
|
|
struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
|
|
|
if (net_fc) {
|
|
|
|
fc_conf->pause_time = net_fc->pause_time;
|
|
|
|
fc_conf->high_water = net_fc->high_water;
|
|
|
|
fc_conf->low_water = net_fc->low_water;
|
|
|
|
fc_conf->send_xon = net_fc->send_xon;
|
|
|
|
fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
|
|
|
|
fc_conf->mode = net_fc->mode;
|
|
|
|
fc_conf->autoneg = net_fc->autoneg;
|
|
|
|
return 0;
|
|
|
|
}
|
2020-07-07 09:22:26 +00:00
|
|
|
ret = fman_if_get_fc_threshold(dev->process_private);
|
2017-09-28 12:29:53 +00:00
|
|
|
if (ret) {
|
2021-10-22 11:03:12 +00:00
|
|
|
fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
|
2020-07-07 09:22:26 +00:00
|
|
|
fc_conf->pause_time =
|
|
|
|
fman_if_get_fc_quanta(dev->process_private);
|
2017-09-28 12:29:53 +00:00
|
|
|
} else {
|
2021-10-22 11:03:12 +00:00
|
|
|
fc_conf->mode = RTE_ETH_FC_NONE;
|
2017-09-28 12:29:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
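dpaa_flow_ctrl_set()/dpaa_flow_ctrl_get() above back the generic ethdev flow control calls. A hedged application-side sketch (port_id and the threshold numbers are placeholders, not tuning advice):

#include <rte_ethdev.h>

/* Hypothetical sketch: enable Tx pause frames from the application; the
 * request lands in dpaa_flow_ctrl_set() above, which programs the FMan
 * thresholds and pause quanta.
 */
static int set_tx_pause(uint16_t port_id)
{
	struct rte_eth_fc_conf fc = {0};

	fc.mode = RTE_ETH_FC_TX_PAUSE;
	fc.high_water = 1024;	/* must be >= low_water, see check above */
	fc.low_water = 512;
	fc.pause_time = 0x680;	/* pause quanta */

	return rte_eth_dev_flow_ctrl_set(port_id, &fc);
}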
|
|
|
|
2017-09-28 12:29:51 +00:00
|
|
|
static int
|
|
|
|
dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
|
2019-05-21 16:13:03 +00:00
|
|
|
struct rte_ether_addr *addr,
|
2017-09-28 12:29:51 +00:00
|
|
|
uint32_t index,
|
|
|
|
__rte_unused uint32_t pool)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
2020-07-07 09:22:26 +00:00
|
|
|
ret = fman_if_add_mac_addr(dev->process_private,
|
|
|
|
addr->addr_bytes, index);
|
2017-09-28 12:29:51 +00:00
|
|
|
|
|
|
|
if (ret)
|
2020-03-31 04:41:55 +00:00
|
|
|
DPAA_PMD_ERR("Adding the MAC ADDR failed: err = %d", ret);
|
2017-09-28 12:29:51 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
|
|
|
|
uint32_t index)
|
|
|
|
{
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
2020-07-07 09:22:26 +00:00
|
|
|
fman_if_clear_mac_addr(dev->process_private, index);
|
2017-09-28 12:29:51 +00:00
|
|
|
}
|
|
|
|
|
2018-04-11 16:32:51 +00:00
|
|
|
static int
|
2017-09-28 12:29:51 +00:00
|
|
|
dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
|
2019-05-21 16:13:03 +00:00
|
|
|
struct rte_ether_addr *addr)
|
2017-09-28 12:29:51 +00:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
2020-07-07 09:22:26 +00:00
|
|
|
ret = fman_if_add_mac_addr(dev->process_private, addr->addr_bytes, 0);
|
2017-09-28 12:29:51 +00:00
|
|
|
if (ret)
|
2020-03-31 04:41:55 +00:00
|
|
|
DPAA_PMD_ERR("Setting the MAC ADDR failed %d", ret);
|
2018-04-11 16:32:51 +00:00
|
|
|
|
|
|
|
return ret;
|
2017-09-28 12:29:51 +00:00
|
|
|
}
|
|
|
|
|
2020-09-04 08:39:30 +00:00
|
|
|
static int
|
|
|
|
dpaa_dev_rss_hash_update(struct rte_eth_dev *dev,
|
|
|
|
struct rte_eth_rss_conf *rss_conf)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev_data *data = dev->data;
|
|
|
|
struct rte_eth_conf *eth_conf = &data->dev_conf;
|
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
|
|
|
if (!(default_q || fmc_q)) {
|
|
|
|
if (dpaa_fm_config(dev, rss_conf->rss_hf)) {
|
|
|
|
DPAA_PMD_ERR("FM port configuration: Failed\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
|
|
|
|
} else {
|
|
|
|
DPAA_PMD_ERR("Function not supported\n");
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
dpaa_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
|
|
|
|
struct rte_eth_rss_conf *rss_conf)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev_data *data = dev->data;
|
|
|
|
struct rte_eth_conf *eth_conf = &data->dev_conf;
|
|
|
|
|
|
|
|
/* dpaa does not support rss_key, so length should be 0*/
|
|
|
|
rss_conf->rss_key_len = 0;
|
|
|
|
rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
|
|
|
|
return 0;
|
|
|
|
}
|
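dpaa_dev_rss_hash_update() above only takes effect in the FMC-less mode (neither default_q nor fmc_q). A hedged application-side sketch, with port_id as a placeholder:

#include <rte_ethdev.h>

/* Hypothetical sketch: spread IP, TCP and UDP flows across the PCD Rx
 * queues. No key is supplied because, as noted above, dpaa has no
 * programmable RSS key.
 */
static int enable_ip_rss(uint16_t port_id)
{
	struct rte_eth_rss_conf rss = {
		.rss_key = NULL,
		.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss);
}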
|
|
|
|
2019-08-29 10:27:13 +00:00
|
|
|
static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
|
|
|
|
uint16_t queue_id)
|
|
|
|
{
|
|
|
|
struct dpaa_if *dpaa_intf = dev->data->dev_private;
|
|
|
|
struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
|
|
|
|
|
|
|
|
if (!rxq->is_static)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev,
|
|
|
|
uint16_t queue_id)
|
|
|
|
{
|
|
|
|
struct dpaa_if *dpaa_intf = dev->data->dev_private;
|
|
|
|
struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
|
|
|
|
uint32_t temp;
|
|
|
|
ssize_t temp1;
|
|
|
|
|
|
|
|
if (!rxq->is_static)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
qman_fq_portal_irqsource_remove(rxq->qp, ~0);
|
|
|
|
|
|
|
|
temp1 = read(rxq->q_fd, &temp, sizeof(temp));
|
|
|
|
if (temp1 != sizeof(temp))
|
2022-10-07 03:27:28 +00:00
|
|
|
DPAA_PMD_DEBUG("read did not return anything");
|
2019-08-29 10:27:13 +00:00
|
|
|
|
|
|
|
qman_fq_portal_thread_irq(rxq->qp);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
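dpaa_dev_queue_intr_enable()/dpaa_dev_queue_intr_disable() above back the generic Rx interrupt control path. A hedged application-side sketch of the usual sleep-until-traffic pattern built on top of them (port_id and queue_id are placeholders):

#include <rte_ethdev.h>
#include <rte_interrupts.h>

/* Hypothetical sketch: register the Rx queue's event fd with the per-thread
 * epoll instance, arm the interrupt, block until traffic arrives, then drop
 * back to polling. Only meaningful for queues set up in push (static) mode
 * above; the callbacks return -EINVAL otherwise.
 */
static int wait_for_rx(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event event;
	int ret;

	ret = rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
					RTE_EPOLL_PER_THREAD,
					RTE_INTR_EVENT_ADD, NULL);
	if (ret)
		return ret;

	rte_eth_dev_rx_intr_enable(port_id, queue_id);	/* arm the IRQ */
	ret = rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
	rte_eth_dev_rx_intr_disable(port_id, queue_id);	/* resume polling */

	return ret < 0 ? ret : 0;
}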
|
|
|
|
2020-07-10 16:21:37 +00:00
|
|
|
static void
|
|
|
|
dpaa_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
|
|
|
|
struct rte_eth_rxq_info *qinfo)
|
|
|
|
{
|
|
|
|
struct dpaa_if *dpaa_intf = dev->data->dev_private;
|
|
|
|
struct qman_fq *rxq;
|
2021-02-24 12:43:11 +00:00
|
|
|
int ret;
|
2020-07-10 16:21:37 +00:00
|
|
|
|
|
|
|
rxq = dev->data->rx_queues[queue_id];
|
|
|
|
|
|
|
|
qinfo->mp = dpaa_intf->bp_info->mp;
|
|
|
|
qinfo->scattered_rx = dev->data->scattered_rx;
|
|
|
|
qinfo->nb_desc = rxq->nb_desc;
|
2021-02-24 12:43:11 +00:00
|
|
|
|
|
|
|
/* Report the HW Rx buffer length to user */
|
|
|
|
ret = fman_if_get_maxfrm(dev->process_private);
|
|
|
|
if (ret > 0)
|
|
|
|
qinfo->rx_buf_size = ret;
|
|
|
|
|
2020-07-10 16:21:37 +00:00
|
|
|
qinfo->conf.rx_free_thresh = 1;
|
|
|
|
qinfo->conf.rx_drop_en = 1;
|
|
|
|
qinfo->conf.rx_deferred_start = 0;
|
|
|
|
qinfo->conf.offloads = rxq->offloads;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
dpaa_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
|
|
|
|
struct rte_eth_txq_info *qinfo)
|
|
|
|
{
|
|
|
|
struct qman_fq *txq;
|
|
|
|
|
|
|
|
txq = dev->data->tx_queues[queue_id];
|
|
|
|
|
|
|
|
qinfo->nb_desc = txq->nb_desc;
|
|
|
|
qinfo->conf.tx_thresh.pthresh = 0;
|
|
|
|
qinfo->conf.tx_thresh.hthresh = 0;
|
|
|
|
qinfo->conf.tx_thresh.wthresh = 0;
|
|
|
|
|
|
|
|
qinfo->conf.tx_free_thresh = 0;
|
|
|
|
qinfo->conf.tx_rs_thresh = 0;
|
|
|
|
qinfo->conf.offloads = txq->offloads;
|
|
|
|
qinfo->conf.tx_deferred_start = 0;
|
|
|
|
}
|
|
|
|
|
2017-09-28 12:29:42 +00:00
|
|
|
static struct eth_dev_ops dpaa_devops = {
|
|
|
|
.dev_configure = dpaa_eth_dev_configure,
|
|
|
|
.dev_start = dpaa_eth_dev_start,
|
|
|
|
.dev_stop = dpaa_eth_dev_stop,
|
|
|
|
.dev_close = dpaa_eth_dev_close,
|
2017-09-28 12:29:48 +00:00
|
|
|
.dev_infos_get = dpaa_eth_dev_info,
|
2017-09-28 12:29:55 +00:00
|
|
|
.dev_supported_ptypes_get = dpaa_supported_ptypes_get,
|
2017-09-28 12:29:44 +00:00
|
|
|
|
|
|
|
.rx_queue_setup = dpaa_eth_rx_queue_setup,
|
|
|
|
.tx_queue_setup = dpaa_eth_tx_queue_setup,
|
2020-07-10 16:21:31 +00:00
|
|
|
.rx_burst_mode_get = dpaa_dev_rx_burst_mode_get,
|
|
|
|
.tx_burst_mode_get = dpaa_dev_tx_burst_mode_get,
|
2020-07-10 16:21:37 +00:00
|
|
|
.rxq_info_get = dpaa_rxq_info_get,
|
|
|
|
.txq_info_get = dpaa_txq_info_get,
|
|
|
|
|
2017-09-28 12:29:53 +00:00
|
|
|
.flow_ctrl_get = dpaa_flow_ctrl_get,
|
|
|
|
.flow_ctrl_set = dpaa_flow_ctrl_set,
|
|
|
|
|
2017-09-28 12:29:47 +00:00
|
|
|
.link_update = dpaa_eth_link_update,
|
2017-09-28 12:29:52 +00:00
|
|
|
.stats_get = dpaa_eth_stats_get,
|
2017-09-28 12:30:00 +00:00
|
|
|
.xstats_get = dpaa_dev_xstats_get,
|
|
|
|
.xstats_get_by_id = dpaa_xstats_get_by_id,
|
|
|
|
.xstats_get_names_by_id = dpaa_xstats_get_names_by_id,
|
|
|
|
.xstats_get_names = dpaa_xstats_get_names,
|
|
|
|
.xstats_reset = dpaa_eth_stats_reset,
|
2017-09-28 12:29:52 +00:00
|
|
|
.stats_reset = dpaa_eth_stats_reset,
|
2017-09-28 12:29:49 +00:00
|
|
|
.promiscuous_enable = dpaa_eth_promiscuous_enable,
|
|
|
|
.promiscuous_disable = dpaa_eth_promiscuous_disable,
|
2017-09-28 12:29:50 +00:00
|
|
|
.allmulticast_enable = dpaa_eth_multicast_enable,
|
|
|
|
.allmulticast_disable = dpaa_eth_multicast_disable,
|
2017-09-28 12:29:45 +00:00
|
|
|
.mtu_set = dpaa_mtu_set,
|
2017-09-28 12:29:47 +00:00
|
|
|
.dev_set_link_down = dpaa_link_down,
|
|
|
|
.dev_set_link_up = dpaa_link_up,
|
2017-09-28 12:29:51 +00:00
|
|
|
.mac_addr_add = dpaa_dev_add_mac_addr,
|
|
|
|
.mac_addr_remove = dpaa_dev_remove_mac_addr,
|
|
|
|
.mac_addr_set = dpaa_dev_set_mac_addr,
|
|
|
|
|
2017-09-28 12:29:59 +00:00
|
|
|
.fw_version_get = dpaa_fw_version_get,
|
2019-08-29 10:27:13 +00:00
|
|
|
|
|
|
|
.rx_queue_intr_enable = dpaa_dev_queue_intr_enable,
|
|
|
|
.rx_queue_intr_disable = dpaa_dev_queue_intr_disable,
|
2020-09-04 08:39:30 +00:00
|
|
|
.rss_hash_update = dpaa_dev_rss_hash_update,
|
|
|
|
.rss_hash_conf_get = dpaa_dev_rss_hash_conf_get,
|
2017-09-28 12:29:42 +00:00
|
|
|
};
|
|
|
|
|
2018-01-10 10:46:37 +00:00
|
|
|
static bool
|
|
|
|
is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
|
|
|
|
{
|
|
|
|
if (strcmp(dev->device->driver->name,
|
|
|
|
drv->driver.name))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
is_dpaa_supported(struct rte_eth_dev *dev)
|
|
|
|
{
|
|
|
|
return is_device_supported(dev, &rte_dpaa_pmd);
|
|
|
|
}
|
|
|
|
|
2018-07-06 08:10:07 +00:00
|
|
|
int
|
2020-08-25 17:51:06 +00:00
|
|
|
rte_pmd_dpaa_set_tx_loopback(uint16_t port, uint8_t on)
|
2018-01-10 10:46:37 +00:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
|
|
|
|
|
|
|
|
dev = &rte_eth_devices[port];
|
|
|
|
|
|
|
|
if (!is_dpaa_supported(dev))
|
|
|
|
return -ENOTSUP;
|
|
|
|
|
|
|
|
if (on)
|
2020-07-07 09:22:26 +00:00
|
|
|
fman_if_loopback_enable(dev->process_private);
|
2018-01-10 10:46:37 +00:00
|
|
|
else
|
2020-07-07 09:22:26 +00:00
|
|
|
fman_if_loopback_disable(dev->process_private);
|
2018-01-10 10:46:37 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
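A hedged usage note for the PMD-specific API defined above (the port id 0 is a placeholder): loop Tx traffic back to the port's own Rx path, e.g. for a self-test, and pass 0 instead of 1 to turn the loopback off again.

#include <rte_pmd_dpaa.h>

/* Hypothetical usage of rte_pmd_dpaa_set_tx_loopback() defined above. */
static int enable_mac_loopback(void)
{
	return rte_pmd_dpaa_set_tx_loopback(0, 1);
}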
|
|
|
|
2020-07-07 09:22:26 +00:00
|
|
|
static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
|
|
|
|
struct fman_if *fman_intf)
|
2017-09-28 12:29:53 +00:00
|
|
|
{
|
|
|
|
struct rte_eth_fc_conf *fc_conf;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
|
|
|
if (!(dpaa_intf->fc_conf)) {
|
|
|
|
dpaa_intf->fc_conf = rte_zmalloc(NULL,
|
|
|
|
sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
|
|
|
|
if (!dpaa_intf->fc_conf) {
|
|
|
|
DPAA_PMD_ERR("unable to save flow control info");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fc_conf = dpaa_intf->fc_conf;
|
2020-07-07 09:22:26 +00:00
|
|
|
ret = fman_if_get_fc_threshold(fman_intf);
|
2017-09-28 12:29:53 +00:00
|
|
|
if (ret) {
|
2021-10-22 11:03:12 +00:00
|
|
|
fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
|
2020-07-07 09:22:26 +00:00
|
|
|
fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
|
2017-09-28 12:29:53 +00:00
|
|
|
} else {
|
2021-10-22 11:03:12 +00:00
|
|
|
fc_conf->mode = RTE_ETH_FC_NONE;
|
2017-09-28 12:29:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-09-28 12:29:44 +00:00
|
|
|
/* Initialise an Rx FQ */
|
2018-01-10 10:46:28 +00:00
|
|
|
static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
|
2017-09-28 12:29:44 +00:00
|
|
|
uint32_t fqid)
|
|
|
|
{
|
2018-01-10 10:46:23 +00:00
|
|
|
struct qm_mcc_initfq opts = {0};
|
2017-09-28 12:29:44 +00:00
|
|
|
int ret;
|
2018-09-21 11:05:50 +00:00
|
|
|
u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE;
|
2018-01-10 10:46:28 +00:00
|
|
|
struct qm_mcc_initcgr cgr_opts = {
|
|
|
|
.we_mask = QM_CGR_WE_CS_THRES |
|
|
|
|
QM_CGR_WE_CSTD_EN |
|
|
|
|
QM_CGR_WE_MODE,
|
|
|
|
.cgr = {
|
|
|
|
.cstd_en = QM_CGR_EN,
|
|
|
|
.mode = QMAN_CGR_MODE_FRAME
|
|
|
|
}
|
|
|
|
};
|
2017-09-28 12:29:44 +00:00
|
|
|
|
2020-09-04 08:39:25 +00:00
|
|
|
if (fmc_q || default_q) {
|
2018-09-21 11:05:50 +00:00
|
|
|
ret = qman_reserve_fqid(fqid);
|
|
|
|
if (ret) {
|
2020-09-04 08:39:25 +00:00
|
|
|
DPAA_PMD_ERR("reserve rx fqid 0x%x failed, ret: %d",
|
2018-09-21 11:05:50 +00:00
|
|
|
fqid, ret);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2017-09-28 12:29:44 +00:00
|
|
|
}
|
2020-09-04 08:39:25 +00:00
|
|
|
|
2018-07-06 08:10:06 +00:00
|
|
|
DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
|
2018-09-21 11:05:50 +00:00
|
|
|
ret = qman_create_fq(fqid, flags, fq);
|
2017-09-28 12:29:44 +00:00
|
|
|
if (ret) {
|
2018-07-06 08:10:01 +00:00
|
|
|
DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
|
2017-09-28 12:29:44 +00:00
|
|
|
fqid, ret);
|
|
|
|
return ret;
|
|
|
|
}
|
2018-01-10 10:46:40 +00:00
|
|
|
fq->is_static = false;
|
2018-01-16 20:43:57 +00:00
|
|
|
|
|
|
|
dpaa_poll_queue_default_config(&opts);
|
2017-09-28 12:29:44 +00:00
|
|
|
|
2018-01-10 10:46:28 +00:00
|
|
|
if (cgr_rx) {
|
|
|
|
/* Enable tail drop with cgr on this queue */
|
|
|
|
qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
|
|
|
|
cgr_rx->cb = NULL;
|
|
|
|
ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
|
|
|
|
&cgr_opts);
|
|
|
|
if (ret) {
|
|
|
|
DPAA_PMD_WARN(
|
2018-07-06 08:10:06 +00:00
|
|
|
"rx taildrop init fail on rx fqid 0x%x(ret=%d)",
|
2018-09-21 11:05:50 +00:00
|
|
|
fq->fqid, ret);
|
2018-01-10 10:46:28 +00:00
|
|
|
goto without_cgr;
|
|
|
|
}
|
|
|
|
opts.we_mask |= QM_INITFQ_WE_CGID;
|
|
|
|
opts.fqd.cgid = cgr_rx->cgrid;
|
|
|
|
opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
|
|
|
|
}
|
|
|
|
without_cgr:
|
2018-09-21 11:05:50 +00:00
|
|
|
ret = qman_init_fq(fq, 0, &opts);
|
2017-09-28 12:29:44 +00:00
|
|
|
if (ret)
|
2018-07-06 08:10:06 +00:00
|
|
|
DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
|
2017-09-28 12:29:44 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Initialise a Tx FQ */
|
|
|
|
static int dpaa_tx_queue_init(struct qman_fq *fq,
|
2020-07-07 09:22:24 +00:00
|
|
|
struct fman_if *fman_intf,
|
|
|
|
struct qman_cgr *cgr_tx)
|
2017-09-28 12:29:44 +00:00
|
|
|
{
|
2018-01-10 10:46:23 +00:00
|
|
|
struct qm_mcc_initfq opts = {0};
|
2020-07-07 09:22:24 +00:00
|
|
|
struct qm_mcc_initcgr cgr_opts = {
|
|
|
|
.we_mask = QM_CGR_WE_CS_THRES |
|
|
|
|
QM_CGR_WE_CSTD_EN |
|
|
|
|
QM_CGR_WE_MODE,
|
|
|
|
.cgr = {
|
|
|
|
.cstd_en = QM_CGR_EN,
|
|
|
|
.mode = QMAN_CGR_MODE_FRAME
|
|
|
|
}
|
|
|
|
};
|
2017-09-28 12:29:44 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
|
|
|
|
QMAN_FQ_FLAG_TO_DCPORTAL, fq);
|
|
|
|
if (ret) {
|
|
|
|
DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
|
|
|
|
QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
|
|
|
|
opts.fqd.dest.channel = fman_intf->tx_channel_id;
|
|
|
|
opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
|
|
|
|
opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
|
|
|
|
opts.fqd.context_b = 0;
|
|
|
|
/* no tx-confirmation */
|
|
|
|
opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
|
|
|
|
opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
|
2022-01-03 10:01:26 +00:00
|
|
|
if (fman_ip_rev >= FMAN_V3) {
|
|
|
|
/* Set B0V bit in contextA to set ASPID to 0 */
|
|
|
|
opts.fqd.context_a.hi |= 0x04000000;
|
|
|
|
}
|
2018-07-06 08:10:06 +00:00
|
|
|
DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
|
2020-07-07 09:22:24 +00:00
|
|
|
|
|
|
|
if (cgr_tx) {
|
|
|
|
/* Enable tail drop with cgr on this queue */
|
|
|
|
qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres,
|
|
|
|
td_tx_threshold, 0);
|
|
|
|
cgr_tx->cb = NULL;
|
|
|
|
ret = qman_create_cgr(cgr_tx, QMAN_CGR_FLAG_USE_INIT,
|
|
|
|
&cgr_opts);
|
|
|
|
if (ret) {
|
|
|
|
DPAA_PMD_WARN(
|
|
|
|
"rx taildrop init fail on rx fqid 0x%x(ret=%d)",
|
|
|
|
fq->fqid, ret);
|
|
|
|
goto without_cgr;
|
|
|
|
}
|
|
|
|
opts.we_mask |= QM_INITFQ_WE_CGID;
|
|
|
|
opts.fqd.cgid = cgr_tx->cgrid;
|
|
|
|
opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
|
|
|
|
DPAA_PMD_DEBUG("Tx FQ tail drop enabled, threshold = %d\n",
|
|
|
|
td_tx_threshold);
|
|
|
|
}
|
|
|
|
without_cgr:
|
2017-09-28 12:29:44 +00:00
|
|
|
ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
|
|
|
|
if (ret)
|
2018-07-06 08:10:06 +00:00
|
|
|
DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret);
|
2017-09-28 12:29:44 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-09-28 12:29:58 +00:00
|
|
|
#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
|
|
|
|
/* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
|
|
|
|
static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
|
|
|
|
{
|
2018-01-10 10:46:23 +00:00
|
|
|
struct qm_mcc_initfq opts = {0};
|
2017-09-28 12:29:58 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
|
|
|
ret = qman_reserve_fqid(fqid);
|
|
|
|
if (ret) {
|
|
|
|
DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
|
|
|
|
fqid, ret);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
/* "map" this Rx FQ to one of the interfaces Tx FQID */
|
|
|
|
DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
|
|
|
|
ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
|
|
|
|
if (ret) {
|
|
|
|
DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
|
|
|
|
fqid, ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
|
|
|
|
opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
|
|
|
|
ret = qman_init_fq(fq, 0, &opts);
|
|
|
|
if (ret)
|
|
|
|
DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
|
|
|
|
fqid, ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2020-07-07 09:22:26 +00:00
|
|
|
/* Initialise a network interface */
|
|
|
|
static int
|
|
|
|
dpaa_dev_init_secondary(struct rte_eth_dev *eth_dev)
|
|
|
|
{
|
|
|
|
struct rte_dpaa_device *dpaa_device;
|
|
|
|
struct fm_eth_port_cfg *cfg;
|
|
|
|
struct dpaa_if *dpaa_intf;
|
|
|
|
struct fman_if *fman_intf;
|
|
|
|
int dev_id;
|
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
|
|
|
dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
|
|
|
|
dev_id = dpaa_device->id.dev_id;
|
|
|
|
cfg = dpaa_get_eth_port_cfg(dev_id);
|
|
|
|
fman_intf = cfg->fman_if;
|
|
|
|
eth_dev->process_private = fman_intf;
|
|
|
|
|
|
|
|
/* Plugging of UCODE burst API not supported in Secondary */
|
|
|
|
dpaa_intf = eth_dev->data->dev_private;
|
|
|
|
eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
|
|
|
|
if (dpaa_intf->cgr_tx)
|
|
|
|
eth_dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
|
|
|
|
else
|
|
|
|
eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
|
|
|
|
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
|
|
|
|
qman_set_fq_lookup_table(
|
|
|
|
dpaa_intf->rx_queues->qman_fq_lookup_table);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-09-28 12:29:42 +00:00
|
|
|
/* Initialise a network interface */
|
|
|
|
static int
|
|
|
|
dpaa_dev_init(struct rte_eth_dev *eth_dev)
|
|
|
|
{
|
2019-06-25 10:40:19 +00:00
|
|
|
int num_rx_fqs, fqid;
|
2017-09-28 12:29:44 +00:00
|
|
|
int loop, ret = 0;
|
2017-09-28 12:29:42 +00:00
|
|
|
int dev_id;
|
|
|
|
struct rte_dpaa_device *dpaa_device;
|
|
|
|
struct dpaa_if *dpaa_intf;
|
2017-09-28 12:29:44 +00:00
|
|
|
struct fm_eth_port_cfg *cfg;
|
|
|
|
struct fman_if *fman_intf;
|
|
|
|
struct fman_if_bpool *bp, *tmp_bp;
|
2018-01-10 10:46:28 +00:00
|
|
|
uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
|
2020-07-07 09:22:24 +00:00
|
|
|
uint32_t cgrid_tx[MAX_DPAA_CORES];
|
2020-09-04 08:39:25 +00:00
|
|
|
uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES];
|
2020-09-04 08:39:28 +00:00
|
|
|
int8_t dev_vspids[DPAA_MAX_NUM_PCD_QUEUES];
|
|
|
|
int8_t vsp_id = -1;
|
2017-09-28 12:29:42 +00:00
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
|
|
|
dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
|
|
|
|
dev_id = dpaa_device->id.dev_id;
|
|
|
|
dpaa_intf = eth_dev->data->dev_private;
|
2020-05-15 09:47:50 +00:00
|
|
|
cfg = dpaa_get_eth_port_cfg(dev_id);
|
2017-09-28 12:29:44 +00:00
|
|
|
fman_intf = cfg->fman_if;
|
2017-09-28 12:29:42 +00:00
|
|
|
|
|
|
|
dpaa_intf->name = dpaa_device->name;
|
|
|
|
|
2021-11-29 16:08:02 +00:00
|
|
|
/* save fman_if & cfg in the interface structure */
|
2020-07-07 09:22:26 +00:00
|
|
|
eth_dev->process_private = fman_intf;
|
2017-09-28 12:29:42 +00:00
|
|
|
dpaa_intf->ifid = dev_id;
|
2017-09-28 12:29:44 +00:00
|
|
|
dpaa_intf->cfg = cfg;
|
|
|
|
|
2020-09-04 08:39:25 +00:00
|
|
|
memset((char *)dev_rx_fqids, 0,
|
|
|
|
sizeof(uint32_t) * DPAA_MAX_NUM_PCD_QUEUES);
|
|
|
|
|
2020-09-04 08:39:28 +00:00
|
|
|
memset(dev_vspids, -1, DPAA_MAX_NUM_PCD_QUEUES);
|
|
|
|
|
2017-09-28 12:29:44 +00:00
|
|
|
/* Initialize Rx FQ's */
|
2018-07-06 08:10:06 +00:00
|
|
|
if (default_q) {
|
2017-09-28 12:29:44 +00:00
|
|
|
num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
|
2020-09-04 08:39:25 +00:00
|
|
|
} else if (fmc_q) {
|
2020-09-04 08:39:29 +00:00
|
|
|
num_rx_fqs = dpaa_port_fmc_init(fman_intf, dev_rx_fqids,
|
|
|
|
dev_vspids,
|
|
|
|
DPAA_MAX_NUM_PCD_QUEUES);
|
|
|
|
if (num_rx_fqs < 0) {
|
|
|
|
DPAA_PMD_ERR("%s FMC initializes failed!",
|
|
|
|
dpaa_intf->name);
|
|
|
|
goto free_rx;
|
|
|
|
}
|
|
|
|
if (!num_rx_fqs) {
|
|
|
|
DPAA_PMD_WARN("%s is not configured by FMC.",
|
|
|
|
dpaa_intf->name);
|
|
|
|
}
|
2018-07-06 08:10:06 +00:00
|
|
|
} else {
|
2020-09-04 08:39:25 +00:00
|
|
|
/* FMCLESS mode, load balance to multiple cores.*/
|
|
|
|
num_rx_fqs = rte_lcore_count();
|
2018-07-06 08:10:06 +00:00
|
|
|
}
|
|
|
|
|
2018-04-09 10:22:46 +00:00
|
|
|
/* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
|
2017-09-28 12:29:44 +00:00
|
|
|
* queues.
|
|
|
|
*/
|
2020-09-04 08:39:25 +00:00
|
|
|
if (num_rx_fqs < 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
|
2017-09-28 12:29:44 +00:00
|
|
|
DPAA_PMD_ERR("Invalid number of RX queues\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2020-09-04 08:39:25 +00:00
|
|
|
if (num_rx_fqs > 0) {
|
|
|
|
dpaa_intf->rx_queues = rte_zmalloc(NULL,
|
|
|
|
sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
|
|
|
|
if (!dpaa_intf->rx_queues) {
|
|
|
|
DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
dpaa_intf->rx_queues = NULL;
|
2018-01-18 11:48:56 +00:00
|
|
|
}
|
2018-01-10 10:46:28 +00:00
|
|
|
|
2020-07-07 09:22:24 +00:00
|
|
|
memset(cgrid, 0, sizeof(cgrid));
|
|
|
|
memset(cgrid_tx, 0, sizeof(cgrid_tx));
|
|
|
|
|
|
|
|
/* if DPAA_TX_TAILDROP_THRESHOLD is set, use that value; if 0, it means
|
|
|
|
* Tx tail drop is disabled.
|
|
|
|
*/
|
|
|
|
if (getenv("DPAA_TX_TAILDROP_THRESHOLD")) {
|
|
|
|
td_tx_threshold = atoi(getenv("DPAA_TX_TAILDROP_THRESHOLD"));
|
|
|
|
DPAA_PMD_DEBUG("Tail drop threshold env configured: %u",
|
|
|
|
td_tx_threshold);
|
|
|
|
/* if a very large value is being configured */
|
|
|
|
if (td_tx_threshold > UINT16_MAX)
|
|
|
|
td_tx_threshold = CGR_RX_PERFQ_THRESH;
|
|
|
|
}
|
|
|
|
|
2018-01-10 10:46:28 +00:00
|
|
|
/* If congestion control is enabled globally*/
|
2020-09-04 08:39:25 +00:00
|
|
|
if (num_rx_fqs > 0 && td_threshold) {
|
2018-01-10 10:46:28 +00:00
|
|
|
dpaa_intf->cgr_rx = rte_zmalloc(NULL,
|
|
|
|
sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
|
2018-01-18 11:48:56 +00:00
|
|
|
if (!dpaa_intf->cgr_rx) {
|
|
|
|
DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto free_rx;
|
|
|
|
}
|
2018-01-10 10:46:28 +00:00
|
|
|
|
|
|
|
ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
|
|
|
|
if (ret != num_rx_fqs) {
|
|
|
|
DPAA_PMD_WARN("insufficient CGRIDs available");
|
2018-01-18 11:48:56 +00:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto free_rx;
|
2018-01-10 10:46:28 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
dpaa_intf->cgr_rx = NULL;
|
|
|
|
}
|
|
|
|
|
2020-09-04 08:39:25 +00:00
|
|
|
if (!fmc_q && !default_q) {
|
|
|
|
ret = qman_alloc_fqid_range(dev_rx_fqids, num_rx_fqs,
|
|
|
|
num_rx_fqs, 0);
|
|
|
|
if (ret < 0) {
|
|
|
|
DPAA_PMD_ERR("Failed to alloc rx fqid's\n");
|
|
|
|
goto free_rx;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-28 12:29:44 +00:00
|
|
|
for (loop = 0; loop < num_rx_fqs; loop++) {
|
2018-07-06 08:10:06 +00:00
|
|
|
if (default_q)
|
|
|
|
fqid = cfg->rx_def;
|
|
|
|
else
|
2020-09-04 08:39:25 +00:00
|
|
|
fqid = dev_rx_fqids[loop];
|
2018-01-10 10:46:28 +00:00
|
|
|
|
2020-09-04 08:39:28 +00:00
|
|
|
vsp_id = dev_vspids[loop];
|
|
|
|
|
2018-01-10 10:46:28 +00:00
|
|
|
if (dpaa_intf->cgr_rx)
|
|
|
|
dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
|
|
|
|
|
|
|
|
ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
|
|
|
|
dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
|
|
|
|
fqid);
|
2017-09-28 12:29:44 +00:00
|
|
|
if (ret)
|
2018-01-18 11:48:56 +00:00
|
|
|
goto free_rx;
|
2020-09-04 08:39:28 +00:00
|
|
|
dpaa_intf->rx_queues[loop].vsp_id = vsp_id;
|
2017-09-28 12:29:44 +00:00
|
|
|
dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
|
|
|
|
}
|
|
|
|
dpaa_intf->nb_rx_queues = num_rx_fqs;
|
2017-09-28 12:29:42 +00:00
|
|
|
|
2018-01-18 11:48:56 +00:00
|
|
|
/* Initialise Tx FQs. Have as many Tx FQs as the number of cores */
|
2017-09-28 12:29:44 +00:00
|
|
|
dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
|
2019-06-25 10:40:19 +00:00
|
|
|
MAX_DPAA_CORES, MAX_CACHELINE);
|
2018-01-18 11:48:56 +00:00
|
|
|
if (!dpaa_intf->tx_queues) {
|
|
|
|
DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto free_rx;
|
|
|
|
}
|
2017-09-28 12:29:44 +00:00
|
|
|
|
2020-07-07 09:22:24 +00:00
|
|
|
/* If congestion control is enabled globally*/
|
|
|
|
if (td_tx_threshold) {
|
|
|
|
dpaa_intf->cgr_tx = rte_zmalloc(NULL,
|
|
|
|
sizeof(struct qman_cgr) * MAX_DPAA_CORES,
|
|
|
|
MAX_CACHELINE);
|
|
|
|
if (!dpaa_intf->cgr_tx) {
|
|
|
|
DPAA_PMD_ERR("Failed to alloc mem for cgr_tx\n");
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto free_rx;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = qman_alloc_cgrid_range(&cgrid_tx[0], MAX_DPAA_CORES,
|
|
|
|
1, 0);
|
|
|
|
if (ret != MAX_DPAA_CORES) {
|
|
|
|
DPAA_PMD_WARN("insufficient CGRIDs available");
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto free_rx;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
dpaa_intf->cgr_tx = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-06-25 10:40:19 +00:00
|
|
|
for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
|
2020-07-07 09:22:24 +00:00
|
|
|
if (dpaa_intf->cgr_tx)
|
|
|
|
dpaa_intf->cgr_tx[loop].cgrid = cgrid_tx[loop];
|
|
|
|
|
2017-09-28 12:29:44 +00:00
|
|
|
ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
|
2020-07-07 09:22:24 +00:00
|
|
|
fman_intf,
|
|
|
|
dpaa_intf->cgr_tx ? &dpaa_intf->cgr_tx[loop] : NULL);
|
2017-09-28 12:29:44 +00:00
|
|
|
if (ret)
|
2018-01-18 11:48:56 +00:00
|
|
|
goto free_tx;
|
2017-09-28 12:29:44 +00:00
|
|
|
dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
|
|
|
|
}
|
2019-06-25 10:40:19 +00:00
|
|
|
dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
|
2017-09-28 12:29:44 +00:00
|
|
|
|
2017-09-28 12:29:58 +00:00
|
|
|
#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
|
2020-09-24 04:02:07 +00:00
|
|
|
ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
|
|
|
|
[DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
|
|
|
|
if (ret) {
|
|
|
|
DPAA_PMD_ERR("DPAA RX ERROR queue init failed!");
|
|
|
|
goto free_tx;
|
|
|
|
}
|
2017-09-28 12:29:58 +00:00
|
|
|
dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
|
2020-09-24 04:02:07 +00:00
|
|
|
ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
|
|
|
|
[DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
|
|
|
|
if (ret) {
|
|
|
|
DPAA_PMD_ERR("DPAA TX ERROR queue init failed!");
|
|
|
|
goto free_tx;
|
|
|
|
}
|
2017-09-28 12:29:58 +00:00
|
|
|
dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
|
|
|
|
#endif
|
|
|
|
|
2017-09-28 12:29:44 +00:00
|
|
|
DPAA_PMD_DEBUG("All frame queues created");
|
|
|
|
|
2017-09-28 12:29:53 +00:00
|
|
|
/* Get the initial configuration for flow control */
|
2020-07-07 09:22:26 +00:00
|
|
|
dpaa_fc_set_default(dpaa_intf, fman_intf);
|
2017-09-28 12:29:53 +00:00
|
|
|
|
2017-09-28 12:29:44 +00:00
|
|
|
/* reset bpool list, initialize bpool dynamically */
|
|
|
|
list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
|
|
|
|
list_del(&bp->node);
|
2019-03-26 12:01:46 +00:00
|
|
|
rte_free(bp);
|
2017-09-28 12:29:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Populate ethdev structure */
|
2017-09-28 12:29:42 +00:00
|
|
|
eth_dev->dev_ops = &dpaa_devops;
|
2020-09-09 13:01:43 +00:00
|
|
|
eth_dev->rx_queue_count = dpaa_dev_rx_queue_count;
|
2017-09-28 12:29:44 +00:00
|
|
|
eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
|
|
|
|
eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
|
|
|
|
|
|
|
|
/* Allocate memory for storing MAC addresses */
|
|
|
|
eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
|
2019-05-21 16:13:05 +00:00
|
|
|
RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
|
2017-09-28 12:29:44 +00:00
|
|
|
if (eth_dev->data->mac_addrs == NULL) {
|
|
|
|
DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
|
|
|
|
"store MAC addresses",
|
2019-05-21 16:13:05 +00:00
|
|
|
RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
|
2018-01-18 11:48:56 +00:00
|
|
|
ret = -ENOMEM;
|
|
|
|
goto free_tx;
|
2017-09-28 12:29:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* copy the primary mac address */
|
2019-05-21 16:13:04 +00:00
|
|
|
rte_ether_addr_copy(&fman_intf->mac_addr, ð_dev->data->mac_addrs[0]);
|
2017-09-28 12:29:44 +00:00
|
|
|
|
2021-08-25 17:27:33 +00:00
|
|
|
RTE_LOG(INFO, PMD, "net: dpaa: %s: " RTE_ETHER_ADDR_PRT_FMT "\n",
|
2021-08-25 17:27:34 +00:00
|
|
|
dpaa_device->name, RTE_ETHER_ADDR_BYTES(&fman_intf->mac_addr));
|
2020-09-04 08:39:25 +00:00
|
|
|
|
2020-09-04 08:39:26 +00:00
|
|
|
if (!fman_intf->is_shared_mac) {
|
2020-09-24 04:02:08 +00:00
|
|
|
/* Configure error packet handling */
|
2020-09-24 04:02:07 +00:00
|
|
|
fman_if_receive_rx_errors(fman_intf,
|
|
|
|
FM_FD_RX_STATUS_ERR_MASK);
|
2020-09-24 04:02:08 +00:00
|
|
|
/* Disable RX mode */
|
2020-09-04 08:39:26 +00:00
|
|
|
fman_if_disable_rx(fman_intf);
|
|
|
|
/* Disable promiscuous mode */
|
|
|
|
fman_if_promiscuous_disable(fman_intf);
|
|
|
|
/* Disable multicast */
|
|
|
|
fman_if_reset_mcast_filter_table(fman_intf);
|
|
|
|
/* Reset interface statistics */
|
|
|
|
fman_if_stats_reset(fman_intf);
|
|
|
|
/* Disable SG by default */
|
|
|
|
fman_if_set_sg(fman_intf, 0);
|
|
|
|
fman_if_set_maxfrm(fman_intf,
|
|
|
|
RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
|
|
|
|
}
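/* For a MAC owned exclusively by DPDK the interface is left in a known
 * quiescent state: Rx, promiscuous and multicast reception disabled,
 * statistics cleared, scatter-gather off, and the maximum frame length
 * sized for a standard Ethernet frame plus one VLAN tag.
 */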
|
2017-09-28 12:29:42 +00:00
|
|
|
|
|
|
|
return 0;
|
2018-01-18 11:48:56 +00:00
|
|
|
|
|
|
|
free_tx:
|
|
|
|
rte_free(dpaa_intf->tx_queues);
|
|
|
|
dpaa_intf->tx_queues = NULL;
|
|
|
|
dpaa_intf->nb_tx_queues = 0;
|
|
|
|
|
|
|
|
free_rx:
|
|
|
|
rte_free(dpaa_intf->cgr_rx);
|
2020-07-07 09:22:24 +00:00
|
|
|
rte_free(dpaa_intf->cgr_tx);
|
2018-01-18 11:48:56 +00:00
|
|
|
rte_free(dpaa_intf->rx_queues);
|
|
|
|
dpaa_intf->rx_queues = NULL;
|
|
|
|
dpaa_intf->nb_rx_queues = 0;
|
|
|
|
return ret;
|
2017-09-28 12:29:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2020-09-04 08:39:25 +00:00
|
|
|
rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
|
2017-09-28 12:29:42 +00:00
|
|
|
struct rte_dpaa_device *dpaa_dev)
|
|
|
|
{
|
|
|
|
int diag;
|
|
|
|
int ret;
|
|
|
|
struct rte_eth_dev *eth_dev;
|
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
2019-07-25 11:06:43 +00:00
|
|
|
if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
|
|
|
|
RTE_PKTMBUF_HEADROOM) {
|
|
|
|
DPAA_PMD_ERR(
|
|
|
|
"RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)",
|
|
|
|
RTE_PKTMBUF_HEADROOM,
|
|
|
|
DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE);
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
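/* The check above enforces a build-time requirement: RTE_PKTMBUF_HEADROOM
 * must be large enough to hold the DPAA hardware annotation area plus the
 * frame-descriptor PTA (DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE).
 */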
|
|
|
|
|
2017-09-28 12:29:42 +00:00
|
|
|
/* In case of secondary process, the device is already configured
|
|
|
|
* and no further action is required, except portal initialization
|
|
|
|
* and verifying that the secondary process has attached to the port name.
|
|
|
|
*/
|
|
|
|
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
|
|
|
|
eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
|
|
|
|
if (!eth_dev)
|
|
|
|
return -ENOMEM;
|
2018-07-20 14:54:23 +00:00
|
|
|
eth_dev->device = &dpaa_dev->device;
|
|
|
|
eth_dev->dev_ops = &dpaa_devops;
|
2020-07-07 09:22:26 +00:00
|
|
|
|
|
|
|
ret = dpaa_dev_init_secondary(eth_dev);
|
|
|
|
if (ret != 0) {
|
|
|
|
RTE_LOG(ERR, PMD, "secondary dev init failed\n");
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-05-10 23:58:30 +00:00
|
|
|
rte_eth_dev_probing_finish(eth_dev);
|
2017-09-28 12:29:42 +00:00
|
|
|
return 0;
|
|
|
|
}
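/* From here on we are running in the primary process and create the
 * port, queues and FMAN resources from scratch instead of attaching to
 * them.
 */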
|
|
|
|
|
2019-06-25 10:40:19 +00:00
|
|
|
if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) {
|
2018-07-06 08:10:06 +00:00
|
|
|
if (access("/tmp/fmc.bin", F_OK) == -1) {
|
2020-03-31 04:41:55 +00:00
|
|
|
DPAA_PMD_INFO("* FMC not configured.Enabling default mode");
|
2018-07-06 08:10:06 +00:00
|
|
|
default_q = 1;
|
|
|
|
}
|
|
|
|
|
2020-09-04 08:39:25 +00:00
|
|
|
if (!(default_q || fmc_q)) {
|
|
|
|
if (dpaa_fm_init()) {
|
|
|
|
DPAA_PMD_ERR("FM init failed\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
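/* Queue configuration selection: without /tmp/fmc.bin the driver falls
 * back to its default queue configuration (default_q); only when neither
 * the default nor an FMC-generated configuration is in use is the
 * FMLIB-based FMAN setup brought up through dpaa_fm_init().
 */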
|
|
|
|
|
2018-07-06 08:10:11 +00:00
|
|
|
/* disabling the default push mode for LS1043 */
|
|
|
|
if (dpaa_svr_family == SVR_LS1043A_FAMILY)
|
|
|
|
dpaa_push_mode_max_queue = 0;
|
|
|
|
|
2021-11-29 16:08:02 +00:00
|
|
|
/* Check whether push mode queues are to be enabled. Currently we allow
|
2018-07-06 08:10:11 +00:00
|
|
|
* only one queue per thread.
|
|
|
|
*/
|
|
|
|
if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
|
|
|
|
dpaa_push_mode_max_queue =
|
|
|
|
atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
|
|
|
|
if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
|
|
|
|
dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
|
|
|
|
}
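/* Illustrative use of the knob above (hypothetical invocation, shown
 * only as an example):
 *
 *   DPAA_PUSH_QUEUES_NUMBER=4 ./dpdk-testpmd -l 1-4 -- -i
 *
 * Values larger than DPAA_MAX_PUSH_MODE_QUEUE are clamped, and 0
 * disables push-mode queues entirely.
 */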
|
|
|
|
|
2017-09-28 12:29:42 +00:00
|
|
|
is_global_init = 1;
|
|
|
|
}
|
|
|
|
|
2020-07-07 09:22:27 +00:00
|
|
|
if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
|
2018-01-23 12:27:06 +00:00
|
|
|
ret = rte_dpaa_portal_init((void *)1);
|
|
|
|
if (ret) {
|
|
|
|
DPAA_PMD_ERR("Unable to initialize portal");
|
|
|
|
return ret;
|
|
|
|
}
|
2017-09-28 12:29:42 +00:00
|
|
|
}
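/* Any thread that talks to QMAN/BMAN needs an affined software portal;
 * the probing thread acquires one here if it does not already hold one
 * (DPAA_PER_LCORE_PORTAL unset).
 */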
|
|
|
|
|
2020-07-07 09:22:26 +00:00
|
|
|
eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
|
|
|
|
if (!eth_dev)
|
|
|
|
return -ENOMEM;
|
2017-09-28 12:29:42 +00:00
|
|
|
|
2020-07-07 09:22:26 +00:00
|
|
|
eth_dev->data->dev_private =
|
|
|
|
rte_zmalloc("ethdev private structure",
|
|
|
|
sizeof(struct dpaa_if),
|
|
|
|
RTE_CACHE_LINE_SIZE);
|
|
|
|
if (!eth_dev->data->dev_private) {
|
|
|
|
DPAA_PMD_ERR("Cannot allocate memzone for port data");
|
|
|
|
rte_eth_dev_release_port(eth_dev);
|
|
|
|
return -ENOMEM;
|
2017-09-28 12:29:42 +00:00
|
|
|
}
|
2020-07-07 09:22:26 +00:00
|
|
|
|
2017-09-28 12:29:42 +00:00
|
|
|
eth_dev->device = &dpaa_dev->device;
|
|
|
|
dpaa_dev->eth_dev = eth_dev;
|
|
|
|
|
2020-07-07 09:22:24 +00:00
|
|
|
qman_ern_register_cb(dpaa_free_mbuf);
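/* Register dpaa_free_mbuf as the QMAN enqueue-rejection (ERN) callback so
 * that mbufs of rejected frames are returned to their mempool.
 */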
|
|
|
|
|
2020-07-07 09:22:28 +00:00
|
|
|
if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC)
|
|
|
|
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
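/* Advertise link-status-change interrupt support to applications only
 * when the DPAA driver flagged the device as LSC capable.
 */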
|
|
|
|
|
2017-09-28 12:29:42 +00:00
|
|
|
/* Invoke PMD device initialization function */
|
|
|
|
diag = dpaa_dev_init(eth_dev);
|
2018-05-10 23:58:30 +00:00
|
|
|
if (diag == 0) {
|
|
|
|
rte_eth_dev_probing_finish(eth_dev);
|
2017-09-28 12:29:42 +00:00
|
|
|
return 0;
|
2018-05-10 23:58:30 +00:00
|
|
|
}
|
2017-09-28 12:29:42 +00:00
|
|
|
|
|
|
|
rte_eth_dev_release_port(eth_dev);
|
|
|
|
return diag;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev *eth_dev;
|
2020-09-28 23:14:16 +00:00
|
|
|
int ret;
|
2017-09-28 12:29:42 +00:00
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
|
|
|
eth_dev = dpaa_dev->eth_dev;
|
2020-09-28 23:14:16 +00:00
|
|
|
dpaa_eth_dev_close(eth_dev);
|
|
|
|
ret = rte_eth_dev_release_port(eth_dev);
|
2017-09-28 12:29:42 +00:00
|
|
|
|
2020-09-28 23:14:16 +00:00
|
|
|
return ret;
|
2017-09-28 12:29:42 +00:00
|
|
|
}
|
|
|
|
|
2020-09-04 08:39:25 +00:00
|
|
|
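/* Late destructor (priority 102) run at process teardown: in the primary
 * process it walks all ethdevs owned by this PMD, undoes the FMLIB/VSP
 * configuration created during probe, and finally terminates the FM
 * instance.
 */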
static void __attribute__((destructor(102))) dpaa_finish(void)
|
|
|
|
{
|
|
|
|
/* For secondary, primary will do all the cleanup */
|
|
|
|
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (!(default_q || fmc_q)) {
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
|
|
|
|
if (rte_eth_devices[i].dev_ops == &dpaa_devops) {
|
|
|
|
struct rte_eth_dev *dev = &rte_eth_devices[i];
|
|
|
|
struct dpaa_if *dpaa_intf =
|
|
|
|
dev->data->dev_private;
|
|
|
|
struct fman_if *fif =
|
|
|
|
dev->process_private;
|
|
|
|
if (dpaa_intf->port_handle)
|
|
|
|
if (dpaa_fm_deconfig(dpaa_intf, fif))
|
|
|
|
DPAA_PMD_WARN("DPAA FM "
|
|
|
|
"deconfig failed\n");
|
2020-09-04 08:39:28 +00:00
|
|
|
if (fif->num_profiles) {
|
|
|
|
if (dpaa_port_vsp_cleanup(dpaa_intf,
|
|
|
|
fif))
|
|
|
|
DPAA_PMD_WARN("DPAA FM vsp cleanup failed\n");
|
|
|
|
}
|
2020-09-04 08:39:25 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (is_global_init)
|
|
|
|
if (dpaa_fm_term())
|
|
|
|
DPAA_PMD_WARN("DPAA FM term failed\n");
|
|
|
|
|
|
|
|
is_global_init = 0;
|
|
|
|
|
|
|
|
DPAA_PMD_INFO("DPAA fman cleaned up");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-28 12:29:42 +00:00
|
|
|
static struct rte_dpaa_driver rte_dpaa_pmd = {
|
2020-07-07 09:22:28 +00:00
|
|
|
.drv_flags = RTE_DPAA_DRV_INTR_LSC,
|
2017-09-28 12:29:42 +00:00
|
|
|
.drv_type = FSL_DPAA_ETH,
|
|
|
|
.probe = rte_dpaa_probe,
|
|
|
|
.remove = rte_dpaa_remove,
|
|
|
|
};
|
|
|
|
|
|
|
|
RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
|
log: register with standardized names
Let's try to enforce the convention where most drivers use a pmd. logtype
with their class reflected in it, and libraries use a lib. logtype.
Introduce two new macros:
- RTE_LOG_REGISTER_DEFAULT can be used when a single logtype is
used in a component. It is associated with the default name provided
by the build system,
- RTE_LOG_REGISTER_SUFFIX can be used when multiple logtypes are used,
and then the passed name is appended to the default name,
RTE_LOG_REGISTER is left untouched for existing external users
and for components that do not comply with the convention.
There is a new Meson variable log_prefix to adapt the default name
for baseband (pmd.bb.), bus (no pmd.) and mempool (no pmd.) classes.
Note: achieved with below commands + reverted change on net/bonding +
edits on crypto/virtio, compress/mlx5, regex/mlx5
$ git grep -l RTE_LOG_REGISTER drivers/ |
while read file; do
pattern=${file##drivers/};
class=${pattern%%/*};
pattern=${pattern#$class/};
drv=${pattern%%/*};
case "$class" in
baseband) pattern=pmd.bb.$drv;;
bus) pattern=bus.$drv;;
mempool) pattern=mempool.$drv;;
*) pattern=pmd.$class.$drv;;
esac
sed -i -e 's/RTE_LOG_REGISTER(\(.*\), '$pattern',/RTE_LOG_REGISTER_DEFAULT(\1,/' $file;
sed -i -e 's/RTE_LOG_REGISTER(\(.*\), '$pattern'\.\(.*\),/RTE_LOG_REGISTER_SUFFIX(\1, \2,/' $file;
done
$ git grep -l RTE_LOG_REGISTER lib/ |
while read file; do
pattern=${file##lib/};
pattern=lib.${pattern%%/*};
sed -i -e 's/RTE_LOG_REGISTER(\(.*\), '$pattern',/RTE_LOG_REGISTER_DEFAULT(\1,/' $file;
sed -i -e 's/RTE_LOG_REGISTER(\(.*\), '$pattern'\.\(.*\),/RTE_LOG_REGISTER_SUFFIX(\1, \2,/' $file;
done
Signed-off-by: David Marchand <david.marchand@redhat.com>
Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
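As a sketch of the resulting convention (the component and logtype names
below are hypothetical, not taken from this driver):

    /* single logtype, registered under the build system's default name */
    RTE_LOG_REGISTER_DEFAULT(foo_logtype, NOTICE);

    /* extra logtype, ".rx" is appended to the default name */
    RTE_LOG_REGISTER_SUFFIX(foo_rx_logtype, rx, NOTICE);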
2021-04-26 12:51:08 +00:00
|
|
|
RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_pmd, NOTICE);
|