d853d24b27
Many users (e.g. telemetry) invoke rte_eth_xstats_get(port_id, NULL, 0)
to retrieve the required number of elements, but currently the mvpp2 PMD
returns zero when xstats is NULL.
Remove the "return zero when xstats is NULL" logic and instead return
the required number of entries whenever n is lower than that number.
Fixes: a77b5378cd ("net/mrvl: add extended statistics")
Cc: stable@dpdk.org
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
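
For context, a minimal sketch of the two-step query pattern this fix enables, using the standard rte_ethdev API; a valid port_id, the usual <rte_ethdev.h> and <stdlib.h> includes, and an enclosing function returning int are assumed:

	/* Query the required number of entries (xstats == NULL, n == 0). */
	int nb = rte_eth_xstats_get(port_id, NULL, 0);
	if (nb <= 0)
		return nb;

	struct rte_eth_xstat *xstats = malloc(nb * sizeof(*xstats));
	if (xstats == NULL)
		return -ENOMEM;

	/* Retrieve the counters into the properly sized array. */
	int ret = rte_eth_xstats_get(port_id, xstats, nb);
	if (ret > 0) {
		/* xstats[0..ret - 1] now hold valid id/value pairs. */
	}
	free(xstats);
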
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2021 Marvell International Ltd.
 * Copyright(c) 2017-2021 Semihalf.
 * All rights reserved.
 */

#include <rte_string_fns.h>
#include <ethdev_driver.h>
#include <rte_kvargs.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>

#include <fcntl.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>

#include <rte_mvep_common.h>
#include "mrvl_ethdev.h"
#include "mrvl_qos.h"
#include "mrvl_flow.h"
#include "mrvl_mtr.h"
#include "mrvl_tm.h"

/* bitmask with reserved hifs */
#define MRVL_MUSDK_HIFS_RESERVED 0x0F
/* bitmask with reserved bpools */
#define MRVL_MUSDK_BPOOLS_RESERVED 0x07
/* bitmask with reserved kernel RSS tables */
#define MRVL_MUSDK_RSS_RESERVED 0x0F
/* maximum number of available hifs */
#define MRVL_MUSDK_HIFS_MAX 9

/* prefetch shift */
#define MRVL_MUSDK_PREFETCH_SHIFT 2

/* TCAM has 25 entries reserved for uc/mc filter entries
 * + 1 for primary mac address
 */
#define MRVL_MAC_ADDRS_MAX (1 + 25)
#define MRVL_MATCH_LEN 16
#define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
/* Maximum allowable packet size */
#define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)

#define MRVL_IFACE_NAME_ARG "iface"
#define MRVL_CFG_ARG "cfg"

#define MRVL_ARP_LENGTH 28

#define MRVL_COOKIE_ADDR_INVALID ~0ULL
#define MRVL_COOKIE_HIGH_ADDR_MASK 0xffffff0000000000

/** Port Rx offload capabilities */
#define MRVL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
			  RTE_ETH_RX_OFFLOAD_CHECKSUM)

/** Port Tx offloads capabilities */
#define MRVL_TX_OFFLOAD_CHECKSUM (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
#define MRVL_TX_OFFLOADS (MRVL_TX_OFFLOAD_CHECKSUM | \
			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS)

#define MRVL_TX_PKT_OFFLOADS (RTE_MBUF_F_TX_IP_CKSUM | \
			      RTE_MBUF_F_TX_TCP_CKSUM | \
			      RTE_MBUF_F_TX_UDP_CKSUM)

static const char * const valid_args[] = {
	MRVL_IFACE_NAME_ARG,
	MRVL_CFG_ARG,
	NULL
};

static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
static struct pp2_hif *hifs[RTE_MAX_LCORE];
static int used_bpools[PP2_NUM_PKT_PROC] = {
	[0 ... PP2_NUM_PKT_PROC - 1] = MRVL_MUSDK_BPOOLS_RESERVED
};

static struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
static int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
static uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
static int dummy_pool_id[PP2_NUM_PKT_PROC];
struct pp2_bpool *dummy_pool[PP2_NUM_PKT_PROC] = {0};

struct mrvl_ifnames {
	const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
	int idx;
};

/*
 * To use buffer harvesting based on the loopback port, a shadow queue
 * structure was introduced for buffer information bookkeeping.
 *
 * Before sending a packet, the related buffer information (pp2_buff_inf) is
 * stored in the shadow queue. After the packet is transmitted, the no longer
 * used packet buffer is released back to its original hardware pool,
 * provided it originated from an interface.
 * In case it was generated by the application itself, i.e. the mbuf->port
 * field is 0xff, it is released to the software mempool.
 */
struct mrvl_shadow_txq {
	int head;           /* write index - used when sending buffers */
	int tail;           /* read index - used when releasing buffers */
	u16 size;           /* queue occupied size */
	u16 num_to_release; /* number of descriptors sent, that can be
			     * released
			     */
	struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
};

struct mrvl_rxq {
	struct mrvl_priv *priv;
	struct rte_mempool *mp;
	int queue_id;
	int port_id;
	int cksum_enabled;
	uint64_t bytes_recv;
	uint64_t drop_mac;
};

struct mrvl_txq {
	struct mrvl_priv *priv;
	int queue_id;
	int port_id;
	uint64_t bytes_sent;
	struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
	int tx_deferred_start;
};

static int mrvl_lcore_first;
static int mrvl_lcore_last;
static int mrvl_dev_num;

static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num);
static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
			struct pp2_hif *hif, unsigned int core_id,
			struct mrvl_shadow_txq *sq, int qid, int force);

static uint16_t mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static uint16_t mrvl_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts,
				     uint16_t nb_pkts);
static int rte_pmd_mrvl_remove(struct rte_vdev_device *vdev);
static void mrvl_deinit_pp2(void);
static void mrvl_deinit_hifs(void);

static int
mrvl_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		  uint32_t index, uint32_t vmdq __rte_unused);
static int
mrvl_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr);
static int
mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static int mrvl_promiscuous_enable(struct rte_eth_dev *dev);
static int mrvl_allmulticast_enable(struct rte_eth_dev *dev);
static int
mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);

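/*
 * Each entry ties an xstat name to the offset and width of the corresponding
 * counter inside struct pp2_ppio_statistics, so mrvl_xstats_get() can read
 * the counters generically.
 */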
#define MRVL_XSTATS_TBL_ENTRY(name) { \
	#name, offsetof(struct pp2_ppio_statistics, name), \
	sizeof(((struct pp2_ppio_statistics *)0)->name) \
}

/* Table with xstats data */
static struct {
	const char *name;
	unsigned int offset;
	unsigned int size;
} mrvl_xstats_tbl[] = {
	MRVL_XSTATS_TBL_ENTRY(rx_bytes),
	MRVL_XSTATS_TBL_ENTRY(rx_packets),
	MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets),
	MRVL_XSTATS_TBL_ENTRY(rx_errors),
	MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_early_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped),
	MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped),
	MRVL_XSTATS_TBL_ENTRY(tx_bytes),
	MRVL_XSTATS_TBL_ENTRY(tx_packets),
	MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets),
	MRVL_XSTATS_TBL_ENTRY(tx_errors)
};

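/*
 * Reserve the bit just above the highest bit currently set in *bitmap.
 * Note: this yields the lowest free bit only as long as the used bits form
 * a contiguous mask starting at bit 0, which holds for the reserved-map
 * initial values used in this driver.
 */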
static inline int
mrvl_reserve_bit(int *bitmap, int max)
{
	int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);

	if (n >= max)
		return -1;

	*bitmap |= 1 << n;

	return n;
}

static int
mrvl_pp2_fixup_init(void)
{
	struct pp2_bpool_params bpool_params;
	char name[15];
	int err, i;

	memset(dummy_pool, 0, sizeof(dummy_pool));
	for (i = 0; i < pp2_get_num_inst(); i++) {
		dummy_pool_id[i] = mrvl_reserve_bit(&used_bpools[i],
						    PP2_BPOOL_NUM_POOLS);
		if (dummy_pool_id[i] < 0) {
			MRVL_LOG(ERR, "Can't find free pool\n");
			return -1;
		}

		memset(name, 0, sizeof(name));
		snprintf(name, sizeof(name), "pool-%d:%d", i, dummy_pool_id[i]);
		memset(&bpool_params, 0, sizeof(bpool_params));
		bpool_params.match = name;
		bpool_params.buff_len = MRVL_PKT_OFFS;
		bpool_params.dummy_short_pool = 1;
		err = pp2_bpool_init(&bpool_params, &dummy_pool[i]);
		if (err != 0 || !dummy_pool[i]) {
			MRVL_LOG(ERR, "BPool init failed!\n");
			used_bpools[i] &= ~(1 << dummy_pool_id[i]);
			return -1;
		}
	}

	return 0;
}

/**
 * Initialize packet processor.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_init_pp2(void)
{
	struct pp2_init_params init_params;
	int err;

	memset(&init_params, 0, sizeof(init_params));
	init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
	init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
	init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;
	if (mrvl_cfg && mrvl_cfg->pp2_cfg.prs_udfs.num_udfs)
		memcpy(&init_params.prs_udfs, &mrvl_cfg->pp2_cfg.prs_udfs,
		       sizeof(struct pp2_parse_udfs));
	err = pp2_init(&init_params);
	if (err != 0) {
		MRVL_LOG(ERR, "PP2 init failed");
		return -1;
	}

	err = mrvl_pp2_fixup_init();
	if (err != 0) {
		MRVL_LOG(ERR, "PP2 fixup init failed");
		return -1;
	}

	return 0;
}

static void
mrvl_pp2_fixup_deinit(void)
{
	int i;

	for (i = 0; i < PP2_NUM_PKT_PROC; i++) {
		if (!dummy_pool[i])
			continue;
		pp2_bpool_deinit(dummy_pool[i]);
		used_bpools[i] &= ~(1 << dummy_pool_id[i]);
	}
}

/**
 * Deinitialize packet processor.
 */
static void
mrvl_deinit_pp2(void)
{
	mrvl_pp2_fixup_deinit();
	pp2_deinit();
}

static inline void
mrvl_fill_shadowq(struct mrvl_shadow_txq *sq, struct rte_mbuf *buf)
{
	sq->ent[sq->head].buff.cookie = (uint64_t)buf;
	sq->ent[sq->head].buff.addr = buf ?
		rte_mbuf_data_iova_default(buf) : 0;

	sq->ent[sq->head].bpool =
		(unlikely(!buf || buf->port >= RTE_MAX_ETHPORTS ||
			  buf->refcnt > 1)) ? NULL :
		 mrvl_port_to_bpool_lookup[buf->port];

	sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
	sq->size++;
}

/**
 * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
 */
static void
mrvl_deinit_hifs(void)
{
	int i;

	for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
		if (hifs[i])
			pp2_hif_deinit(hifs[i]);
	}
	used_hifs = MRVL_MUSDK_HIFS_RESERVED;
	memset(hifs, 0, sizeof(hifs));
}

static inline void
mrvl_fill_desc(struct pp2_ppio_desc *desc, struct rte_mbuf *buf)
{
	pp2_ppio_outq_desc_reset(desc);
	pp2_ppio_outq_desc_set_phys_addr(desc, rte_pktmbuf_iova(buf));
	pp2_ppio_outq_desc_set_pkt_offset(desc, 0);
	pp2_ppio_outq_desc_set_pkt_len(desc, rte_pktmbuf_data_len(buf));
}

static inline int
mrvl_get_bpool_size(int pp2_id, int pool_id)
{
	int i;
	int size = 0;

	for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
		size += mrvl_port_bpool_size[pp2_id][pool_id][i];

	return size;
}

static int
mrvl_init_hif(int core_id)
{
	struct pp2_hif_params params;
	char match[MRVL_MATCH_LEN];
	int ret;

	ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
	if (ret < 0) {
		MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
		return ret;
	}

	snprintf(match, sizeof(match), "hif-%d", ret);
	memset(&params, 0, sizeof(params));
	params.match = match;
	params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
	ret = pp2_hif_init(&params, &hifs[core_id]);
	if (ret) {
		MRVL_LOG(ERR, "Failed to initialize hif %d", core_id);
		return ret;
	}

	return 0;
}

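/*
 * Return this lcore's hif, lazily creating it under priv->lock on first use
 * and widening the [mrvl_lcore_first, mrvl_lcore_last] range that the deinit
 * and bpool accounting loops iterate over.
 */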
static inline struct pp2_hif*
mrvl_get_hif(struct mrvl_priv *priv, int core_id)
{
	int ret;

	if (likely(hifs[core_id] != NULL))
		return hifs[core_id];

	rte_spinlock_lock(&priv->lock);

	ret = mrvl_init_hif(core_id);
	if (ret < 0) {
		MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
		goto out;
	}

	if (core_id < mrvl_lcore_first)
		mrvl_lcore_first = core_id;

	if (core_id > mrvl_lcore_last)
		mrvl_lcore_last = core_id;
out:
	rte_spinlock_unlock(&priv->lock);

	return hifs[core_id];
}

/**
 * Set Tx burst function according to the offload flag.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_set_tx_function(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
	if (priv->multiseg) {
		RTE_LOG(INFO, PMD, "Using multi-segment tx callback\n");
		dev->tx_pkt_burst = mrvl_tx_sg_pkt_burst;
	} else {
		RTE_LOG(INFO, PMD, "Using single-segment tx callback\n");
		dev->tx_pkt_burst = mrvl_tx_pkt_burst;
	}
}

/**
 * Configure RSS based on the DPDK RSS configuration.
 *
 * @param priv
 *   Pointer to private structure.
 * @param rss_conf
 *   Pointer to RSS configuration.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
{
	if (rss_conf->rss_key)
		MRVL_LOG(WARNING, "Changing hash key is not supported");

	if (rss_conf->rss_hf == 0) {
		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
	} else if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {
		priv->ppio_params.inqs_params.hash_type =
			PP2_PPIO_HASH_T_2_TUPLE;
	} else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
		priv->ppio_params.inqs_params.hash_type =
			PP2_PPIO_HASH_T_5_TUPLE;
		priv->rss_hf_tcp = 1;
	} else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
		priv->ppio_params.inqs_params.hash_type =
			PP2_PPIO_HASH_T_5_TUPLE;
		priv->rss_hf_tcp = 0;
	} else {
		return -EINVAL;
	}

	return 0;
}

/**
 * Ethernet device configuration.
 *
 * Prepare the driver for a given number of TX and RX queues and
 * configure RSS.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_configure(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (priv->ppio) {
		MRVL_LOG(INFO, "Device reconfiguration is not supported");
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_NONE &&
	    dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
		MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
			dev->data->dev_conf.rxmode.mq_mode);
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.split_hdr_size) {
		MRVL_LOG(INFO, "Split headers not supported");
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.mtu > priv->max_mtu) {
		MRVL_LOG(ERR, "MTU %u is larger than max_mtu %u\n",
			 dev->data->dev_conf.rxmode.mtu,
			 priv->max_mtu);
		return -EINVAL;
	}

	if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
		priv->multiseg = 1;

	ret = mrvl_configure_rxqs(priv, dev->data->port_id,
				  dev->data->nb_rx_queues);
	if (ret < 0)
		return ret;

	ret = mrvl_configure_txqs(priv, dev->data->port_id,
				  dev->data->nb_tx_queues);
	if (ret < 0)
		return ret;

	priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
	priv->ppio_params.maintain_stats = 1;
	priv->nb_rx_queues = dev->data->nb_rx_queues;

	ret = mrvl_tm_init(dev);
	if (ret < 0)
		return ret;

	if (dev->data->nb_rx_queues == 1 &&
	    dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
		MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
		priv->configured = 1;
		return 0;
	}

	ret = mrvl_configure_rss(priv,
				 &dev->data->dev_conf.rx_adv_conf.rss_conf);
	if (ret < 0)
		return ret;

	priv->configured = 1;

	return 0;
}

/**
 * DPDK callback to change the MTU.
 *
 * Setting the MTU affects hardware MRU (packets larger than the MRU
 * will be dropped).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	uint16_t mru;
	uint16_t mbuf_data_size = 0; /* SW buffer size */
	int ret;

	mru = MRVL_PP2_MTU_TO_MRU(mtu);
	/*
	 * min_rx_buf_size is equal to mbuf data size
	 * if pmd didn't set it differently
	 */
	mbuf_data_size = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
	/* Prevent PMD from:
	 * - setting mru greater than the mbuf size resulting in
	 *   hw and sw buffer size mismatch
	 * - setting mtu that requires the support of scattered packets
	 *   when this feature has not been enabled/supported so far
	 *   (TODO check scattered_rx flag here once scattered RX is supported).
	 */
	if (mru - RTE_ETHER_CRC_LEN + MRVL_PKT_OFFS > mbuf_data_size) {
		mru = mbuf_data_size + RTE_ETHER_CRC_LEN - MRVL_PKT_OFFS;
		mtu = MRVL_PP2_MRU_TO_MTU(mru);
		MRVL_LOG(WARNING, "MTU too big, max MTU possible limited "
			"by current mbuf size: %u. Set MTU to %u, MRU to %u",
			mbuf_data_size, mtu, mru);
	}

	if (mtu < RTE_ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) {
		MRVL_LOG(ERR, "Invalid MTU [%u] or MRU [%u]", mtu, mru);
		return -EINVAL;
	}

	if (!priv->ppio)
		return 0;

	ret = pp2_ppio_set_mru(priv->ppio, mru);
	if (ret) {
		MRVL_LOG(ERR, "Failed to change MRU");
		return ret;
	}

	ret = pp2_ppio_set_mtu(priv->ppio, mtu);
	if (ret) {
		MRVL_LOG(ERR, "Failed to change MTU");
		return ret;
	}

	return 0;
}

/**
 * DPDK callback to bring the link up.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (!priv->ppio) {
		dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
		return 0;
	}

	ret = pp2_ppio_enable(priv->ppio);
	if (ret)
		return ret;

	/*
	 * mtu/mru can be updated if pp2_ppio_enable() was called at least once
	 * as pp2_ppio_enable() changes port->t_mode from default 0 to
	 * PP2_TRAFFIC_INGRESS_EGRESS.
	 *
	 * Set mtu to default DPDK value here.
	 */
	ret = mrvl_mtu_set(dev, dev->data->mtu);
	if (ret) {
		pp2_ppio_disable(priv->ppio);
		return ret;
	}

	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
	return 0;
}

/**
 * DPDK callback to bring the link down.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (!priv->ppio) {
		dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
		return 0;
	}
	ret = pp2_ppio_disable(priv->ppio);
	if (ret)
		return ret;

	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
	return 0;
}

/**
 * DPDK callback to start tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param queue_id
 *   Transmit queue index.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (!priv)
		return -EPERM;

	/* passing 1 enables given tx queue */
	ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1);
	if (ret) {
		MRVL_LOG(ERR, "Failed to start txq %d", queue_id);
		return ret;
	}

	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/**
 * DPDK callback to stop tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param queue_id
 *   Transmit queue index.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (!priv->ppio)
		return -EPERM;

	/* passing 0 disables given tx queue */
	ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0);
	if (ret) {
		MRVL_LOG(ERR, "Failed to stop txq %d", queue_id);
		return ret;
	}

	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

/**
 * Populate VLAN filter configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param on
 *   Toggle filter.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int mrvl_populate_vlan_table(struct rte_eth_dev *dev, int on)
{
	uint32_t j;
	int ret;
	struct rte_vlan_filter_conf *vfc;

	vfc = &dev->data->vlan_filter_conf;
	for (j = 0; j < RTE_DIM(vfc->ids); j++) {
		uint64_t vlan;
		uint64_t vbit;
		uint64_t ids = vfc->ids[j];

		if (ids == 0)
			continue;

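		/*
		 * Walk every set bit of ids; bit b of word j maps to
		 * VLAN ID 64 * j + b, e.g. ids == 0x5 at j == 1 enables
		 * VLAN IDs 64 and 66.
		 */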
		while (ids) {
			vlan = 64 * j;
			/* count trailing zeroes */
			vbit = ~ids & (ids - 1);
			/* clear least significant bit set */
			ids ^= (ids ^ (ids - 1)) ^ vbit;
			for (; vbit; vlan++)
				vbit >>= 1;
			ret = mrvl_vlan_filter_set(dev, vlan, on);
			if (ret) {
				MRVL_LOG(ERR, "Failed to setup VLAN filter\n");
				return ret;
			}
		}
	}

	return 0;
}

/**
 * DPDK callback to start the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
mrvl_dev_start(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	char match[MRVL_MATCH_LEN];
	int ret = 0, i, def_init_size;
	struct rte_ether_addr *mac_addr;

	if (priv->ppio)
		return mrvl_dev_set_link_up(dev);

	snprintf(match, sizeof(match), "ppio-%d:%d",
		 priv->pp_id, priv->ppio_id);
	priv->ppio_params.match = match;
	priv->ppio_params.eth_start_hdr = PP2_PPIO_HDR_ETH;
	priv->forward_bad_frames = 0;
	priv->fill_bpool_buffs = MRVL_BURST_SIZE;

	if (mrvl_cfg) {
		priv->ppio_params.eth_start_hdr =
			mrvl_cfg->port[dev->data->port_id].eth_start_hdr;
		priv->forward_bad_frames =
			mrvl_cfg->port[dev->data->port_id].forward_bad_frames;
		priv->fill_bpool_buffs =
			mrvl_cfg->port[dev->data->port_id].fill_bpool_buffs;
	}

	/*
	 * Calculate the minimum bpool size for the refill feature as follows:
	 * 2 default burst sizes multiplied by the number of rx queues.
	 * If the bpool size drops below this value, new buffers will
	 * be added to the pool.
	 */
	priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;

	/* In case initial bpool size configured in queues setup is
	 * smaller than minimum size add more buffers
	 */
	def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2;
	if (priv->bpool_init_size < def_init_size) {
		int buffs_to_add = def_init_size - priv->bpool_init_size;

		priv->bpool_init_size += buffs_to_add;
		ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
		if (ret)
			MRVL_LOG(ERR, "Failed to add buffers to bpool");
	}

	/*
	 * Calculate the maximum bpool size for the refill feature as follows:
	 * the maximum number of descriptors in the rx queue multiplied by the
	 * number of rx queues, plus the minimum bpool size.
	 * In case the bpool size exceeds this value, superfluous buffers
	 * will be removed.
	 */
	priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) +
				priv->bpool_min_size;

	ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
	if (ret) {
		MRVL_LOG(ERR, "Failed to init ppio");
		return ret;
	}

	/*
	 * In case there are some stale uc/mc mac addresses flush them
	 * here. It cannot be done during mrvl_dev_close() as port information
	 * is already gone at that point (due to pp2_ppio_deinit() in
	 * mrvl_dev_stop()).
	 */
	if (!priv->uc_mc_flushed) {
		ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
		if (ret) {
			MRVL_LOG(ERR,
				"Failed to flush uc/mc filter list");
			goto out;
		}
		priv->uc_mc_flushed = 1;
	}

	ret = mrvl_mtu_set(dev, dev->data->mtu);
	if (ret)
		MRVL_LOG(ERR, "Failed to set MTU to %d", dev->data->mtu);

	if (!rte_is_zero_ether_addr(&dev->data->mac_addrs[0]))
		mrvl_mac_addr_set(dev, &dev->data->mac_addrs[0]);

	for (i = 1; i < MRVL_MAC_ADDRS_MAX; i++) {
		mac_addr = &dev->data->mac_addrs[i];

		/* skip zero address */
		if (rte_is_zero_ether_addr(mac_addr))
			continue;

		mrvl_mac_addr_add(dev, mac_addr, i, 0);
	}

	if (dev->data->all_multicast == 1)
		mrvl_allmulticast_enable(dev);

	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
		ret = mrvl_populate_vlan_table(dev, 1);
		if (ret) {
			MRVL_LOG(ERR, "Failed to populate VLAN table");
			goto out;
		}
	}

	/* For default QoS config, don't start classifier. */
	if (mrvl_cfg &&
	    mrvl_cfg->port[dev->data->port_id].use_qos_global_defaults == 0) {
		ret = mrvl_start_qos_mapping(priv);
		if (ret) {
			MRVL_LOG(ERR, "Failed to setup QoS mapping");
			goto out;
		}
	}

	ret = pp2_ppio_set_loopback(priv->ppio, dev->data->dev_conf.lpbk_mode);
	if (ret) {
		MRVL_LOG(ERR, "Failed to set loopback");
		goto out;
	}

	if (dev->data->promiscuous == 1)
		mrvl_promiscuous_enable(dev);

	if (priv->flow_ctrl) {
		ret = mrvl_flow_ctrl_set(dev, &priv->fc_conf);
		if (ret) {
			MRVL_LOG(ERR, "Failed to configure flow control");
			goto out;
		}
		priv->flow_ctrl = 0;
	}

	if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
		ret = mrvl_dev_set_link_up(dev);
		if (ret) {
			MRVL_LOG(ERR, "Failed to set link up");
			dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
			goto out;
		}
	}

	/* start tx queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct mrvl_txq *txq = dev->data->tx_queues[i];

		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

		if (!txq->tx_deferred_start)
			continue;

		/*
		 * All txqs are started by default. Stop them
		 * so that tx_deferred_start works as expected.
		 */
		ret = mrvl_tx_queue_stop(dev, i);
		if (ret)
			goto out;
	}

	mrvl_flow_init(dev);
	mrvl_mtr_init(dev);
	mrvl_set_tx_function(dev);

	return 0;
out:
	MRVL_LOG(ERR, "Failed to start device");
	pp2_ppio_deinit(priv->ppio);
	return ret;
}

/**
 * Flush receive queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_rx_queues(struct rte_eth_dev *dev)
{
	int i;

	MRVL_LOG(INFO, "Flushing rx queues");
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		int ret, num;

		do {
			struct mrvl_rxq *q = dev->data->rx_queues[i];
			struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];

			num = MRVL_PP2_RXD_MAX;
			ret = pp2_ppio_recv(q->priv->ppio,
					    q->priv->rxq_map[q->queue_id].tc,
					    q->priv->rxq_map[q->queue_id].inq,
					    descs, (uint16_t *)&num);
		} while (ret == 0 && num);
	}
}

/**
 * Flush transmit shadow queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
{
	int i, j;
	struct mrvl_txq *txq;

	MRVL_LOG(INFO, "Flushing tx shadow queues");
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = (struct mrvl_txq *)dev->data->tx_queues[i];

		for (j = 0; j < RTE_MAX_LCORE; j++) {
			struct mrvl_shadow_txq *sq;

			if (!hifs[j])
				continue;

			sq = &txq->shadow_txqs[j];
			mrvl_free_sent_buffers(txq->priv->ppio,
				hifs[j], j, sq, txq->queue_id, 1);
			while (sq->tail != sq->head) {
				uint64_t addr = cookie_addr_high |
					sq->ent[sq->tail].buff.cookie;
				rte_pktmbuf_free(
					(struct rte_mbuf *)addr);
				sq->tail = (sq->tail + 1) &
					    MRVL_PP2_TX_SHADOWQ_MASK;
			}
			memset(sq, 0, sizeof(*sq));
		}
	}
}

/**
 * Flush hardware bpool (buffer-pool).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_bpool(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct pp2_hif *hif;
	uint32_t num;
	int ret;
	unsigned int core_id = rte_lcore_id();

	if (core_id == LCORE_ID_ANY)
		core_id = rte_get_main_lcore();

	hif = mrvl_get_hif(priv, core_id);

	ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
	if (ret) {
		MRVL_LOG(ERR, "Failed to get bpool buffers number");
		return;
	}

	while (num--) {
		struct pp2_buff_inf inf;
		uint64_t addr;

		ret = pp2_bpool_get_buff(hif, priv->bpool, &inf);
		if (ret)
			break;

		addr = cookie_addr_high | inf.cookie;
		rte_pktmbuf_free((struct rte_mbuf *)addr);
	}
}

/**
 * DPDK callback to stop the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static int
mrvl_dev_stop(struct rte_eth_dev *dev)
{
	return mrvl_dev_set_link_down(dev);
}

/**
 * DPDK callback to close the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static int
mrvl_dev_close(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	size_t i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	mrvl_flush_rx_queues(dev);
	mrvl_flush_tx_shadow_queues(dev);
	mrvl_flow_deinit(dev);
	mrvl_mtr_deinit(dev);

	for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
		struct pp2_ppio_tc_params *tc_params =
			&priv->ppio_params.inqs_params.tcs_params[i];

		if (tc_params->inqs_params) {
			rte_free(tc_params->inqs_params);
			tc_params->inqs_params = NULL;
		}
	}

	if (priv->cls_tbl) {
		pp2_cls_tbl_deinit(priv->cls_tbl);
		priv->cls_tbl = NULL;
	}

	if (priv->qos_tbl) {
		pp2_cls_qos_tbl_deinit(priv->qos_tbl);
		priv->qos_tbl = NULL;
	}

	mrvl_flush_bpool(dev);
	mrvl_tm_deinit(dev);

	if (priv->ppio) {
		pp2_ppio_deinit(priv->ppio);
		priv->ppio = NULL;
	}

	/* policer must be released after ppio deinitialization */
	if (priv->default_policer) {
		pp2_cls_plcr_deinit(priv->default_policer);
		priv->default_policer = NULL;
	}

	if (priv->bpool) {
		pp2_bpool_deinit(priv->bpool);
		used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
		priv->bpool = NULL;
	}

	mrvl_dev_num--;

	if (mrvl_dev_num == 0) {
		MRVL_LOG(INFO, "Perform MUSDK deinit");
		mrvl_deinit_hifs();
		mrvl_deinit_pp2();
		rte_mvep_deinit(MVEP_MOD_T_PP2);
	}

	return 0;
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	/*
	 * TODO
	 * once MUSDK provides necessary API use it here
	 */
	struct mrvl_priv *priv = dev->data->dev_private;
	struct ethtool_cmd edata;
	struct ifreq req;
	int ret, fd, link_up;

	if (!priv->ppio)
		return -EPERM;

	edata.cmd = ETHTOOL_GSET;

	strcpy(req.ifr_name, dev->data->name);
	req.ifr_data = (void *)&edata;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd == -1)
		return -EFAULT;

	ret = ioctl(fd, SIOCETHTOOL, &req);
	if (ret == -1) {
		close(fd);
		return -EFAULT;
	}

	close(fd);

	switch (ethtool_cmd_speed(&edata)) {
	case SPEED_10:
		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10M;
		break;
	case SPEED_100:
		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100M;
		break;
	case SPEED_1000:
		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_1G;
		break;
	case SPEED_2500:
		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
		break;
	case SPEED_10000:
		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
		break;
	default:
		dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	}

	dev->data->dev_link.link_duplex = edata.duplex ? RTE_ETH_LINK_FULL_DUPLEX :
							 RTE_ETH_LINK_HALF_DUPLEX;
	dev->data->dev_link.link_autoneg = edata.autoneg ? RTE_ETH_LINK_AUTONEG :
							   RTE_ETH_LINK_FIXED;
	pp2_ppio_get_link_state(priv->ppio, &link_up);
	dev->data->dev_link.link_status = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;

	return 0;
}

/**
 * DPDK callback to enable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (priv->isolated)
		return -ENOTSUP;

	if (!priv->ppio)
		return 0;

	ret = pp2_ppio_set_promisc(priv->ppio, 1);
	if (ret) {
		MRVL_LOG(ERR, "Failed to enable promiscuous mode");
		return -EAGAIN;
	}

	return 0;
}

/**
 * DPDK callback to enable allmulti mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (priv->isolated)
		return -ENOTSUP;

	if (!priv->ppio)
		return 0;

	ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
	if (ret) {
		MRVL_LOG(ERR, "Failed to enable all-multicast mode");
		return -EAGAIN;
	}

	return 0;
}

/**
 * DPDK callback to disable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (priv->isolated)
		return -ENOTSUP;

	if (!priv->ppio)
		return 0;

	ret = pp2_ppio_set_promisc(priv->ppio, 0);
	if (ret) {
		MRVL_LOG(ERR, "Failed to disable promiscuous mode");
		return -EAGAIN;
	}

	return 0;
}

/**
 * DPDK callback to disable allmulticast mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (priv->isolated)
		return -ENOTSUP;

	if (!priv->ppio)
		return 0;

	ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
	if (ret) {
		MRVL_LOG(ERR, "Failed to disable all-multicast mode");
		return -EAGAIN;
	}

	return 0;
}

/**
 * DPDK callback to remove a MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param index
 *   MAC address index.
 */
static void
mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	if (priv->isolated)
		return;

	if (!priv->ppio)
		return;

	ret = pp2_ppio_remove_mac_addr(priv->ppio,
				       dev->data->mac_addrs[index].addr_bytes);
	if (ret) {
		rte_ether_format_addr(buf, sizeof(buf),
				      &dev->data->mac_addrs[index]);
		MRVL_LOG(ERR, "Failed to remove mac %s", buf);
	}
}

/**
 * DPDK callback to add a MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac_addr
 *   MAC address to register.
 * @param index
 *   MAC address index.
 * @param vmdq
 *   VMDq pool index to associate address with (unused).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		  uint32_t index, uint32_t vmdq __rte_unused)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	if (priv->isolated)
		return -ENOTSUP;

	if (!priv->ppio)
		return 0;

	if (index == 0)
		/* For setting index 0, mrvl_mac_addr_set() should be used. */
		return -1;

	/*
	 * The maximum number of uc addresses can be tuned via the kernel
	 * module mvpp2x parameter uc_filter_max. The maximum number of mc
	 * addresses is then MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently
	 * they default to 4 and 21 respectively.
	 *
	 * If more than uc_filter_max uc addresses were added to the filter
	 * list, the NIC will switch to promiscuous mode automatically.
	 *
	 * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max mc addresses were
	 * added to the filter list, the NIC will switch to all-multicast mode
	 * automatically.
	 */
	ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
	if (ret) {
		rte_ether_format_addr(buf, sizeof(buf), mac_addr);
		MRVL_LOG(ERR, "Failed to add mac %s", buf);
		return -1;
	}

	return 0;
}

/**
 * DPDK callback to set the primary MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac_addr
 *   MAC address to register.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (priv->isolated)
		return -ENOTSUP;

	if (!priv->ppio)
		return 0;

	ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
	if (ret) {
		char buf[RTE_ETHER_ADDR_FMT_SIZE];
		rte_ether_format_addr(buf, sizeof(buf), mac_addr);
		MRVL_LOG(ERR, "Failed to set mac to %s", buf);
	}

	return ret;
}

/**
 * DPDK callback to get device statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param stats
 *   Stats structure output buffer.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct pp2_ppio_statistics ppio_stats;
	uint64_t drop_mac = 0;
	unsigned int i, idx, ret;

	if (!priv->ppio)
		return -EPERM;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct mrvl_rxq *rxq = dev->data->rx_queues[i];
		struct pp2_ppio_inq_statistics rx_stats;

		if (!rxq)
			continue;

		idx = rxq->queue_id;
		if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
			MRVL_LOG(ERR,
				"rx queue %d stats out of range (0 - %d)",
				idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
			continue;
		}

		ret = pp2_ppio_inq_get_statistics(priv->ppio,
						  priv->rxq_map[idx].tc,
						  priv->rxq_map[idx].inq,
						  &rx_stats, 0);
		if (unlikely(ret)) {
			MRVL_LOG(ERR,
				"Failed to update rx queue %d stats", idx);
			break;
		}

		stats->q_ibytes[idx] = rxq->bytes_recv;
		stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac;
		stats->q_errors[idx] = rx_stats.drop_early +
				       rx_stats.drop_fullq +
				       rx_stats.drop_bm +
				       rxq->drop_mac;
		stats->ibytes += rxq->bytes_recv;
		drop_mac += rxq->drop_mac;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct mrvl_txq *txq = dev->data->tx_queues[i];
		struct pp2_ppio_outq_statistics tx_stats;

		if (!txq)
			continue;

		idx = txq->queue_id;
		if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
			MRVL_LOG(ERR,
				"tx queue %d stats out of range (0 - %d)",
				idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
		}

		ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
						   &tx_stats, 0);
		if (unlikely(ret)) {
			MRVL_LOG(ERR,
				"Failed to update tx queue %d stats", idx);
			break;
		}

		stats->q_opackets[idx] = tx_stats.deq_desc;
		stats->q_obytes[idx] = txq->bytes_sent;
		stats->obytes += txq->bytes_sent;
	}

	ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
	if (unlikely(ret)) {
		MRVL_LOG(ERR, "Failed to update port statistics");
		return ret;
	}

	stats->ipackets += ppio_stats.rx_packets - drop_mac;
	stats->opackets += ppio_stats.tx_packets;
	stats->imissed += ppio_stats.rx_fullq_dropped +
			  ppio_stats.rx_bm_dropped +
			  ppio_stats.rx_early_dropped +
			  ppio_stats.rx_fifo_dropped +
			  ppio_stats.rx_cls_dropped;
	stats->ierrors = drop_mac;

	return 0;
}

/**
 * DPDK callback to clear device statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_stats_reset(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int i;

	if (!priv->ppio)
		return 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct mrvl_rxq *rxq = dev->data->rx_queues[i];

		pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc,
					    priv->rxq_map[i].inq, NULL, 1);
		rxq->bytes_recv = 0;
		rxq->drop_mac = 0;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct mrvl_txq *txq = dev->data->tx_queues[i];

		pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1);
		txq->bytes_sent = 0;
	}

	return pp2_ppio_get_statistics(priv->ppio, NULL, 1);
}

/**
 * DPDK callback to get extended statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param stats
 *   Pointer to xstats table.
 * @param n
 *   Number of entries in xstats table.
 * @return
 *   Negative value on error, number of read xstats otherwise. If n is
 *   lower than the required number of entries, that required number is
 *   returned and no entries are read.
 */
static int
mrvl_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *stats, unsigned int n)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct pp2_ppio_statistics ppio_stats;
	unsigned int i, count;

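	/*
	 * Per the ethdev API, when the caller's array is too small (including
	 * the common size query with stats == NULL and n == 0), report the
	 * required number of entries without writing any of them.
	 */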
	count = RTE_DIM(mrvl_xstats_tbl);
	if (n < count)
		return count;

	pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
	for (i = 0; i < count; i++) {
		uint64_t val;

		if (mrvl_xstats_tbl[i].size == sizeof(uint32_t))
			val = *(uint32_t *)((uint8_t *)&ppio_stats +
					    mrvl_xstats_tbl[i].offset);
		else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t))
			val = *(uint64_t *)((uint8_t *)&ppio_stats +
					    mrvl_xstats_tbl[i].offset);
		else
			return -EINVAL;

		stats[i].id = i;
		stats[i].value = val;
	}

	return count;
}

/**
 * DPDK callback to reset extended statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_xstats_reset(struct rte_eth_dev *dev)
{
	return mrvl_stats_reset(dev);
}

/**
 * DPDK callback to get extended statistics names.
 *
 * @param dev (unused)
 *   Pointer to Ethernet device structure.
 * @param xstats_names
 *   Pointer to xstats names table.
 * @param size
 *   Size of the xstats names table.
 * @return
 *   Number of read names.
 */
static int
mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
		      struct rte_eth_xstat_name *xstats_names,
		      unsigned int size)
{
	unsigned int i;

	if (!xstats_names)
		return RTE_DIM(mrvl_xstats_tbl);

	for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++)
		strlcpy(xstats_names[i].name, mrvl_xstats_tbl[i].name,
			RTE_ETH_XSTATS_NAME_SIZE);

	return size;
}

/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param info
 *   Info structure output buffer.
 */
static int
mrvl_dev_infos_get(struct rte_eth_dev *dev,
		   struct rte_eth_dev_info *info)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

	info->speed_capa = RTE_ETH_LINK_SPEED_10M |
			   RTE_ETH_LINK_SPEED_100M |
			   RTE_ETH_LINK_SPEED_1G |
			   RTE_ETH_LINK_SPEED_2_5G |
			   RTE_ETH_LINK_SPEED_10G;

	info->max_rx_queues = MRVL_PP2_RXQ_MAX;
	info->max_tx_queues = MRVL_PP2_TXQ_MAX;
	info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;

	info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
	info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
	info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;

	info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
	info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
	info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;

	info->rx_offload_capa = MRVL_RX_OFFLOADS;
	info->rx_queue_offload_capa = MRVL_RX_OFFLOADS;

	info->tx_offload_capa = MRVL_TX_OFFLOADS;
	info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;

	info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
				       RTE_ETH_RSS_NONFRAG_IPV4_TCP |
				       RTE_ETH_RSS_NONFRAG_IPV4_UDP;

	/* By default packets are dropped if no descriptors are available */
	info->default_rxconf.rx_drop_en = 1;

	info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
	info->max_mtu = priv->max_mtu;

	return 0;
}

/**
 * Return supported packet types.
 *
 * @param dev
 *   Pointer to Ethernet device structure (unused).
 *
 * @return
 *   Const pointer to the table with supported packet types.
 */
static const uint32_t *
mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP
	};

	return ptypes;
}

/**
 * DPDK callback to get information about specific receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Receive queue index.
 * @param qinfo
 *   Receive queue information structure.
 */
static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			      struct rte_eth_rxq_info *qinfo)
{
	struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
	struct mrvl_priv *priv = dev->data->dev_private;
	int inq = priv->rxq_map[rx_queue_id].inq;
	int tc = priv->rxq_map[rx_queue_id].tc;
	struct pp2_ppio_tc_params *tc_params =
		&priv->ppio_params.inqs_params.tcs_params[tc];

	qinfo->mp = q->mp;
	qinfo->nb_desc = tc_params->inqs_params[inq].size;
}

/**
 * DPDK callback to get information about specific transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param tx_queue_id
 *   Transmit queue index.
 * @param qinfo
 *   Transmit queue information structure.
 */
static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			      struct rte_eth_txq_info *qinfo)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id];

	qinfo->nb_desc =
		priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}

/**
 * DPDK callback to configure a VLAN filter.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param vlan_id
 *   VLAN ID to filter.
 * @param on
 *   Toggle filter.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	if (priv->isolated)
		return -ENOTSUP;

	if (!priv->ppio)
		return 0;

	return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
		    pp2_ppio_remove_vlan(priv->ppio, vlan_id);
}

/**
 * DPDK callback to configure VLAN offload.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mask
 *   VLAN offload mask.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int mrvl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
	int ret;

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		MRVL_LOG(ERR, "VLAN stripping is not supported\n");
		return -ENOTSUP;
	}

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			ret = mrvl_populate_vlan_table(dev, 1);
		else
			ret = mrvl_populate_vlan_table(dev, 0);

		if (ret)
			return ret;
	}

	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		MRVL_LOG(ERR, "Extend VLAN not supported\n");
		return -ENOTSUP;
	}

	return 0;
}

/**
 * Release buffers to hardware bpool (buffer-pool).
 *
 * @param rxq
 *   Receive queue pointer.
 * @param num
 *   Number of buffers to release to bpool.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
{
	struct buff_release_entry entries[num];
	struct rte_mbuf *mbufs[num];
	int i, ret;
	unsigned int core_id;
	struct pp2_hif *hif;
	struct pp2_bpool *bpool;

	core_id = rte_lcore_id();
	if (core_id == LCORE_ID_ANY)
		core_id = rte_get_main_lcore();

	hif = mrvl_get_hif(rxq->priv, core_id);
	if (!hif)
		return -1;

	bpool = rxq->priv->bpool;

	ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
	if (ret)
		return ret;

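	/*
	 * The bpool cookie carries only the low bits of the mbuf virtual
	 * address; the shared high bits (MRVL_COOKIE_HIGH_ADDR_MASK) are
	 * latched once here and OR-ed back when buffers are reclaimed, so
	 * all mbufs must come from the same high address range.
	 */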
if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
|
|
cookie_addr_high =
|
|
(uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;
|
|
|
|
for (i = 0; i < num; i++) {
|
|
if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
|
|
!= cookie_addr_high) {
|
|
MRVL_LOG(ERR,
|
|
"mbuf virtual addr high is out of range "
|
|
"0x%x instead of 0x%x\n",
|
|
(uint32_t)((uint64_t)mbufs[i] >> 32),
|
|
(uint32_t)(cookie_addr_high >> 32));
|
|
goto out;
|
|
}
|
|
|
|
entries[i].buff.addr =
|
|
rte_mbuf_data_iova_default(mbufs[i]);
|
|
entries[i].buff.cookie = (uintptr_t)mbufs[i];
|
|
entries[i].bpool = bpool;
|
|
}
|
|
|
|
pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
|
|
mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;
|
|
|
|
if (i != num)
|
|
goto out;
|
|
|
|
return 0;
|
|
out:
|
|
for (; i < num; i++)
|
|
rte_pktmbuf_free(mbufs[i]);
|
|
|
|
return -1;
|
|
}
|
|
|
|
/**
|
|
* DPDK callback to configure the receive queue.
|
|
*
|
|
* @param dev
|
|
* Pointer to Ethernet device structure.
|
|
* @param idx
|
|
* RX queue index.
|
|
* @param desc
|
|
* Number of descriptors to configure in queue.
|
|
* @param socket
|
|
* NUMA socket on which memory must be allocated.
|
|
* @param conf
|
|
* Thresholds parameters.
|
|
* @param mp
|
|
* Memory pool for buffer allocations.
|
|
*
|
|
* @return
|
|
* 0 on success, negative error value otherwise.
|
|
*/
|
|
static int
|
|
mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
|
unsigned int socket,
|
|
const struct rte_eth_rxconf *conf,
|
|
struct rte_mempool *mp)
|
|
{
|
|
struct mrvl_priv *priv = dev->data->dev_private;
|
|
struct mrvl_rxq *rxq;
|
|
uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
|
|
uint32_t max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
|
|
int ret, tc, inq;
|
|
uint64_t offloads;
|
|
|
|
offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
|
|
|
|
if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
|
|
/*
|
|
* Unknown TC mapping, mapping will not have a correct queue.
|
|
*/
|
|
MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu",
|
|
idx, priv->ppio_id);
|
|
return -EFAULT;
|
|
}
|
|
|
|
frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MRVL_PKT_EFFEC_OFFS;
|
|
if (frame_size < max_rx_pktlen) {
|
|
MRVL_LOG(WARNING,
|
|
"Mbuf size must be increased to %u bytes to hold up "
|
|
"to %u bytes of data.",
|
|
max_rx_pktlen + buf_size - frame_size,
|
|
max_rx_pktlen);
|
|
dev->data->mtu = frame_size - RTE_ETHER_HDR_LEN;
|
|
MRVL_LOG(INFO, "Setting MTU to %u", dev->data->mtu);
|
|
}
|
|
|
|
if (dev->data->rx_queues[idx]) {
|
|
rte_free(dev->data->rx_queues[idx]);
|
|
dev->data->rx_queues[idx] = NULL;
|
|
}
|
|
|
|
rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
|
|
if (!rxq)
|
|
return -ENOMEM;
|
|
|
|
rxq->priv = priv;
|
|
rxq->mp = mp;
|
|
rxq->cksum_enabled = offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
|
|
rxq->queue_id = idx;
|
|
rxq->port_id = dev->data->port_id;
|
|
mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
|
|
|
|
tc = priv->rxq_map[rxq->queue_id].tc,
|
|
inq = priv->rxq_map[rxq->queue_id].inq;
|
|
priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
|
|
desc;
|
|
|
|
ret = mrvl_fill_bpool(rxq, desc);
|
|
if (ret) {
|
|
rte_free(rxq);
|
|
return ret;
|
|
}
|
|
|
|
priv->bpool_init_size += desc;
|
|
|
|
dev->data->rx_queues[idx] = rxq;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/**
|
|
* DPDK callback to release the receive queue.
|
|
*
|
|
* @param dev
|
|
* Pointer to Ethernet device structure.
|
|
* @param qid
|
|
* Receive queue index.
|
|
*/
|
|
static void
|
|
mrvl_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
|
|
{
|
|
struct mrvl_rxq *q = dev->data->rx_queues[qid];
|
|
struct pp2_ppio_tc_params *tc_params;
|
|
int i, num, tc, inq;
|
|
struct pp2_hif *hif;
|
|
unsigned int core_id = rte_lcore_id();
|
|
|
|
if (core_id == LCORE_ID_ANY)
|
|
core_id = rte_get_main_lcore();
|
|
|
|
if (!q)
|
|
return;
|
|
|
|
hif = mrvl_get_hif(q->priv, core_id);
|
|
|
|
if (!hif)
|
|
return;
|
|
|
|
tc = q->priv->rxq_map[q->queue_id].tc;
|
|
inq = q->priv->rxq_map[q->queue_id].inq;
|
|
tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
|
|
num = tc_params->inqs_params[inq].size;
|
|
for (i = 0; i < num; i++) {
|
|
struct pp2_buff_inf inf;
|
|
uint64_t addr;
|
|
|
|
pp2_bpool_get_buff(hif, q->priv->bpool, &inf);
|
|
addr = cookie_addr_high | inf.cookie;
|
|
rte_pktmbuf_free((struct rte_mbuf *)addr);
|
|
}
|
|
|
|
rte_free(q);
|
|
}
|
|
|
|
/**
|
|
* DPDK callback to configure the transmit queue.
|
|
*
|
|
* @param dev
|
|
* Pointer to Ethernet device structure.
|
|
* @param idx
|
|
* Transmit queue index.
|
|
* @param desc
|
|
* Number of descriptors to configure in the queue.
|
|
* @param socket
|
|
* NUMA socket on which memory must be allocated.
|
|
* @param conf
|
|
* Tx queue configuration parameters.
|
|
*
|
|
* @return
|
|
* 0 on success, negative error value otherwise.
|
|
*/
|
|
static int
|
|
mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
|
unsigned int socket,
|
|
const struct rte_eth_txconf *conf)
|
|
{
|
|
struct mrvl_priv *priv = dev->data->dev_private;
|
|
struct mrvl_txq *txq;
|
|
|
|
if (dev->data->tx_queues[idx]) {
|
|
rte_free(dev->data->tx_queues[idx]);
|
|
dev->data->tx_queues[idx] = NULL;
|
|
}
|
|
|
|
txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
|
|
if (!txq)
|
|
return -ENOMEM;
|
|
|
|
txq->priv = priv;
|
|
txq->queue_id = idx;
|
|
txq->port_id = dev->data->port_id;
|
|
txq->tx_deferred_start = conf->tx_deferred_start;
|
|
dev->data->tx_queues[idx] = txq;
|
|
|
|
priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
|
|
|
|
return 0;
|
|
}

/**
 * DPDK callback to release the transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param qid
 *   Transmit queue index.
 */
static void
mrvl_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct mrvl_txq *q = dev->data->tx_queues[qid];

	if (!q)
		return;

	rte_free(q);
}

/**
 * DPDK callback to get flow control configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param fc_conf
 *   Pointer to the flow control configuration.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret, en;

	if (!priv->ppio) {
		memcpy(fc_conf, &priv->fc_conf, sizeof(struct rte_eth_fc_conf));
		return 0;
	}

	fc_conf->autoneg = 1;
	ret = pp2_ppio_get_rx_pause(priv->ppio, &en);
	if (ret) {
		MRVL_LOG(ERR, "Failed to read rx pause state");
		return ret;
	}

	fc_conf->mode = en ? RTE_ETH_FC_RX_PAUSE : RTE_ETH_FC_NONE;
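
	/*
	 * Fold in the Tx pause state: combined with Rx pause it is
	 * reported as FULL, on its own as TX_PAUSE.
	 */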
	ret = pp2_ppio_get_tx_pause(priv->ppio, &en);
	if (ret) {
		MRVL_LOG(ERR, "Failed to read tx pause state");
		return ret;
	}

	if (en) {
		if (fc_conf->mode == RTE_ETH_FC_NONE)
			fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
		else
			fc_conf->mode = RTE_ETH_FC_FULL;
	}

	return 0;
}

/**
 * DPDK callback to set flow control configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param fc_conf
 *   Pointer to the flow control configuration.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct pp2_ppio_tx_pause_params mrvl_pause_params;
	int ret;
	int rx_en, tx_en;

	if (fc_conf->high_water ||
	    fc_conf->low_water ||
	    fc_conf->pause_time ||
	    fc_conf->mac_ctrl_frame_fwd) {
		MRVL_LOG(ERR, "Flowctrl parameter is not supported");

		return -EINVAL;
	}

	if (fc_conf->autoneg == 0) {
		MRVL_LOG(ERR, "Flowctrl Autoneg disable is not supported");
		return -EINVAL;
	}

	if (!priv->ppio) {
		memcpy(&priv->fc_conf, fc_conf, sizeof(struct rte_eth_fc_conf));
		priv->flow_ctrl = 1;
		return 0;
	}

	switch (fc_conf->mode) {
	case RTE_ETH_FC_FULL:
		rx_en = 1;
		tx_en = 1;
		break;
	case RTE_ETH_FC_TX_PAUSE:
		rx_en = 0;
		tx_en = 1;
		break;
	case RTE_ETH_FC_RX_PAUSE:
		rx_en = 1;
		tx_en = 0;
		break;
	case RTE_ETH_FC_NONE:
		rx_en = 0;
		tx_en = 0;
		break;
	default:
		MRVL_LOG(ERR, "Incorrect Flow control flag (%d)",
			 fc_conf->mode);
		return -EINVAL;
	}

	/* Set RX flow control */
	ret = pp2_ppio_set_rx_pause(priv->ppio, rx_en);
	if (ret) {
		MRVL_LOG(ERR, "Failed to change RX flowctrl");
		return ret;
	}

	/* Set TX flow control */
	mrvl_pause_params.en = tx_en;
	/* all inqs participate in xon/xoff decision */
	mrvl_pause_params.use_tc_pause_inqs = 0;
	ret = pp2_ppio_set_tx_pause(priv->ppio, &mrvl_pause_params);
	if (ret) {
		MRVL_LOG(ERR, "Failed to change TX flowctrl");
		return ret;
	}

	return 0;
}

/**
 * DPDK callback to update the RSS hash configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rss_conf
 *   Pointer to RSS configuration.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_rss_hash_update(struct rte_eth_dev *dev,
		     struct rte_eth_rss_conf *rss_conf)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	if (priv->isolated)
		return -ENOTSUP;

	return mrvl_configure_rss(priv, rss_conf);
}

/**
 * DPDK callback to get RSS hash configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rss_conf
 *   Pointer to RSS configuration.
 *
 * @return
 *   Always 0.
 */
static int
mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
		       struct rte_eth_rss_conf *rss_conf)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	enum pp2_ppio_hash_type hash_type =
		priv->ppio_params.inqs_params.hash_type;

	rss_conf->rss_key = NULL;

	if (hash_type == PP2_PPIO_HASH_T_NONE)
		rss_conf->rss_hf = 0;
	else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
		rss_conf->rss_hf = RTE_ETH_RSS_IPV4;
	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
		rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP;
	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
		rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_UDP;

	return 0;
}

/**
 * DPDK callback to get rte_flow callbacks.
 *
 * @param dev
 *   Pointer to the device structure.
 * @param ops
 *   Pointer to pass the flow ops.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_eth_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
		      const struct rte_flow_ops **ops)
{
	*ops = &mrvl_flow_ops;
	return 0;
}

/**
 * DPDK callback to get rte_mtr callbacks.
 *
 * @param dev
 *   Pointer to the device structure.
 * @param ops
 *   Pointer to pass the mtr ops.
 *
 * @return
 *   Always 0.
 */
static int
mrvl_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
{
	*(const void **)ops = &mrvl_mtr_ops;

	return 0;
}

/**
 * DPDK callback to get rte_tm callbacks.
 *
 * @param dev
 *   Pointer to the device structure.
 * @param ops
 *   Pointer to pass the tm ops.
 *
 * @return
 *   Always 0.
 */
static int
mrvl_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
{
	*(const void **)ops = &mrvl_tm_ops;

	return 0;
}

static const struct eth_dev_ops mrvl_ops = {
	.dev_configure = mrvl_dev_configure,
	.dev_start = mrvl_dev_start,
	.dev_stop = mrvl_dev_stop,
	.dev_set_link_up = mrvl_dev_set_link_up,
	.dev_set_link_down = mrvl_dev_set_link_down,
	.dev_close = mrvl_dev_close,
	.link_update = mrvl_link_update,
	.promiscuous_enable = mrvl_promiscuous_enable,
	.allmulticast_enable = mrvl_allmulticast_enable,
	.promiscuous_disable = mrvl_promiscuous_disable,
	.allmulticast_disable = mrvl_allmulticast_disable,
	.mac_addr_remove = mrvl_mac_addr_remove,
	.mac_addr_add = mrvl_mac_addr_add,
	.mac_addr_set = mrvl_mac_addr_set,
	.mtu_set = mrvl_mtu_set,
	.stats_get = mrvl_stats_get,
	.stats_reset = mrvl_stats_reset,
	.xstats_get = mrvl_xstats_get,
	.xstats_reset = mrvl_xstats_reset,
	.xstats_get_names = mrvl_xstats_get_names,
	.dev_infos_get = mrvl_dev_infos_get,
	.dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
	.rxq_info_get = mrvl_rxq_info_get,
	.txq_info_get = mrvl_txq_info_get,
	.vlan_filter_set = mrvl_vlan_filter_set,
	.vlan_offload_set = mrvl_vlan_offload_set,
	.tx_queue_start = mrvl_tx_queue_start,
	.tx_queue_stop = mrvl_tx_queue_stop,
	.rx_queue_setup = mrvl_rx_queue_setup,
	.rx_queue_release = mrvl_rx_queue_release,
	.tx_queue_setup = mrvl_tx_queue_setup,
	.tx_queue_release = mrvl_tx_queue_release,
	.flow_ctrl_get = mrvl_flow_ctrl_get,
	.flow_ctrl_set = mrvl_flow_ctrl_set,
	.rss_hash_update = mrvl_rss_hash_update,
	.rss_hash_conf_get = mrvl_rss_hash_conf_get,
	.flow_ops_get = mrvl_eth_flow_ops_get,
	.mtr_ops_get = mrvl_mtr_ops_get,
	.tm_ops_get = mrvl_tm_ops_get,
};

/**
 * Return packet type information and l3/l4 offsets.
 *
 * @param desc
 *   Pointer to the received packet descriptor.
 * @param l3_offset
 *   l3 packet offset.
 * @param l4_offset
 *   l4 packet offset.
 *
 * @return
 *   Packet type information.
 */
static inline uint64_t
mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
				    uint8_t *l3_offset, uint8_t *l4_offset)
{
	enum pp2_inq_l3_type l3_type;
	enum pp2_inq_l4_type l4_type;
	enum pp2_inq_vlan_tag vlan_tag;
	uint64_t packet_type;

	pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
	pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
	pp2_ppio_inq_desc_get_vlan_tag(desc, &vlan_tag);

	packet_type = RTE_PTYPE_L2_ETHER;

	switch (vlan_tag) {
	case PP2_INQ_VLAN_TAG_SINGLE:
		packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
		break;
	case PP2_INQ_VLAN_TAG_DOUBLE:
	case PP2_INQ_VLAN_TAG_TRIPLE:
		packet_type |= RTE_PTYPE_L2_ETHER_QINQ;
		break;
	default:
		break;
	}

	switch (l3_type) {
	case PP2_INQ_L3_TYPE_IPV4_NO_OPTS:
		packet_type |= RTE_PTYPE_L3_IPV4;
		break;
	case PP2_INQ_L3_TYPE_IPV4_OK:
		packet_type |= RTE_PTYPE_L3_IPV4_EXT;
		break;
	case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO:
		packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
		break;
	case PP2_INQ_L3_TYPE_IPV6_NO_EXT:
		packet_type |= RTE_PTYPE_L3_IPV6;
		break;
	case PP2_INQ_L3_TYPE_IPV6_EXT:
		packet_type |= RTE_PTYPE_L3_IPV6_EXT;
		break;
	case PP2_INQ_L3_TYPE_ARP:
		packet_type |= RTE_PTYPE_L2_ETHER_ARP;
		/*
		 * In case of ARP the l4_offset is set to a wrong value.
		 * Set it to a proper one so that later on mbuf->l3_len
		 * can be calculated by subtracting l3_offset from
		 * l4_offset.
		 */
		*l4_offset = *l3_offset + MRVL_ARP_LENGTH;
		break;
	default:
		break;
	}

	switch (l4_type) {
	case PP2_INQ_L4_TYPE_TCP:
		packet_type |= RTE_PTYPE_L4_TCP;
		break;
	case PP2_INQ_L4_TYPE_UDP:
		packet_type |= RTE_PTYPE_L4_UDP;
		break;
	default:
		break;
	}

	return packet_type;
}

/**
 * Get offload information from the received packet descriptor.
 *
 * @param desc
 *   Pointer to the received packet descriptor.
 * @param packet_type
 *   Packet type as returned by mrvl_desc_to_packet_type_and_offset().
 *
 * @return
 *   Mbuf offload flags.
 */
static inline uint64_t
mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc, uint64_t packet_type)
{
	uint64_t flags = 0;
	enum pp2_inq_desc_status status;

	if (RTE_ETH_IS_IPV4_HDR(packet_type)) {
		status = pp2_ppio_inq_desc_get_l3_pkt_error(desc);
		if (unlikely(status != PP2_DESC_ERR_OK))
			flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		else
			flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
	}

	if (((packet_type & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) ||
	    ((packet_type & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP)) {
		status = pp2_ppio_inq_desc_get_l4_pkt_error(desc);
		if (unlikely(status != PP2_DESC_ERR_OK))
			flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
		else
			flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
	}

	return flags;
}

/**
 * DPDK callback for receive.
 *
 * @param rxq
 *   Generic pointer to the receive queue.
 * @param rx_pkts
 *   Array to store received packets.
 * @param nb_pkts
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received.
 */
static uint16_t
mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct mrvl_rxq *q = rxq;
	struct pp2_ppio_desc descs[nb_pkts];
	struct pp2_bpool *bpool;
	int i, ret, rx_done = 0;
	int num;
	struct pp2_hif *hif;
	unsigned int core_id = rte_lcore_id();

	hif = mrvl_get_hif(q->priv, core_id);

	if (unlikely(!q->priv->ppio || !hif))
		return 0;

	bpool = q->priv->bpool;

	ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
			    q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
	if (unlikely(ret < 0))
		return 0;
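
	/*
	 * pp2_ppio_recv() handed out nb_pkts buffers from the bpool; update
	 * the per-core fill-level estimate used by the pool maintenance
	 * logic at the end of this function.
	 */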
	mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf;
		uint8_t l3_offset, l4_offset;
		enum pp2_inq_desc_status status;
		uint64_t addr;
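
		/*
		 * Prefetch the mbuf MRVL_MUSDK_PREFETCH_SHIFT descriptors
		 * ahead so its cache lines are warm by the time a later
		 * iteration reaches it.
		 */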
		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
			struct pp2_ppio_desc *pref_desc;
			u64 pref_addr;

			pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
			pref_addr = cookie_addr_high |
				    pp2_ppio_inq_desc_get_cookie(pref_desc);
			rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
			rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
		}

		addr = cookie_addr_high |
		       pp2_ppio_inq_desc_get_cookie(&descs[i]);
		mbuf = (struct rte_mbuf *)addr;
		rte_pktmbuf_reset(mbuf);

		/* drop packet in case of mac, overrun or resource error */
		status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
		if ((unlikely(status != PP2_DESC_ERR_OK)) &&
		    !(q->priv->forward_bad_frames)) {
			struct pp2_buff_inf binf = {
				.addr = rte_mbuf_data_iova_default(mbuf),
				.cookie = (uint64_t)mbuf,
			};

			pp2_bpool_put_buff(hif, bpool, &binf);
			mrvl_port_bpool_size
				[bpool->pp2_id][bpool->id][core_id]++;
			q->drop_mac++;
			continue;
		}

		mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
		mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
		mbuf->data_len = mbuf->pkt_len;
		mbuf->port = q->port_id;
		mbuf->packet_type =
			mrvl_desc_to_packet_type_and_offset(&descs[i],
							    &l3_offset,
							    &l4_offset);
		mbuf->l2_len = l3_offset;
		mbuf->l3_len = l4_offset - l3_offset;

		if (likely(q->cksum_enabled))
			mbuf->ol_flags =
				mrvl_desc_to_ol_flags(&descs[i],
						      mbuf->packet_type);

		rx_pkts[rx_done++] = mbuf;
		q->bytes_recv += mbuf->pkt_len;
	}
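
	/*
	 * Pool maintenance: refill the bpool when it runs low (or when
	 * nothing was received because the pool drained completely), and
	 * trim it back toward bpool_init_size when it overfills. The
	 * trylock keeps this bookkeeping off the fast path when another
	 * lcore is already doing it.
	 */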
	if (rte_spinlock_trylock(&q->priv->lock) == 1) {
		num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);

		if (unlikely(num <= q->priv->bpool_min_size ||
			     (!rx_done && num < q->priv->bpool_init_size))) {
			mrvl_fill_bpool(q, q->priv->fill_bpool_buffs);
		} else if (unlikely(num > q->priv->bpool_max_size)) {
			int i;
			int pkt_to_remove = num - q->priv->bpool_init_size;
			struct rte_mbuf *mbuf;
			struct pp2_buff_inf buff;

			for (i = 0; i < pkt_to_remove; i++) {
				ret = pp2_bpool_get_buff(hif, bpool, &buff);
				if (ret)
					break;
				mbuf = (struct rte_mbuf *)
					(cookie_addr_high | buff.cookie);
				rte_pktmbuf_free(mbuf);
			}
			mrvl_port_bpool_size
				[bpool->pp2_id][bpool->id][core_id] -= i;
		}
		rte_spinlock_unlock(&q->priv->lock);
	}

	return rx_done;
}

/**
 * Prepare offload information.
 *
 * @param ol_flags
 *   Offload flags.
 * @param l3_type
 *   Pointer to the pp2_outq_l3_type enum value.
 * @param l4_type
 *   Pointer to the pp2_outq_l4_type enum value.
 * @param gen_l3_cksum
 *   Will be set to 1 in case l3 checksum is computed.
 * @param gen_l4_cksum
 *   Will be set to 1 in case l4 checksum is computed.
 */
static inline void
mrvl_prepare_proto_info(uint64_t ol_flags,
			enum pp2_outq_l3_type *l3_type,
			enum pp2_outq_l4_type *l4_type,
			int *gen_l3_cksum,
			int *gen_l4_cksum)
{
	/*
	 * Based on ol_flags prepare information for
	 * pp2_ppio_outq_desc_set_proto_info(), which sets up the descriptor
	 * for offloading. In most of the checksum cases ipv4 must be set,
	 * so this is the default value.
	 */
	*l3_type = PP2_OUTQ_L3_TYPE_IPV4;
	*gen_l3_cksum = ol_flags & RTE_MBUF_F_TX_IP_CKSUM ? 1 : 0;

	if (ol_flags & RTE_MBUF_F_TX_IPV6) {
		*l3_type = PP2_OUTQ_L3_TYPE_IPV6;
		/* no checksum for ipv6 header */
		*gen_l3_cksum = 0;
	}

	if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) {
		*l4_type = PP2_OUTQ_L4_TYPE_TCP;
		*gen_l4_cksum = 1;
	} else if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM) {
		*l4_type = PP2_OUTQ_L4_TYPE_UDP;
		*gen_l4_cksum = 1;
	} else {
		*l4_type = PP2_OUTQ_L4_TYPE_OTHER;
		/* no checksum for other type */
		*gen_l4_cksum = 0;
	}
}

/**
 * Release already sent buffers to bpool (buffer-pool).
 *
 * @param ppio
 *   Pointer to the port structure.
 * @param hif
 *   Pointer to the MUSDK hardware interface.
 * @param core_id
 *   Lcore id used for per-core bpool accounting.
 * @param sq
 *   Pointer to the shadow queue.
 * @param qid
 *   Queue id number.
 * @param force
 *   Force releasing packets.
 */
static inline void
mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
		       unsigned int core_id, struct mrvl_shadow_txq *sq,
		       int qid, int force)
{
	struct buff_release_entry *entry;
	uint16_t nb_done = 0, num = 0, skip_bufs = 0;
	int i;

	pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);

	sq->num_to_release += nb_done;

	if (likely(!force &&
		   sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
		return;

	nb_done = sq->num_to_release;
	sq->num_to_release = 0;
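
	/*
	 * Walk the shadow queue from its tail. An entry with a NULL bpool
	 * holds an mbuf that cannot be recycled into a pool and is freed
	 * with rte_pktmbuf_free(); all other buffers are returned to their
	 * originating bpool in batches via pp2_bpool_put_buffs().
	 */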
	for (i = 0; i < nb_done; i++) {
		entry = &sq->ent[sq->tail + num];
		if (unlikely(!entry->buff.addr)) {
			MRVL_LOG(ERR,
				"Shadow memory @%d: cookie(%lx), pa(%lx)!",
				sq->tail, (u64)entry->buff.cookie,
				(u64)entry->buff.addr);
			skip_bufs = 1;
			goto skip;
		}

		if (unlikely(!entry->bpool)) {
			struct rte_mbuf *mbuf;

			mbuf = (struct rte_mbuf *)entry->buff.cookie;
			rte_pktmbuf_free(mbuf);
			skip_bufs = 1;
			goto skip;
		}

		mrvl_port_bpool_size
			[entry->bpool->pp2_id][entry->bpool->id][core_id]++;
		num++;
		if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
			goto skip;
		continue;
skip:
		if (likely(num))
			pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
		num += skip_bufs;
		sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
		sq->size -= num;
		num = 0;
		skip_bufs = 0;
	}

	if (likely(num)) {
		pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
		sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
		sq->size -= num;
	}
}

/**
 * DPDK callback for transmit.
 *
 * @param txq
 *   Generic pointer to the transmit queue.
 * @param tx_pkts
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted.
 */
static uint16_t
mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct mrvl_txq *q = txq;
	struct mrvl_shadow_txq *sq;
	struct pp2_hif *hif;
	struct pp2_ppio_desc descs[nb_pkts];
	unsigned int core_id = rte_lcore_id();
	int i, bytes_sent = 0;
	uint16_t num, sq_free_size;
	uint64_t addr;

	hif = mrvl_get_hif(q->priv, core_id);
	sq = &q->shadow_txqs[core_id];

	if (unlikely(!q->priv->ppio || !hif))
		return 0;

	if (sq->size)
		mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
				       sq, q->queue_id, 0);

	sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
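	/*
	 * One shadow ring slot is kept unused (hence the -1 above) so a
	 * full ring is never mistaken for an empty one; clamp the burst
	 * to the space actually available.
	 */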
	if (unlikely(nb_pkts > sq_free_size))
		nb_pkts = sq_free_size;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf = tx_pkts[i];
		int gen_l3_cksum, gen_l4_cksum;
		enum pp2_outq_l3_type l3_type;
		enum pp2_outq_l4_type l4_type;

		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
			struct rte_mbuf *pref_pkt_hdr;

			pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
			rte_mbuf_prefetch_part1(pref_pkt_hdr);
			rte_mbuf_prefetch_part2(pref_pkt_hdr);
		}

		mrvl_fill_shadowq(sq, mbuf);
		mrvl_fill_desc(&descs[i], mbuf);

		bytes_sent += rte_pktmbuf_pkt_len(mbuf);
		/*
		 * In case unsupported ol_flags were passed
		 * do not update descriptor offload information.
		 */
		if (!(mbuf->ol_flags & MRVL_TX_PKT_OFFLOADS))
			continue;
		mrvl_prepare_proto_info(mbuf->ol_flags, &l3_type, &l4_type,
					&gen_l3_cksum, &gen_l4_cksum);

		pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
						  mbuf->l2_len,
						  mbuf->l2_len + mbuf->l3_len,
						  gen_l3_cksum, gen_l4_cksum);
	}

	num = nb_pkts;
	pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
	/*
	 * pp2_ppio_send() updates nb_pkts to the number of packets it
	 * actually accepted; unwind the shadow queue entries and the byte
	 * counter for the packets that were not sent.
	 */
	if (unlikely(num > nb_pkts)) {
		for (i = nb_pkts; i < num; i++) {
			sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
				MRVL_PP2_TX_SHADOWQ_MASK;
			addr = sq->ent[sq->head].buff.cookie;
			bytes_sent -=
				rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
		}
		sq->size -= num - nb_pkts;
	}

	q->bytes_sent += bytes_sent;

	return nb_pkts;
}

/**
 * DPDK callback for S/G transmit.
 *
 * @param txq
 *   Generic pointer to the transmit queue.
 * @param tx_pkts
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted.
 */
static uint16_t
mrvl_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts,
		     uint16_t nb_pkts)
{
	struct mrvl_txq *q = txq;
	struct mrvl_shadow_txq *sq;
	struct pp2_hif *hif;
	struct pp2_ppio_desc descs[nb_pkts * PP2_PPIO_DESC_NUM_FRAGS];
	struct pp2_ppio_sg_pkts pkts;
	uint8_t frags[nb_pkts];
	unsigned int core_id = rte_lcore_id();
	int i, j, bytes_sent = 0;
	int tail, tail_first;
	uint16_t num, sq_free_size;
	uint16_t nb_segs, total_descs = 0;
	uint64_t addr;

	hif = mrvl_get_hif(q->priv, core_id);
	sq = &q->shadow_txqs[core_id];
	pkts.frags = frags;
	pkts.num = 0;

	if (unlikely(!q->priv->ppio || !hif))
		return 0;

	if (sq->size)
		mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
				       sq, q->queue_id, 0);

	/* Save shadow queue free size */
	sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
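
	/*
	 * Every segment consumes one Tx descriptor and one shadow queue
	 * entry, so whole packets are budgeted against the remaining
	 * space before any of their fragments are queued.
	 */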
	tail = 0;
	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf = tx_pkts[i];
		struct rte_mbuf *seg = NULL;
		int gen_l3_cksum, gen_l4_cksum;
		enum pp2_outq_l3_type l3_type;
		enum pp2_outq_l4_type l4_type;

		nb_segs = mbuf->nb_segs;
		tail_first = tail;
		total_descs += nb_segs;

		/*
		 * Check if total_descs does not exceed
		 * shadow queue free size
		 */
		if (unlikely(total_descs > sq_free_size)) {
			total_descs -= nb_segs;
			break;
		}

		/* Check if nb_segs does not exceed the max nb of desc per
		 * fragmented packet
		 */
		if (nb_segs > PP2_PPIO_DESC_NUM_FRAGS) {
			total_descs -= nb_segs;
			RTE_LOG(ERR, PMD,
				"Too many segments. Packet won't be sent.\n");
			break;
		}

		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
			struct rte_mbuf *pref_pkt_hdr;

			pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
			rte_mbuf_prefetch_part1(pref_pkt_hdr);
			rte_mbuf_prefetch_part2(pref_pkt_hdr);
		}

		pkts.frags[pkts.num] = nb_segs;
		pkts.num++;

		seg = mbuf;
		for (j = 0; j < nb_segs - 1; j++) {
			/* For the subsequent segments, set shadow queue
			 * buffer to NULL
			 */
			mrvl_fill_shadowq(sq, NULL);
			mrvl_fill_desc(&descs[tail], seg);

			tail++;
			seg = seg->next;
		}
		/* Put first mbuf info in last shadow queue entry */
		mrvl_fill_shadowq(sq, mbuf);
		/* Update descriptor with last segment */
		mrvl_fill_desc(&descs[tail++], seg);

		bytes_sent += rte_pktmbuf_pkt_len(mbuf);
		/* In case unsupported ol_flags were passed
		 * do not update descriptor offload information
		 */
		if (!(mbuf->ol_flags & MRVL_TX_PKT_OFFLOADS))
			continue;
		mrvl_prepare_proto_info(mbuf->ol_flags, &l3_type, &l4_type,
					&gen_l3_cksum, &gen_l4_cksum);

		pp2_ppio_outq_desc_set_proto_info(&descs[tail_first], l3_type,
						  l4_type, mbuf->l2_len,
						  mbuf->l2_len + mbuf->l3_len,
						  gen_l3_cksum, gen_l4_cksum);
	}

	num = total_descs;
	pp2_ppio_send_sg(q->priv->ppio, hif, q->queue_id, descs,
			 &total_descs, &pkts);
	/* number of Tx descriptors that were not sent */
	if (unlikely(num > total_descs)) {
		for (i = total_descs; i < num; i++) {
			sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
				MRVL_PP2_TX_SHADOWQ_MASK;

			addr = sq->ent[sq->head].buff.cookie;
			if (addr)
				bytes_sent -=
					rte_pktmbuf_pkt_len((struct rte_mbuf *)
						(cookie_addr_high | addr));
		}
		sq->size -= num - total_descs;
		nb_pkts = pkts.num;
	}

	q->bytes_sent += bytes_sent;

	return nb_pkts;
}

/**
 * Create private device structure.
 *
 * @param dev_name
 *   Pointer to the port name passed in the initialization parameters.
 *
 * @return
 *   Pointer to the newly allocated private device structure.
 */
static struct mrvl_priv *
mrvl_priv_create(const char *dev_name)
{
	struct pp2_bpool_params bpool_params;
	char match[MRVL_MATCH_LEN];
	struct mrvl_priv *priv;
	uint16_t max_frame_size;
	int ret, bpool_bit;

	priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
	if (!priv)
		return NULL;

	ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
				       &priv->pp_id, &priv->ppio_id);
	if (ret)
		goto out_free_priv;

	ret = pp2_ppio_get_l4_cksum_max_frame_size(priv->pp_id, priv->ppio_id,
						   &max_frame_size);
	if (ret)
		goto out_free_priv;

	priv->max_mtu = max_frame_size + RTE_ETHER_CRC_LEN -
		MRVL_PP2_ETH_HDRS_LEN;

	bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
				     PP2_BPOOL_NUM_POOLS);
	if (bpool_bit < 0)
		goto out_free_priv;
	priv->bpool_bit = bpool_bit;
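
	/*
	 * MUSDK binds objects by textual match strings; buffer pools are
	 * addressed as "pool-<packet processor id>:<bpool id>".
	 */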
	snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
		 priv->bpool_bit);
	memset(&bpool_params, 0, sizeof(bpool_params));
	bpool_params.match = match;
	bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
	ret = pp2_bpool_init(&bpool_params, &priv->bpool);
	if (ret)
		goto out_clear_bpool_bit;

	priv->ppio_params.type = PP2_PPIO_T_NIC;
	rte_spinlock_init(&priv->lock);

	return priv;
out_clear_bpool_bit:
	used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
out_free_priv:
	rte_free(priv);
	return NULL;
}

/**
 * Create device representing Ethernet port.
 *
 * @param vdev
 *   Pointer to the virtual device.
 * @param name
 *   Pointer to the port's name.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
{
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct rte_eth_dev *eth_dev;
	struct mrvl_priv *priv;
	struct ifreq req;

	eth_dev = rte_eth_dev_allocate(name);
	if (!eth_dev)
		return -ENOMEM;

	priv = mrvl_priv_create(name);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free;
	}
	eth_dev->data->dev_private = priv;

	eth_dev->data->mac_addrs =
		rte_zmalloc("mac_addrs",
			    RTE_ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
	if (!eth_dev->data->mac_addrs) {
		MRVL_LOG(ERR, "Failed to allocate space for eth addrs");
		ret = -ENOMEM;
		goto out_free;
	}

	memset(&req, 0, sizeof(req));
	strlcpy(req.ifr_name, name, sizeof(req.ifr_name));
	ret = ioctl(fd, SIOCGIFHWADDR, &req);
	if (ret)
		goto out_free;

	memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
	       req.ifr_addr.sa_data, RTE_ETHER_ADDR_LEN);

	eth_dev->device = &vdev->device;
	eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
	mrvl_set_tx_function(eth_dev);
	eth_dev->dev_ops = &mrvl_ops;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
out_free:
	rte_eth_dev_release_port(eth_dev);

	return ret;
}

/**
 * Callback used by rte_kvargs_process() during argument parsing.
 *
 * @param key
 *   Pointer to the parsed key (unused).
 * @param value
 *   Pointer to the parsed value.
 * @param extra_args
 *   Pointer to the extra arguments which contains address of the
 *   table of pointers to parsed interface names.
 *
 * @return
 *   Always 0.
 */
static int
mrvl_get_ifnames(const char *key __rte_unused, const char *value,
		 void *extra_args)
{
	struct mrvl_ifnames *ifnames = extra_args;

	ifnames->names[ifnames->idx++] = value;

	return 0;
}
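
/*
 * Illustrative usage (interface and file names below are placeholders):
 * ports are created by passing kvargs to the vdev bus, e.g.
 *
 *   --vdev=net_mvpp2,iface=eth0,iface=eth2,cfg=/path/to/qos_config.txt
 *
 * The "iface" key may be repeated; at most one "cfg" file is accepted.
 */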

/**
 * DPDK callback to register the virtual device.
 *
 * @param vdev
 *   Pointer to the virtual device.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
{
	struct rte_kvargs *kvlist;
	struct mrvl_ifnames ifnames;
	int ret = -EINVAL;
	uint32_t i, ifnum, cfgnum;
	const char *params;

	params = rte_vdev_device_args(vdev);
	if (!params)
		return -EINVAL;

	kvlist = rte_kvargs_parse(params, valid_args);
	if (!kvlist)
		return -EINVAL;

	ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
	if (ifnum > RTE_DIM(ifnames.names))
		goto out_free_kvlist;

	ifnames.idx = 0;
	rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
			   mrvl_get_ifnames, &ifnames);

	/*
	 * The system initialization below must be done only once,
	 * on the first provided configuration file.
	 */
	if (!mrvl_cfg) {
		cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
		MRVL_LOG(INFO, "Parsing config file!");
		if (cfgnum > 1) {
			MRVL_LOG(ERR, "Cannot handle more than one config file!");
			goto out_free_kvlist;
		} else if (cfgnum == 1) {
			rte_kvargs_process(kvlist, MRVL_CFG_ARG,
					   mrvl_get_cfg, &mrvl_cfg);
		}
	}

	if (mrvl_dev_num)
		goto init_devices;

	MRVL_LOG(INFO, "Perform MUSDK initializations");

	ret = rte_mvep_init(MVEP_MOD_T_PP2, kvlist);
	if (ret)
		goto out_free_kvlist;

	ret = mrvl_init_pp2();
	if (ret) {
		MRVL_LOG(ERR, "Failed to init PP!");
		rte_mvep_deinit(MVEP_MOD_T_PP2);
		goto out_free_kvlist;
	}

	memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
	memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup));

	mrvl_lcore_first = RTE_MAX_LCORE;
	mrvl_lcore_last = 0;

init_devices:
	for (i = 0; i < ifnum; i++) {
		MRVL_LOG(INFO, "Creating %s", ifnames.names[i]);
		ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
		if (ret)
			goto out_cleanup;
		mrvl_dev_num++;
	}

	rte_kvargs_free(kvlist);

	return 0;
out_cleanup:
	rte_pmd_mrvl_remove(vdev);

out_free_kvlist:
	rte_kvargs_free(kvlist);

	return ret;
}

/**
 * DPDK callback to remove virtual device.
 *
 * @param vdev
 *   Pointer to the removed virtual device.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
{
	uint16_t port_id;
	int ret = 0;

	RTE_ETH_FOREACH_DEV(port_id) {
		if (rte_eth_devices[port_id].device != &vdev->device)
			continue;
		ret |= rte_eth_dev_close(port_id);
	}

	return ret == 0 ? 0 : -EIO;
}

static struct rte_vdev_driver pmd_mrvl_drv = {
	.probe = rte_pmd_mrvl_probe,
	.remove = rte_pmd_mrvl_remove,
};

RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv);
RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2);
RTE_LOG_REGISTER_DEFAULT(mrvl_logtype, NOTICE);