/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <cnxk_ethdev.h>

int
cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int max_rx_pktlen;

	max_rx_pktlen = (roc_nix_max_pkt_len(&dev->nix) + RTE_ETHER_CRC_LEN -
			 CNXK_NIX_MAX_VTAG_ACT_SIZE);

	devinfo->min_rx_bufsize = NIX_MIN_HW_FRS + RTE_ETHER_CRC_LEN;
	devinfo->max_rx_pktlen = max_rx_pktlen;
	devinfo->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	devinfo->max_mac_addrs = dev->max_mac_entries;
	devinfo->max_vfs = pci_dev->max_vfs;
	devinfo->max_mtu = devinfo->max_rx_pktlen -
			   (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
	devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD;

	devinfo->rx_offload_capa = dev->rx_offload_capa;
	devinfo->tx_offload_capa = dev->tx_offload_capa;
	devinfo->rx_queue_offload_capa = 0;
	devinfo->tx_queue_offload_capa = 0;

	devinfo->reta_size = dev->nix.reta_sz;
	devinfo->hash_key_size = ROC_NIX_RSS_KEY_LEN;
	devinfo->flow_type_rss_offloads = CNXK_NIX_RSS_OFFLOAD;

	devinfo->default_rxconf = (struct rte_eth_rxconf){
		.rx_drop_en = 0,
		.offloads = 0,
	};

	devinfo->default_txconf = (struct rte_eth_txconf){
		.offloads = 0,
	};

	devinfo->default_rxportconf = (struct rte_eth_dev_portconf){
		.ring_size = CNXK_NIX_RX_DEFAULT_RING_SZ,
	};

	devinfo->rx_desc_lim = (struct rte_eth_desc_lim){
		.nb_max = UINT16_MAX,
		.nb_min = CNXK_NIX_RX_MIN_DESC,
		.nb_align = CNXK_NIX_RX_MIN_DESC_ALIGN,
		.nb_seg_max = CNXK_NIX_RX_NB_SEG_MAX,
		.nb_mtu_seg_max = CNXK_NIX_RX_NB_SEG_MAX,
	};
	devinfo->rx_desc_lim.nb_max =
		RTE_ALIGN_MUL_FLOOR(devinfo->rx_desc_lim.nb_max,
				    CNXK_NIX_RX_MIN_DESC_ALIGN);

	devinfo->tx_desc_lim = (struct rte_eth_desc_lim){
		.nb_max = UINT16_MAX,
		.nb_min = 1,
		.nb_align = 1,
		.nb_seg_max = CNXK_NIX_TX_NB_SEG_MAX,
		.nb_mtu_seg_max = CNXK_NIX_TX_NB_SEG_MAX,
	};

	devinfo->speed_capa = dev->speed_capa;
	devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
			    RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP |
			    RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

	/* HW supports two Rx pools per queue: SPB for packets smaller than
	 * 4K and LPB for larger ones. Exposing the multiple-mempool
	 * capability lets the application hand in both pools with different
	 * segment lengths, so the PMD can program HW to pick a pool by
	 * packet size and memory is used more effectively.
	 */
	devinfo->max_rx_mempools = CNXK_NIX_NUM_POOLS_MAX;

	return 0;
}

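/* Usage sketch (illustrative only, not part of this driver): applications
 * reach the callback above through the generic ethdev API. `port_id` is
 * assumed to be a valid, probed cnxk port.
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0)
 *		printf("mtu range [%u, %u], reta size %u\n",
 *		       info.min_mtu, info.max_mtu, info.reta_size);
 */
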
int
cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
			   struct rte_eth_burst_mode *mode)
{
	ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} rx_offload_map[] = {
		{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
		{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
		{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
		{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
		{RTE_ETH_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
		{RTE_ETH_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
		{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
		{RTE_ETH_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
		{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
		{RTE_ETH_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
		{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
		{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
		{RTE_ETH_RX_OFFLOAD_SECURITY, " Security,"},
		{RTE_ETH_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
		{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
		{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
		{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
	};
	static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
						 "Scalar, Rx Offloads:"
	};
	uint32_t i;

	PLT_SET_USED(queue_id);

	/* Update burst mode info */
	rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
			 str_size - bytes);
	if (rc < 0)
		goto done;

	bytes += rc;

	/* Update Rx offload info */
	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
		if (dev->rx_offloads & rx_offload_map[i].flags) {
			rc = rte_strscpy(mode->info + bytes,
					 rx_offload_map[i].output,
					 str_size - bytes);
			if (rc < 0)
				goto done;

			bytes += rc;
		}
	}

done:
	return 0;
}

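/* Usage sketch (illustrative only): the burst mode strings built above (and
 * in the Tx variant below) are retrieved through the generic ethdev API;
 * `port_id` and queue 0 are assumptions for the example.
 *
 *	struct rte_eth_burst_mode bm;
 *
 *	if (rte_eth_rx_burst_mode_get(port_id, 0, &bm) == 0)
 *		printf("Rx burst mode: %s\n", bm.info);
 */
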
int
cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
			   struct rte_eth_burst_mode *mode)
{
	ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} tx_offload_map[] = {
		{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
		{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
		{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
		{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
		{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
		{RTE_ETH_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
		{RTE_ETH_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
		{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
		{RTE_ETH_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
		{RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
		{RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
		{RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
		{RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
		{RTE_ETH_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
		{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
		{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
		{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
		{RTE_ETH_TX_OFFLOAD_SECURITY, " Security,"},
		{RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
		{RTE_ETH_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
		{RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
		{RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
	};
	static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
						 "Scalar, Tx Offloads:"
	};
	uint32_t i;

	PLT_SET_USED(queue_id);

	/* Update burst mode info */
	rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
			 str_size - bytes);
	if (rc < 0)
		goto done;

	bytes += rc;

	/* Update Tx offload info */
	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
		if (dev->tx_offloads & tx_offload_map[i].flags) {
			rc = rte_strscpy(mode->info + bytes,
					 tx_offload_map[i].output,
					 str_size - bytes);
			if (rc < 0)
				goto done;

			bytes += rc;
		}
	}

done:
	return 0;
}

int
cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
		       struct rte_eth_fc_conf *fc_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	enum rte_eth_fc_mode mode_map[] = {
		RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
		RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL
	};
	struct roc_nix *nix = &dev->nix;
	int mode;

	mode = roc_nix_fc_mode_get(nix);
	if (mode < 0)
		return mode;

	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	fc_conf->mode = mode_map[mode];
	return 0;
}

static int
nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
{
	struct roc_nix *nix = &dev->nix;
	struct roc_nix_fc_cfg fc_cfg;
	struct roc_nix_cq *cq;
	struct roc_nix_rq *rq;

	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
	rq = &dev->rqs[qid];
	cq = &dev->cqs[qid];
	fc_cfg.type = ROC_NIX_FC_RQ_CFG;
	fc_cfg.rq_cfg.enable = enable;
	fc_cfg.rq_cfg.tc = 0;
	fc_cfg.rq_cfg.rq = qid;
	fc_cfg.rq_cfg.pool = rq->aura_handle;
	fc_cfg.rq_cfg.cq_drop = cq->drop_thresh;

	return roc_nix_fc_config_set(nix, &fc_cfg);
}

int
cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
		       struct rte_eth_fc_conf *fc_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	enum roc_nix_fc_mode mode_map[] = {
		ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
		ROC_NIX_FC_TX, ROC_NIX_FC_FULL
	};
	struct rte_eth_dev_data *data = eth_dev->data;
	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
	struct roc_nix *nix = &dev->nix;
	struct cnxk_eth_rxq_sp *rxq;
	struct cnxk_eth_txq_sp *txq;
	uint8_t rx_pause, tx_pause;
	int rc, i;

	if (roc_nix_is_sdp(nix))
		return 0;

	if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
	    fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
		plt_info("Only MODE configuration is supported");
		return -EINVAL;
	}

	rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
		   (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
	tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
		   (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);

	if (fc_conf->mode == fc->mode) {
		fc->rx_pause = rx_pause;
		fc->tx_pause = tx_pause;
		return 0;
	}

	/* Check if TX pause frame is already enabled or not */
	if (fc->tx_pause ^ tx_pause) {
		if (roc_model_is_cn96_ax() && data->dev_started) {
			/* On Ax, CQ should be in disabled state
			 * while setting flow control configuration.
			 */
			plt_info("Stop the port=%d for setting flow control",
				 data->port_id);
			return 0;
		}

		for (i = 0; i < data->nb_rx_queues; i++) {
			struct roc_nix_fc_cfg fc_cfg;

			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
			rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
			      1;
			rxq->tx_pause = !!tx_pause;
			rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
			if (rc)
				return rc;
		}
	}

	/* Check if RX pause frame is enabled or not */
	if (fc->rx_pause ^ rx_pause) {
		for (i = 0; i < data->nb_tx_queues; i++) {
			struct roc_nix_fc_cfg fc_cfg;

			memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
			txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[i]) -
			      1;
			fc_cfg.type = ROC_NIX_FC_TM_CFG;
			fc_cfg.tm_cfg.sq = txq->qid;
			fc_cfg.tm_cfg.enable = !!rx_pause;
			rc = roc_nix_fc_config_set(nix, &fc_cfg);
			if (rc)
				return rc;
		}
	}

	rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
	if (rc)
		return rc;

	fc->rx_pause = rx_pause;
	fc->tx_pause = tx_pause;
	fc->mode = fc_conf->mode;
	return rc;
}

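/* Usage sketch (illustrative only): since only the mode is honoured above,
 * a caller enables full 802.3x flow control like this (`port_id` assumed
 * valid; every other rte_eth_fc_conf field must stay zero or the callback
 * returns -EINVAL):
 *
 *	struct rte_eth_fc_conf fc = { .mode = RTE_ETH_FC_FULL };
 *
 *	int rc = rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */
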
int
cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
					   struct rte_eth_pfc_queue_info *pfc_info)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	pfc_info->tc_max = roc_nix_chan_count_get(&dev->nix);
	pfc_info->mode_capa = RTE_ETH_FC_FULL;
	return 0;
}

int
cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
					 struct rte_eth_pfc_queue_conf *pfc_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	enum rte_eth_fc_mode mode;
	uint8_t en, tc;
	uint16_t qid;
	int rc = 0;

	if (dev->fc_cfg.mode != RTE_ETH_FC_NONE) {
		plt_err("Disable Flow Control before configuring PFC");
		return -ENOTSUP;
	}

	if (roc_nix_is_sdp(nix) || roc_nix_is_lbk(nix)) {
		plt_nix_dbg("Prio flow ctrl config is not allowed on SDP/LBK");
		return -ENOTSUP;
	}

	mode = pfc_conf->mode;

	/* Perform Tx pause configuration on RQ */
	qid = pfc_conf->tx_pause.rx_qid;
	if (qid < eth_dev->data->nb_rx_queues) {
		en = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);
		tc = pfc_conf->tx_pause.tc;
		rc = nix_priority_flow_ctrl_rq_conf(eth_dev, qid, en, tc);
	}

	/* Perform Rx pause configuration on SQ */
	qid = pfc_conf->rx_pause.tx_qid;
	if (qid < eth_dev->data->nb_tx_queues) {
		en = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
		tc = pfc_conf->rx_pause.tc;
		rc |= nix_priority_flow_ctrl_sq_conf(eth_dev, qid, en, tc);
	}

	return rc;
}

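/* Usage sketch (illustrative only): per-queue PFC is requested through the
 * generic ethdev call below; the queue ids and traffic class value are
 * assumptions for the example.
 *
 *	struct rte_eth_pfc_queue_conf conf = {
 *		.mode = RTE_ETH_FC_FULL,
 *		.tx_pause = { .rx_qid = 0, .tc = 3 },
 *		.rx_pause = { .tx_qid = 0, .tc = 3 },
 *	};
 *
 *	int rc = rte_eth_dev_priority_flow_ctrl_queue_configure(port_id,
 *								&conf);
 */
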
int
cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
		      const struct rte_flow_ops **ops)
{
	RTE_SET_USED(eth_dev);

	*ops = &cnxk_flow_ops;
	return 0;
}

int
cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	/* Update mac address at NPC */
	rc = roc_nix_npc_mac_addr_set(nix, addr->addr_bytes);
	if (rc)
		goto exit;

	/* Update mac address at CGX for PFs only */
	if (!roc_nix_is_vf_or_sdp(nix)) {
		rc = roc_nix_mac_addr_set(nix, addr->addr_bytes);
		if (rc) {
			/* Rollback to previous mac address */
			roc_nix_npc_mac_addr_set(nix, dev->mac_addr);
			goto exit;
		}
	}

	/* Update mac address to cnxk ethernet device */
	rte_memcpy(dev->mac_addr, addr->addr_bytes, RTE_ETHER_ADDR_LEN);

exit:
	return rc;
}

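/* Usage sketch (illustrative only): the default MAC is changed through the
 * ethdev API; the locally administered address bytes are placeholders.
 *
 *	struct rte_ether_addr mac = {
 *		.addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01} };
 *
 *	int rc = rte_eth_dev_default_mac_addr_set(port_id, &mac);
 */
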
int
cnxk_nix_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr,
		      uint32_t index, uint32_t pool)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	PLT_SET_USED(index);
	PLT_SET_USED(pool);

	rc = roc_nix_mac_addr_add(nix, addr->addr_bytes);
	if (rc < 0) {
		plt_err("Failed to add mac address, rc=%d", rc);
		return rc;
	}

	/* Enable promiscuous mode at NIX level */
	roc_nix_npc_promisc_ena_dis(nix, true);
	dev->dmac_filter_enable = true;
	eth_dev->data->promiscuous = false;
	dev->dmac_filter_count++;

	return 0;
}

void
cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc;

	rc = roc_nix_mac_addr_del(nix, index);
	if (rc)
		plt_err("Failed to delete mac address, rc=%d", rc);

	dev->dmac_filter_count--;
}

int
cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	uint32_t old_frame_size, frame_size = mtu + CNXK_NIX_L2_OVERHEAD;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct roc_nix *nix = &dev->nix;
	int rc = -EINVAL;
	uint32_t buffsz;

	frame_size += CNXK_NIX_TIMESYNC_RX_OFFSET * dev->ptp_en;

	/* Check if MTU is within the allowed range */
	if ((frame_size - RTE_ETHER_CRC_LEN) < NIX_MIN_HW_FRS) {
		plt_err("MTU is lesser than minimum");
		goto exit;
	}

	if ((frame_size - RTE_ETHER_CRC_LEN) >
	    ((uint32_t)roc_nix_max_pkt_len(nix))) {
		plt_err("MTU is greater than maximum");
		goto exit;
	}

	buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
	old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD;

	/* Refuse MTU that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (data->dev_started && frame_size > buffsz &&
	    !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
		plt_err("Scatter offload is not enabled for mtu");
		goto exit;
	}

	/* Check <seg size> * <max_seg> >= max_frame */
	if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) &&
	    frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
		plt_err("Greater than maximum supported packet length");
		goto exit;
	}

	frame_size -= RTE_ETHER_CRC_LEN;

	/* Update mtu on Tx */
	rc = roc_nix_mac_mtu_set(nix, frame_size);
	if (rc) {
		plt_err("Failed to set MTU, rc=%d", rc);
		goto exit;
	}

	/* Sync same frame size on Rx */
	rc = roc_nix_mac_max_rx_len_set(nix, frame_size);
	if (rc) {
		/* Rollback to older mtu */
		roc_nix_mac_mtu_set(nix,
				    old_frame_size - RTE_ETHER_CRC_LEN);
		plt_err("Failed to set max Rx frame length, rc=%d", rc);
		goto exit;
	}
exit:
	return rc;
}

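/* Usage sketch (illustrative only): the checks above run against
 * mtu + CNXK_NIX_L2_OVERHEAD, so an application passes the plain L3 MTU;
 * 9000 is just an example and needs RTE_ETH_RX_OFFLOAD_SCATTER if it
 * exceeds the mbuf data room.
 *
 *	int rc = rte_eth_dev_set_mtu(port_id, 9000);
 */
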
int
cnxk_nix_promisc_enable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc = 0;

	if (roc_nix_is_vf_or_sdp(nix))
		return rc;

	rc = roc_nix_npc_promisc_ena_dis(nix, true);
	if (rc) {
		plt_err("Failed to setup promisc mode in npc, rc=%d(%s)", rc,
			roc_error_msg_get(rc));
		return rc;
	}

	rc = roc_nix_mac_promisc_mode_enable(nix, true);
	if (rc) {
		plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc,
			roc_error_msg_get(rc));
		roc_nix_npc_promisc_ena_dis(nix, false);
		return rc;
	}

	return 0;
}

int
cnxk_nix_promisc_disable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc = 0;

	if (roc_nix_is_vf_or_sdp(nix))
		return rc;

	rc = roc_nix_npc_promisc_ena_dis(nix, dev->dmac_filter_enable);
	if (rc) {
		plt_err("Failed to setup promisc mode in npc, rc=%d(%s)", rc,
			roc_error_msg_get(rc));
		return rc;
	}

	rc = roc_nix_mac_promisc_mode_enable(nix, false);
	if (rc) {
		plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc,
			roc_error_msg_get(rc));
		roc_nix_npc_promisc_ena_dis(nix, !dev->dmac_filter_enable);
		return rc;
	}

	dev->dmac_filter_enable = false;
	return 0;
}

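/* Usage sketch (illustrative only): both callbacks above are reached via
 * the usual ethdev toggles; `port_id` is assumed valid and note they are
 * no-ops on VF/SDP devices.
 *
 *	rte_eth_promiscuous_enable(port_id);
 *	...
 *	rte_eth_promiscuous_disable(port_id);
 */
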
int
cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	return roc_nix_npc_mcast_config(&dev->nix, true,
					eth_dev->data->promiscuous);
}

int
cnxk_nix_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	return roc_nix_npc_mcast_config(&dev->nix, false,
					eth_dev->data->promiscuous);
}

int
cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc, i;

	if (roc_nix_is_vf_or_sdp(nix))
		return -ENOTSUP;

	rc = roc_nix_mac_link_state_set(nix, true);
	if (rc)
		goto exit;

	/* Start tx queues */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		rc = cnxk_nix_tx_queue_start(eth_dev, i);
		if (rc)
			goto exit;
	}

exit:
	return rc;
}

int
cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	int rc, i;

	if (roc_nix_is_vf_or_sdp(nix))
		return -ENOTSUP;

	/* Stop tx queues */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		rc = cnxk_nix_tx_queue_stop(eth_dev, i);
		if (rc)
			goto exit;
	}

	rc = roc_nix_mac_link_state_set(nix, false);
exit:
	return rc;
}

int
cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
			 struct rte_eth_dev_module_info *modinfo)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix_eeprom_info eeprom_info = {0};
	struct roc_nix *nix = &dev->nix;
	int rc;

	rc = roc_nix_eeprom_info_get(nix, &eeprom_info);
	if (rc)
		return rc;

	modinfo->type = eeprom_info.sff_id;
	modinfo->eeprom_len = ROC_NIX_EEPROM_SIZE;
	return 0;
}

int
cnxk_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
			   struct rte_dev_eeprom_info *info)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix_eeprom_info eeprom_info = {0};
	struct roc_nix *nix = &dev->nix;
	int rc = -EINVAL;

	if (!info->data || !info->length ||
	    (info->offset + info->length > ROC_NIX_EEPROM_SIZE))
		return rc;

	rc = roc_nix_eeprom_info_get(nix, &eeprom_info);
	if (rc)
		return rc;

	rte_memcpy(info->data, eeprom_info.buf + info->offset, info->length);
	return 0;
}

int
cnxk_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	roc_nix_rx_queue_intr_enable(&dev->nix, rx_queue_id);
	return 0;
}

int
cnxk_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
			       uint16_t rx_queue_id)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	roc_nix_rx_queue_intr_disable(&dev->nix, rx_queue_id);
	return 0;
}

int
cnxk_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool)
{
	RTE_SET_USED(eth_dev);

	if (!strcmp(pool, rte_mbuf_platform_mempool_ops()))
		return 0;

	return -ENOTSUP;
}

int
cnxk_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
			size_t fw_size)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const char *str = roc_npc_profile_name_get(&dev->npc);
	uint32_t size = strlen(str) + 1;

	if (fw_size > size)
		fw_size = size;

	rte_strlcpy(fw_version, str, fw_size);

	if (fw_size < size)
		return size;

	return 0;
}

void
cnxk_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
		      struct rte_eth_rxq_info *qinfo)
{
	void *rxq = eth_dev->data->rx_queues[qid];
	struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);

	memset(qinfo, 0, sizeof(*qinfo));

	qinfo->mp = rxq_sp->qconf.mp;
	qinfo->scattered_rx = eth_dev->data->scattered_rx;
	qinfo->nb_desc = rxq_sp->qconf.nb_desc;

	memcpy(&qinfo->conf, &rxq_sp->qconf.conf.rx, sizeof(qinfo->conf));
}

void
cnxk_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
		      struct rte_eth_txq_info *qinfo)
{
	void *txq = eth_dev->data->tx_queues[qid];
	struct cnxk_eth_txq_sp *txq_sp = cnxk_eth_txq_to_sp(txq);

	memset(qinfo, 0, sizeof(*qinfo));

	qinfo->nb_desc = txq_sp->qconf.nb_desc;

	memcpy(&qinfo->conf, &txq_sp->qconf.conf.tx, sizeof(qinfo->conf));
}

uint32_t
cnxk_nix_rx_queue_count(void *rxq)
{
	struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);
	struct roc_nix *nix = &rxq_sp->dev->nix;
	uint32_t head, tail;

	roc_nix_cq_head_tail_get(nix, rxq_sp->qid, &head, &tail);
	return (tail - head) % (rxq_sp->qconf.nb_desc);
}

static inline int
nix_offset_has_packet(uint32_t head, uint32_t tail, uint16_t offset, bool is_rx)
{
	/* Check whether the descriptor at the given offset (queue index)
	 * has been filled (Rx) or transmitted (Tx) by HW, accounting for
	 * ring wrap-around.
	 */
	return ((tail > head && offset <= tail && offset >= head) ||
		(head > tail && (offset >= head || offset <= tail))) ?
		       is_rx :
		       !is_rx;
}

int
cnxk_nix_rx_descriptor_status(void *rxq, uint16_t offset)
{
	struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);
	struct roc_nix *nix = &rxq_sp->dev->nix;
	uint32_t head, tail;

	if (rxq_sp->qconf.nb_desc <= offset)
		return -EINVAL;

	roc_nix_cq_head_tail_get(nix, rxq_sp->qid, &head, &tail);

	if (nix_offset_has_packet(head, tail, offset, 1))
		return RTE_ETH_RX_DESC_DONE;
	else
		return RTE_ETH_RX_DESC_AVAIL;
}

int
cnxk_nix_tx_descriptor_status(void *txq, uint16_t offset)
{
	struct cnxk_eth_txq_sp *txq_sp = cnxk_eth_txq_to_sp(txq);
	struct roc_nix *nix = &txq_sp->dev->nix;
	uint32_t head = 0, tail = 0;

	if (txq_sp->qconf.nb_desc <= offset)
		return -EINVAL;

	roc_nix_sq_head_tail_get(nix, txq_sp->qid, &head, &tail);

	if (nix_offset_has_packet(head, tail, offset, 0))
		return RTE_ETH_TX_DESC_DONE;
	else
		return RTE_ETH_TX_DESC_FULL;
}

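/* Usage sketch (illustrative only): queue fill level and per-descriptor
 * state are queried through the ethdev wrappers; queue 0 and offset 32 are
 * example values.
 *
 *	int used = rte_eth_rx_queue_count(port_id, 0);
 *	int st = rte_eth_rx_descriptor_status(port_id, 0, 32);
 *
 *	if (st == RTE_ETH_RX_DESC_DONE)
 *		; packet at that offset has been received by HW
 */
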
/* It is a NOP for cnxk as HW frees the buffer on xmit */
int
cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
{
	RTE_SET_USED(txq);
	RTE_SET_USED(free_cnt);

	return 0;
}

int
cnxk_nix_dev_get_reg(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	uint64_t *data = regs->data;
	int rc = -ENOTSUP;

	if (data == NULL) {
		rc = roc_nix_lf_get_reg_count(nix);
		if (rc > 0) {
			regs->length = rc;
			regs->width = 8;
			rc = 0;
		}
		return rc;
	}

	if (!regs->length ||
	    regs->length == (uint32_t)roc_nix_lf_get_reg_count(nix))
		return roc_nix_lf_reg_dump(nix, data);

	return rc;
}

int
cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
		     struct rte_eth_rss_reta_entry64 *reta_conf,
		     uint16_t reta_size)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint16_t reta[ROC_NIX_RSS_RETA_MAX];
	struct roc_nix *nix = &dev->nix;
	int i, j, rc = -EINVAL, idx = 0;

	if (reta_size != dev->nix.reta_sz) {
		plt_err("Size of hash lookup table configured (%d) does not "
			"match the number hardware can support (%d)",
			reta_size, dev->nix.reta_sz);
		goto fail;
	}

	roc_nix_rss_reta_get(nix, 0, reta);

	/* Copy RETA table */
	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
			if ((reta_conf[i].mask >> j) & 0x01)
				reta[idx] = reta_conf[i].reta[j];
			idx++;
		}
	}

	return roc_nix_rss_reta_set(nix, 0, reta);

fail:
	return rc;
}

int
cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	uint16_t reta[ROC_NIX_RSS_RETA_MAX];
	struct roc_nix *nix = &dev->nix;
	int rc = -EINVAL, i, j, idx = 0;

	if (reta_size != dev->nix.reta_sz) {
		plt_err("Size of hash lookup table configured (%d) does not "
			"match the number hardware can support (%d)",
			reta_size, dev->nix.reta_sz);
		goto fail;
	}

	rc = roc_nix_rss_reta_get(nix, 0, reta);
	if (rc)
		goto fail;

	/* Copy RETA table */
	for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = reta[idx];
			idx++;
		}
	}

	return 0;

fail:
	return rc;
}

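/* Usage sketch (illustrative only): the table passed in must be exactly
 * dev_info.reta_size entries, split into groups of RTE_ETH_RETA_GROUP_SIZE.
 * This example assumes a 256-entry table (4 groups) and spreads entries
 * round-robin over `nb_rxq` Rx queues; both values are assumptions.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[4];
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < 4 * RTE_ETH_RETA_GROUP_SIZE; i++) {
 *		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
 *		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE]
 *			.reta[i % RTE_ETH_RETA_GROUP_SIZE] = i % nb_rxq;
 *	}
 *	rc = rte_eth_dev_rss_reta_update(port_id, reta_conf,
 *					 4 * RTE_ETH_RETA_GROUP_SIZE);
 */
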
int
cnxk_nix_eth_dev_priv_dump(struct rte_eth_dev *eth_dev, FILE *file)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *roc_nix = &dev->nix;
	int i;

	roc_nix_dump(roc_nix, file);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
		roc_nix_rq_dump(&dev->rqs[i], file);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
		roc_nix_cq_dump(&dev->cqs[i], file);

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		roc_nix_sq_dump(&dev->sqs[i], file);

	roc_nix_queues_ctx_dump(roc_nix, file);

	roc_nix_tm_dump(roc_nix, file);

	roc_nix_inl_dev_dump(NULL, file);

	roc_nix_inl_outb_cpt_lfs_dump(roc_nix, file);

	return 0;
}

int
cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_conf *rss_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	uint8_t rss_hash_level;
	uint32_t flowkey_cfg;
	int rc = -EINVAL;
	uint8_t alg_idx;

	if (rss_conf->rss_key && rss_conf->rss_key_len != ROC_NIX_RSS_KEY_LEN) {
		plt_err("Hash key size mismatch %d vs %d",
			rss_conf->rss_key_len, ROC_NIX_RSS_KEY_LEN);
		goto fail;
	}

	if (rss_conf->rss_key)
		roc_nix_rss_key_set(nix, rss_conf->rss_key);

	rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
	if (rss_hash_level)
		rss_hash_level -= 1;
	flowkey_cfg =
		cnxk_rss_ethdev_to_nix(dev, rss_conf->rss_hf, rss_hash_level);

	rc = roc_nix_rss_flowkey_set(nix, &alg_idx, flowkey_cfg,
				     ROC_NIX_RSS_GROUP_DEFAULT,
				     ROC_NIX_RSS_MCAM_IDX_DEFAULT);
	if (rc) {
		plt_err("Failed to set RSS hash function rc=%d", rc);
		return rc;
	}

fail:
	return rc;
}

int
cnxk_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
			   struct rte_eth_rss_conf *rss_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

	if (rss_conf->rss_key)
		roc_nix_rss_key_get(&dev->nix, rss_conf->rss_key);

	rss_conf->rss_key_len = ROC_NIX_RSS_KEY_LEN;
	rss_conf->rss_hf = dev->ethdev_rss_hf;

	return 0;
}

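/* Usage sketch (illustrative only): a hash key, if supplied, must be
 * exactly ROC_NIX_RSS_KEY_LEN bytes; selecting IP+TCP hashing with the
 * default key looks like this.
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = NULL,
 *		.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
 *	};
 *
 *	rc = rte_eth_dev_rss_hash_update(port_id, &conf);
 */
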
int
cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
				struct rte_ether_addr *mc_addr_set,
				uint32_t nb_mc_addr)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_ether_addr null_mac_addr;
	struct roc_nix *nix = &dev->nix;
	int rc, index;
	uint32_t i;

	memset(&null_mac_addr, 0, sizeof(null_mac_addr));

	/* All configured multicast filters should be flushed first */
	for (i = 0; i < dev->max_mac_entries; i++) {
		if (rte_is_multicast_ether_addr(&data->mac_addrs[i])) {
			rc = roc_nix_mac_addr_del(nix, i);
			if (rc) {
				plt_err("Failed to flush mcast address, rc=%d",
					rc);
				return rc;
			}

			dev->dmac_filter_count--;
			/* Update address in NIC data structure */
			rte_ether_addr_copy(&null_mac_addr,
					    &data->mac_addrs[i]);
		}
	}

	if (!mc_addr_set || !nb_mc_addr)
		return 0;

	/* Check for available space */
	if (nb_mc_addr >
	    ((uint32_t)(dev->max_mac_entries - dev->dmac_filter_count))) {
		plt_err("No space is available to add multicast filters");
		return -ENOSPC;
	}

	/* Multicast addresses are to be installed */
	for (i = 0; i < nb_mc_addr; i++) {
		index = roc_nix_mac_addr_add(nix, mc_addr_set[i].addr_bytes);
		if (index < 0) {
			plt_err("Failed to add mcast mac address, rc=%d",
				index);
			return index;
		}

		dev->dmac_filter_count++;
		/* Update address in NIC data structure */
		rte_ether_addr_copy(&mc_addr_set[i], &data->mac_addrs[index]);
	}

	roc_nix_npc_promisc_ena_dis(nix, true);
	dev->dmac_filter_enable = true;
	eth_dev->data->promiscuous = false;

	return 0;
}

int
nix_priority_flow_ctrl_rq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
			       uint8_t tx_pause, uint8_t tc)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
	struct roc_nix *nix = &dev->nix;
	struct roc_nix_pfc_cfg pfc_cfg;
	struct roc_nix_fc_cfg fc_cfg;
	struct cnxk_eth_rxq_sp *rxq;
	enum roc_nix_fc_mode mode;
	struct roc_nix_rq *rq;
	struct roc_nix_cq *cq;
	int rc;

	if (roc_model_is_cn96_ax() && data->dev_started) {
		/* On Ax, CQ should be in disabled state
		 * while setting flow control configuration.
		 */
		plt_info("Stop the port=%d for setting flow control",
			 data->port_id);
		return 0;
	}

	if (data->rx_queues == NULL)
		return -EINVAL;

	if (qid >= eth_dev->data->nb_rx_queues)
		return -ENOTSUP;

	/* Configure RQ */
	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[qid]) - 1;
	rq = &dev->rqs[qid];
	cq = &dev->cqs[qid];

	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
	fc_cfg.type = ROC_NIX_FC_RQ_CFG;
	fc_cfg.rq_cfg.tc = tc;
	fc_cfg.rq_cfg.enable = !!tx_pause;
	fc_cfg.rq_cfg.rq = rq->qid;
	fc_cfg.rq_cfg.pool = rxq->qconf.mp->pool_id;
	fc_cfg.rq_cfg.cq_drop = cq->drop_thresh;
	rc = roc_nix_fc_config_set(nix, &fc_cfg);
	if (rc)
		return rc;

	if (rxq->tx_pause != tx_pause) {
		if (tx_pause)
			pfc->tx_pause_en++;
		else
			pfc->tx_pause_en--;
	}

	rxq->tx_pause = !!tx_pause;
	rxq->tc = tc;

	/* Skip if PFC already enabled in mac */
	if (pfc->tx_pause_en > 1)
		return 0;

	/* Configure MAC block */
	pfc->class_en = pfc->tx_pause_en ? 0xFF : 0x0;

	if (pfc->rx_pause_en)
		mode = pfc->tx_pause_en ? ROC_NIX_FC_FULL : ROC_NIX_FC_RX;
	else
		mode = pfc->tx_pause_en ? ROC_NIX_FC_TX : ROC_NIX_FC_NONE;

	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
	pfc_cfg.mode = mode;
	pfc_cfg.tc = pfc->class_en;
	return roc_nix_pfc_mode_set(nix, &pfc_cfg);
}

int
nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
			       uint8_t rx_pause, uint8_t tc)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
	struct roc_nix *nix = &dev->nix;
	struct roc_nix_pfc_cfg pfc_cfg;
	struct roc_nix_fc_cfg fc_cfg;
	struct cnxk_eth_txq_sp *txq;
	enum roc_nix_fc_mode mode;
	struct roc_nix_sq *sq;
	int rc;

	if (data->tx_queues == NULL)
		return -EINVAL;

	if (qid >= eth_dev->data->nb_tx_queues)
		return -ENOTSUP;

	/* Check if RX pause frame is enabled or not and
	 * confirm user requested for PFC.
	 */
	if (!pfc->rx_pause_en && rx_pause) {
		if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
		    eth_dev->data->nb_tx_queues > 1) {
			/*
			 * Disabled xmit will be enabled when
			 * new topology is available.
			 */
			rc = roc_nix_tm_hierarchy_disable(nix);
			if (rc)
				goto exit;

			rc = roc_nix_tm_pfc_prepare_tree(nix);
			if (rc)
				goto exit;

			rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
							 true);
			if (rc)
				goto exit;
		}
	}

	txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[qid]) - 1;
	sq = &dev->sqs[txq->qid];
	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
	fc_cfg.type = ROC_NIX_FC_TM_CFG;
	fc_cfg.tm_cfg.sq = sq->qid;
	fc_cfg.tm_cfg.tc = tc;
	fc_cfg.tm_cfg.enable = !!rx_pause;
	rc = roc_nix_fc_config_set(nix, &fc_cfg);
	if (rc)
		return rc;

	/* Maintaining a count for SQs which are configured for PFC. This is
	 * required to handle disabling of a particular SQ without affecting
	 * PFC on other SQs.
	 */
	if (!fc_cfg.tm_cfg.enable && sq->tc != ROC_NIX_PFC_CLASS_INVALID) {
		sq->tc = ROC_NIX_PFC_CLASS_INVALID;
		pfc->rx_pause_en--;
	} else if (fc_cfg.tm_cfg.enable &&
		   sq->tc == ROC_NIX_PFC_CLASS_INVALID) {
		sq->tc = tc;
		pfc->rx_pause_en++;
	}

	if (pfc->rx_pause_en > 1)
		goto exit;

	if (pfc->tx_pause_en)
		mode = pfc->rx_pause_en ? ROC_NIX_FC_FULL : ROC_NIX_FC_TX;
	else
		mode = pfc->rx_pause_en ? ROC_NIX_FC_RX : ROC_NIX_FC_NONE;

	memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
	pfc_cfg.mode = mode;
	pfc_cfg.tc = pfc->class_en;
	rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
exit:
	return rc;
}