net/cnxk: add device start and stop

Add device start and stop operation callbacks for
CN9K and CN10K. Device stop is common to both platforms,
while device start has a platform-dependent portion where
the platform-specific offload flags are recomputed and
the right Rx/Tx burst function is chosen.
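A condensed sketch of the start-side pattern, lifted from the cn10k hunks
below (the cn9k variant differs only in the function prefix):

    static int
    cn10k_nix_dev_start(struct rte_eth_dev *eth_dev)
    {
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        int rc;

        /* Common, platform-independent start */
        rc = cnxk_nix_dev_start(eth_dev);
        if (rc)
            return rc;

        /* Recompute the platform-specific offload flags... */
        dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
        dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);

        /* ...and select the matching Rx/Tx burst functions */
        cn10k_eth_set_tx_function(eth_dev);
        cn10k_eth_set_rx_function(eth_dev);
        return 0;
    }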

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Author: Nithin Dabilpuram 2021-06-23 10:16:31 +05:30
Committed by: Jerin Jacob
commit 89df2225c9 (parent f71b7dbbf0)
6 changed files with 438 additions and 0 deletions

File: doc/guides/nics/cnxk.rst

@@ -39,6 +39,58 @@ Driver compilation and testing
Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
for details.
#. Running testpmd:
Follow instructions available in the document
:ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
to run testpmd.
Example output:
.. code-block:: console
./<build_dir>/app/dpdk-testpmd -c 0xc -a 0002:02:00.0 -- --portmask=0x1 --nb-cores=1 --port-topology=loop --rxq=1 --txq=1
EAL: Detected 4 lcore(s)
EAL: Detected 1 NUMA nodes
EAL: Multi-process socket /var/run/dpdk/rte/mp_socket
EAL: Selected IOVA mode 'VA'
EAL: No available hugepages reported in hugepages-16777216kB
EAL: No available hugepages reported in hugepages-2048kB
EAL: Probing VFIO support...
EAL: VFIO support initialized
EAL: using IOMMU type 1 (Type 1)
[ 2003.202721] vfio-pci 0002:02:00.0: vfio_cap_init: hiding cap 0x14@0x98
EAL: Probe PCI driver: net_cn10k (177d:a063) device: 0002:02:00.0 (socket 0)
PMD: RoC Model: cn10k
EAL: No legacy callbacks, legacy socket not created
testpmd: create a new mbuf pool <mb_pool_0>: n=155456, size=2176, socket=0
testpmd: preferred mempool ops selected: cn10k_mempool_ops
Configuring Port 0 (socket 0)
PMD: Port 0: Link Up - speed 25000 Mbps - full-duplex
Port 0: link state change event
Port 0: 96:D4:99:72:A5:BF
Checking link statuses...
Done
No commandline core given, start packet forwarding
io packet forwarding - ports=1 - cores=1 - streams=1 - NUMA support enabled, MP allocation mode: native
Logical Core 3 (socket 0) forwards packets on 1 streams:
RX P=0/Q=0 (socket 0) -> TX P=0/Q=0 (socket 0) peer=02:00:00:00:00:00
io packet forwarding packets/burst=32
nb forwarding cores=1 - nb forwarding ports=1
port 0: RX queue number: 1 Tx queue number: 1
Rx offloads=0x0 Tx offloads=0x10000
RX queue: 0
RX desc=4096 - RX free threshold=0
RX threshold registers: pthresh=0 hthresh=0 wthresh=0
RX Offloads=0x0
TX queue: 0
TX desc=512 - TX free threshold=0
TX threshold registers: pthresh=0 hthresh=0 wthresh=0
TX offloads=0x0 - TX RS bit threshold=0
Press enter to exit
Runtime Config Options
----------------------
@@ -131,3 +183,35 @@ Runtime Config Options
The above devarg parameters are configurable per device. To configure all
the ethdev ports identically, the application has to pass the same
parameters to each of the PCIe devices, as shown below.
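A minimal sketch, assuming two cnxk devices and the ``reta_size`` devarg
documented in this section (the PCI addresses are illustrative):

.. code-block:: console

   ./<build_dir>/app/dpdk-testpmd -c 0xc \
         -a 0002:02:00.0,reta_size=256 \
         -a 0002:03:00.0,reta_size=256 \
         -- --portmask=0x3 --nb-cores=1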
Limitations
-----------
``mempool_cnxk`` external mempool handler dependency
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The OCTEON CN9K/CN10K SoC family NIC has an inbuilt HW-assisted external mempool manager.
The ``net_cnxk`` PMD works only with the ``mempool_cnxk`` mempool handler, as that
is the most performance-effective way to allocate packets and recycle Tx buffers
on the OCTEON CN9K/CN10K SoC platform.
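Applications normally get this handler automatically, since the PMD reports it
as the preferred mempool ops (note ``preferred mempool ops selected:
cn10k_mempool_ops`` in the testpmd log above). A minimal sketch that pins the
handler explicitly, assuming a CN10K system (the pool sizing is illustrative):

.. code-block:: c

   #include <rte_lcore.h>
   #include <rte_mbuf.h>

   static struct rte_mempool *
   make_pktmbuf_pool(void)
   {
           /* 8192 mbufs, 256-deep per-lcore cache, default data room;
            * explicitly request the cnxk HW mempool handler
            * ("cn9k_mempool_ops" on CN9K).
            */
           return rte_pktmbuf_pool_create_by_ops(
                   "mb_pool_0", 8192, 256, 0,
                   RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id(),
                   "cn10k_mempool_ops");
   }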
CRC stripping
~~~~~~~~~~~~~
The OCTEON CN9K/CN10K SoC family NICs strip the CRC for every packet being received by
the host interface irrespective of the offload configuration.
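Consequently, received ``rte_mbuf`` lengths never include the 4-byte FCS. A
minimal sketch of a defensive capability check, assuming the PMD does not
advertise ``DEV_RX_OFFLOAD_KEEP_CRC``:

.. code-block:: c

   #include <rte_ethdev.h>

   static int
   crc_kept_by_hw(uint16_t port_id)
   {
           struct rte_eth_dev_info info;

           if (rte_eth_dev_info_get(port_id, &info) != 0)
                   return -1;
           /* Assumption: cnxk never reports KEEP_CRC, so this is 0 */
           return !!(info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC);
   }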
Debugging Options
-----------------
.. _table_cnxk_ethdev_debug_options:
.. table:: cnxk ethdev debug options
+---+------------+-------------------------------------------------------+
| # | Component | EAL log command |
+===+============+=======================================================+
| 1 | NIX | --log-level='pmd\.net.cnxk,8' |
+---+------------+-------------------------------------------------------+
| 2 | NPC | --log-level='pmd\.net.cnxk\.flow,8' |
+---+------------+-------------------------------------------------------+
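For example, to run the earlier testpmd command with NIX debug logs enabled:

.. code-block:: console

   ./<build_dir>/app/dpdk-testpmd --log-level='pmd\.net.cnxk,8' -c 0xc \
         -a 0002:02:00.0 -- --portmask=0x1 --nb-cores=1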

File: drivers/net/cnxk/cn10k_ethdev.c

@@ -5,6 +5,98 @@
#include "cn10k_rx.h"
#include "cn10k_tx.h"
static uint16_t
nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct rte_eth_dev_data *data = eth_dev->data;
struct rte_eth_conf *conf = &data->dev_conf;
struct rte_eth_rxmode *rxmode = &conf->rxmode;
uint16_t flags = 0;
if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
(dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
flags |= NIX_RX_OFFLOAD_RSS_F;
if (dev->rx_offloads &
(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
if (dev->rx_offloads &
(DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
flags |= NIX_RX_MULTI_SEG_F;
if (!dev->ptype_disable)
flags |= NIX_RX_OFFLOAD_PTYPE_F;
return flags;
}
static uint16_t
nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
uint64_t conf = dev->tx_offloads;
uint16_t flags = 0;
/* Fastpath is dependent on these enums */
RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
offsetof(struct rte_mbuf, buf_iova) + 8);
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
offsetof(struct rte_mbuf, buf_iova) + 16);
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
offsetof(struct rte_mbuf, ol_flags) + 12);
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
conf & DEV_TX_OFFLOAD_QINQ_INSERT)
flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
flags |= NIX_TX_MULTI_SEG_F;
/* Enable Inner checksum for TSO */
if (conf & DEV_TX_OFFLOAD_TCP_TSO)
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
/* Enable Inner and Outer checksum for Tunnel TSO */
if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
NIX_TX_OFFLOAD_L3_L4_CSUM_F);
return flags;
}
static int
cn10k_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask)
{
@@ -18,6 +110,7 @@ cn10k_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask)
dev->ptype_disable = 1;
}
cn10k_eth_set_rx_function(eth_dev);
return 0;
}
@@ -163,6 +256,10 @@ cn10k_nix_configure(struct rte_eth_dev *eth_dev)
if (rc)
return rc;
/* Update offload flags */
dev->rx_offload_flags = nix_rx_offload_flags(eth_dev);
dev->tx_offload_flags = nix_tx_offload_flags(eth_dev);
plt_nix_dbg("Configured port%d platform specific rx_offload_flags=%x"
" tx_offload_flags=0x%x",
eth_dev->data->port_id, dev->rx_offload_flags,
@@ -170,6 +267,28 @@ cn10k_nix_configure(struct rte_eth_dev *eth_dev)
return 0;
}
static int
cn10k_nix_dev_start(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
int rc;
/* Common eth dev start */
rc = cnxk_nix_dev_start(eth_dev);
if (rc)
return rc;
/* Setting up the rx[tx]_offload_flags due to change
* in rx[tx]_offloads.
*/
dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
cn10k_eth_set_tx_function(eth_dev);
cn10k_eth_set_rx_function(eth_dev);
return 0;
}
/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
@@ -185,6 +304,7 @@ nix_eth_dev_ops_override(void)
cnxk_eth_dev_ops.tx_queue_setup = cn10k_nix_tx_queue_setup;
cnxk_eth_dev_ops.rx_queue_setup = cn10k_nix_rx_queue_setup;
cnxk_eth_dev_ops.tx_queue_stop = cn10k_nix_tx_queue_stop;
cnxk_eth_dev_ops.dev_start = cn10k_nix_dev_start;
cnxk_eth_dev_ops.dev_ptypes_set = cn10k_nix_ptypes_set;
}
@@ -222,6 +342,10 @@ cn10k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
if (!eth_dev)
return -ENOENT;
/* Setup callbacks for secondary process */
cn10k_eth_set_tx_function(eth_dev);
cn10k_eth_set_rx_function(eth_dev);
}
return 0;
}

File: drivers/net/cnxk/cn9k_ethdev.c

@@ -5,6 +5,98 @@
#include "cn9k_rx.h"
#include "cn9k_tx.h"
static uint16_t
nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct rte_eth_dev_data *data = eth_dev->data;
struct rte_eth_conf *conf = &data->dev_conf;
struct rte_eth_rxmode *rxmode = &conf->rxmode;
uint16_t flags = 0;
if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
(dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
flags |= NIX_RX_OFFLOAD_RSS_F;
if (dev->rx_offloads &
(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
if (dev->rx_offloads &
(DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
flags |= NIX_RX_MULTI_SEG_F;
if (!dev->ptype_disable)
flags |= NIX_RX_OFFLOAD_PTYPE_F;
return flags;
}
static uint16_t
nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
uint64_t conf = dev->tx_offloads;
uint16_t flags = 0;
/* Fastpath is dependent on these enums */
RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
offsetof(struct rte_mbuf, buf_iova) + 8);
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
offsetof(struct rte_mbuf, buf_iova) + 16);
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
offsetof(struct rte_mbuf, ol_flags) + 12);
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
conf & DEV_TX_OFFLOAD_QINQ_INSERT)
flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
flags |= NIX_TX_MULTI_SEG_F;
/* Enable Inner checksum for TSO */
if (conf & DEV_TX_OFFLOAD_TCP_TSO)
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
/* Enable Inner and Outer checksum for Tunnel TSO */
if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
NIX_TX_OFFLOAD_L3_L4_CSUM_F);
return flags;
}
static int
cn9k_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask)
{
@@ -18,6 +110,7 @@ cn9k_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask)
dev->ptype_disable = 1;
}
cn9k_eth_set_rx_function(eth_dev);
return 0;
}
@@ -172,6 +265,10 @@ cn9k_nix_configure(struct rte_eth_dev *eth_dev)
if (rc)
return rc;
/* Update offload flags */
dev->rx_offload_flags = nix_rx_offload_flags(eth_dev);
dev->tx_offload_flags = nix_tx_offload_flags(eth_dev);
plt_nix_dbg("Configured port%d platform specific rx_offload_flags=%x"
" tx_offload_flags=0x%x",
eth_dev->data->port_id, dev->rx_offload_flags,
@@ -179,6 +276,28 @@ cn9k_nix_configure(struct rte_eth_dev *eth_dev)
return 0;
}
static int
cn9k_nix_dev_start(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
int rc;
/* Common eth dev start */
rc = cnxk_nix_dev_start(eth_dev);
if (rc)
return rc;
/* Setting up the rx[tx]_offload_flags due to change
* in rx[tx]_offloads.
*/
dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
cn9k_eth_set_tx_function(eth_dev);
cn9k_eth_set_rx_function(eth_dev);
return 0;
}
/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
@@ -194,6 +313,7 @@ nix_eth_dev_ops_override(void)
cnxk_eth_dev_ops.tx_queue_setup = cn9k_nix_tx_queue_setup;
cnxk_eth_dev_ops.rx_queue_setup = cn9k_nix_rx_queue_setup;
cnxk_eth_dev_ops.tx_queue_stop = cn9k_nix_tx_queue_stop;
cnxk_eth_dev_ops.dev_start = cn9k_nix_dev_start;
cnxk_eth_dev_ops.dev_ptypes_set = cn9k_nix_ptypes_set;
}
@@ -233,6 +353,13 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (!eth_dev)
return -ENOENT;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
/* Setup callbacks for secondary process */
cn9k_eth_set_tx_function(eth_dev);
cn9k_eth_set_rx_function(eth_dev);
return 0;
}
dev = cnxk_eth_pmd_priv(eth_dev);
/* Update capabilities already set for TSO.
* TSO not supported for earlier chip revisions

File: drivers/net/cnxk/cnxk_ethdev.c

@@ -955,12 +955,102 @@ done:
return rc;
}
static int
cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
struct rte_mbuf *rx_pkts[32];
int count, i, j, rc;
void *rxq;
/* Disable switch hdr pkind */
roc_nix_switch_hdr_set(&dev->nix, 0);
/* Stop link change events */
if (!roc_nix_is_vf_or_sdp(&dev->nix))
roc_nix_mac_link_event_start_stop(&dev->nix, false);
/* Disable Rx via NPC */
roc_nix_npc_rx_ena_dis(&dev->nix, false);
/* Stop rx queues and free up pkts pending */
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rc = dev_ops->rx_queue_stop(eth_dev, i);
if (rc)
continue;
rxq = eth_dev->data->rx_queues[i];
count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
while (count) {
for (j = 0; j < count; j++)
rte_pktmbuf_free(rx_pkts[j]);
count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
}
}
/* Stop tx queues */
for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
dev_ops->tx_queue_stop(eth_dev, i);
return 0;
}
int
cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
int rc, i;
/* Start rx queues */
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rc = cnxk_nix_rx_queue_start(eth_dev, i);
if (rc)
return rc;
}
/* Start tx queues */
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
rc = cnxk_nix_tx_queue_start(eth_dev, i);
if (rc)
return rc;
}
/* Enable Rx in NPC */
rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
if (rc) {
plt_err("Failed to enable NPC rx %d", rc);
return rc;
}
cnxk_nix_toggle_flag_link_cfg(dev, true);
/* Start link change events */
if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
if (rc) {
plt_err("Failed to start cgx link event %d", rc);
goto rx_disable;
}
}
cnxk_nix_toggle_flag_link_cfg(dev, false);
return 0;
rx_disable:
roc_nix_npc_rx_ena_dis(&dev->nix, false);
cnxk_nix_toggle_flag_link_cfg(dev, false);
return rc;
}
/* CNXK platform independent eth dev ops */
struct eth_dev_ops cnxk_eth_dev_ops = {
.dev_infos_get = cnxk_nix_info_get,
.link_update = cnxk_nix_link_update,
.tx_queue_release = cnxk_nix_tx_queue_release,
.rx_queue_release = cnxk_nix_rx_queue_release,
.dev_stop = cnxk_nix_dev_stop,
.tx_queue_start = cnxk_nix_tx_queue_start,
.rx_queue_start = cnxk_nix_rx_queue_start,
.rx_queue_stop = cnxk_nix_rx_queue_stop,

File: drivers/net/cnxk/cnxk_ethdev.h

@@ -229,6 +229,7 @@ int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp);
int cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid);
int cnxk_nix_dev_start(struct rte_eth_dev *eth_dev);
uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);
@@ -237,6 +238,7 @@ uint32_t cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
uint8_t rss_level);
/* Link */
void cnxk_nix_toggle_flag_link_cfg(struct cnxk_eth_dev *dev, bool set);
void cnxk_eth_dev_link_status_cb(struct roc_nix *nix,
struct roc_nix_link_info *link);
int cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete);

File: drivers/net/cnxk/cnxk_link.c

@@ -4,6 +4,17 @@
#include "cnxk_ethdev.h"
void
cnxk_nix_toggle_flag_link_cfg(struct cnxk_eth_dev *dev, bool set)
{
if (set)
dev->flags |= CNXK_LINK_CFG_IN_PROGRESS_F;
else
dev->flags &= ~CNXK_LINK_CFG_IN_PROGRESS_F;
rte_wmb();
}
static inline int
nix_wait_for_link_cfg(struct cnxk_eth_dev *dev)
{