/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_dev.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_ether.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_kvargs.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_flow.h"
#include "sfc_dp.h"
#include "sfc_dp_rx.h"

uint32_t sfc_logtype_driver;

static struct sfc_dp_list sfc_dp_head =
	TAILQ_HEAD_INITIALIZER(sfc_dp_head);


static void sfc_eth_dev_clear_ops(struct rte_eth_dev *dev);

static int
sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	efx_nic_fw_info_t enfi;
	int ret;
	int rc;

	rc = efx_nic_get_fw_version(sa->nic, &enfi);
	if (rc != 0)
		return -rc;

	ret = snprintf(fw_version, fw_size,
		       "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16,
		       enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1],
		       enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]);
	if (ret < 0)
		return ret;

	if (enfi.enfi_dpcpu_fw_ids_valid) {
		size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret);
		int ret_extra;

		ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset,
				     fw_size - dpcpu_fw_ids_offset,
				     " rx%" PRIx16 " tx%" PRIx16,
				     enfi.enfi_rx_dpcpu_fw_id,
				     enfi.enfi_tx_dpcpu_fw_id);
		if (ret_extra < 0)
			return ret_extra;

		ret += ret_extra;
	}

	if (fw_size < (size_t)(++ret))
		return ret;
	else
		return 0;
}
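
/*
 * A minimal caller-side sketch (illustrative, not part of the driver) of
 * the contract implemented above: rte_eth_dev_fw_version_get() returns 0
 * on success and, when the supplied buffer is too small, a positive value
 * equal to the buffer size that would be required (including the
 * terminating NUL):
 *
 *	char fw[32];
 *	int ret = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *	if (ret > 0)
 *		... retry with a buffer of at least "ret" bytes ...
 */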

static int
sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_rss *rss = &sas->rss;
	struct sfc_mae *mae = &sa->mae;
	uint64_t txq_offloads_def = 0;

	sfc_log_init(sa, "entry");

	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = EFX_MAC_SDU_MAX;

	dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;

	dev_info->max_vfs = sa->sriov.num_vfs;

	/* Autonegotiation may be disabled */
	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_25G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_100G;

	dev_info->max_rx_queues = sa->rxq_max;
	dev_info->max_tx_queues = sa->txq_max;

	/* By default packets are dropped if no descriptors are available */
	dev_info->default_rxconf.rx_drop_en = 1;

	dev_info->rx_queue_offload_capa = sfc_rx_get_queue_offload_caps(sa);

	/*
	 * rx_offload_capa includes both device and queue offloads since
	 * the latter may be requested on a per device basis which makes
	 * sense when some offloads are needed to be set on all queues.
	 */
	dev_info->rx_offload_capa = sfc_rx_get_dev_offload_caps(sa) |
				    dev_info->rx_queue_offload_capa;

	dev_info->tx_queue_offload_capa = sfc_tx_get_queue_offload_caps(sa);

	/*
	 * tx_offload_capa includes both device and queue offloads since
	 * the latter may be requested on a per device basis which makes
	 * sense when some offloads are needed to be set on all queues.
	 */
	dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
				    dev_info->tx_queue_offload_capa;

	if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	dev_info->default_txconf.offloads |= txq_offloads_def;

	if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) {
		uint64_t rte_hf = 0;
		unsigned int i;

		for (i = 0; i < rss->hf_map_nb_entries; ++i)
			rte_hf |= rss->hf_map[i].rte;

		dev_info->reta_size = EFX_RSS_TBL_SIZE;
		dev_info->hash_key_size = EFX_RSS_KEY_SIZE;
		dev_info->flow_type_rss_offloads = rte_hf;
	}

	/* Initialize to hardware limits */
	dev_info->rx_desc_lim.nb_max = sa->rxq_max_entries;
	dev_info->rx_desc_lim.nb_min = sa->rxq_min_entries;
	/* The RXQ hardware requires that the descriptor count is a power
	 * of 2, but rx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->rx_desc_lim.nb_align = sa->rxq_min_entries;

	/* Initialize to hardware limits */
	dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
	dev_info->tx_desc_lim.nb_min = sa->txq_min_entries;
	/*
	 * The TXQ hardware requires that the descriptor count is a power
	 * of 2, but tx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->tx_desc_lim.nb_align = sa->txq_min_entries;

	if (sap->dp_rx->get_dev_info != NULL)
		sap->dp_rx->get_dev_info(dev_info);
	if (sap->dp_tx->get_dev_info != NULL)
		sap->dp_tx->get_dev_info(dev_info);

	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	if (mae->status == SFC_MAE_STATUS_SUPPORTED) {
		dev_info->switch_info.name = dev->device->driver->name;
		dev_info->switch_info.domain_id = mae->switch_domain_id;
		dev_info->switch_info.port_id = mae->switch_port_id;
	}

	return 0;
}
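
/*
 * Illustrative note on the descriptor limits reported above (the concrete
 * numbers are an assumption, not a driver guarantee): nb_align can only
 * express "multiple of" constraints, so with a hypothetical minimum of
 * 512 entries a request for 1536 descriptors passes the generic ethdev
 * alignment check (it is a multiple of 512) but is still rejected by the
 * driver's queue setup because it is not a power of 2.
 */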

static const uint32_t *
sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);

	return sap->dp_rx->supported_ptypes_get(sap->shared->tunnel_encaps);
}

static int
sfc_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
		     dev_data->nb_rx_queues, dev_data->nb_tx_queues);

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_CONFIGURED:
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		rc = sfc_configure(sa);
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u to configure",
			sa->state);
		rc = EINVAL;
		break;
	}
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_start(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_eth_link current_link;
	int ret;

	sfc_log_init(sa, "entry");

	if (sa->state != SFC_ADAPTER_STARTED) {
		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
	} else if (wait_to_complete) {
		efx_link_mode_t link_mode;

		if (efx_port_poll(sa->nic, &link_mode) != 0)
			link_mode = EFX_LINK_UNKNOWN;
		sfc_port_link_mode_to_info(link_mode, &current_link);
	} else {
		sfc_ev_mgmt_qpoll(sa);
		rte_eth_linkstatus_get(dev, &current_link);
	}

	ret = rte_eth_linkstatus_set(dev, &current_link);
	if (ret == 0)
		sfc_notice(sa, "Link status is %s",
			   current_link.link_status ? "UP" : "DOWN");

	return ret;
}
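
/*
 * A sketch of the wait_to_complete contract honoured above (illustrative):
 * rte_eth_link_get() invokes this op with wait_to_complete=1, so the PHY
 * is polled synchronously, while rte_eth_link_get_nowait() passes
 * wait_to_complete=0 and only the last state captured via the management
 * event queue poll is reported.
 */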

static int
sfc_dev_stop(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	sfc_stop(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");

	return 0;
}

static int
sfc_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	sfc_stop(sa);
	sfc_adapter_unlock(sa);

	return 0;
}

static void
sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
{
	free(dev->process_private);
	rte_eth_dev_release_port(dev);
}

static int
sfc_dev_close(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "entry");

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		sfc_eth_dev_secondary_clear_ops(dev);
		return 0;
	}

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_STARTED:
		sfc_stop(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_CONFIGURED:
		sfc_close(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u on close", sa->state);
		break;
	}

	/*
	 * Cleanup all resources.
	 * Rollback primary process sfc_eth_dev_init() below.
	 */

	sfc_eth_dev_clear_ops(dev);

	sfc_detach(sa);
	sfc_unprobe(sa);

	sfc_kvargs_cleanup(sa);

	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);

	sfc_log_init(sa, "done");

	/* Required for logging, so cleanup last */
	sa->eth_dev = NULL;

	free(sa);

	return 0;
}

static int
sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
		   boolean_t enabled)
{
	struct sfc_port *port;
	boolean_t *toggle;
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
	const char *desc = (allmulti) ? "all-multi" : "promiscuous";
	int rc = 0;

	sfc_adapter_lock(sa);

	port = &sa->port;
	toggle = (allmulti) ? (&port->allmulti) : (&port->promisc);

	if (*toggle != enabled) {
		*toggle = enabled;

		if (sfc_sa2shared(sa)->isolated) {
			sfc_warn(sa, "isolated mode is active on the port");
			sfc_warn(sa, "the change is to be applied on the next "
				 "start provided that isolated mode is "
				 "disabled prior to the next start");
		} else if ((sa->state == SFC_ADAPTER_STARTED) &&
			   ((rc = sfc_set_rx_mode(sa)) != 0)) {
			*toggle = !(enabled);
			sfc_warn(sa, "Failed to %s %s mode, rc = %d",
				 ((enabled) ? "enable" : "disable"), desc, rc);

			/*
			 * For promiscuous and all-multicast filters a
			 * permission failure should be reported as an
			 * unsupported filter.
			 */
			if (rc == EPERM)
				rc = ENOTSUP;
		}
	}

	sfc_adapter_unlock(sa);
	return rc;
}

static int
sfc_dev_promisc_enable(struct rte_eth_dev *dev)
{
	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_promisc_disable(struct rte_eth_dev *dev)
{
	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
{
	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
{
	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);

	SFC_ASSERT(rc >= 0);
	return -rc;
}
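
/*
 * Note on the error convention (illustrative): sfc_dev_filter_set()
 * returns a positive errno while ethdev callbacks must return negative
 * errno values, hence the -rc in the four thin wrappers above. E.g. an
 * EPERM from the firmware is remapped to ENOTSUP inside
 * sfc_dev_filter_set() and reaches the application as -ENOTSUP from
 * rte_eth_promiscuous_enable().
 */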

static int
sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		   uint16_t nb_rx_desc, unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf,
		   struct rte_mempool *mb_pool)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
		     rx_queue_id, nb_rx_desc, socket_id);

	sfc_adapter_lock(sa);

	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
			  rx_conf, mb_pool);
	if (rc != 0)
		goto fail_rx_qinit;

	dev->data->rx_queues[rx_queue_id] = sas->rxq_info[rx_queue_id].dp;

	sfc_adapter_unlock(sa);

	return 0;

fail_rx_qinit:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static void
sfc_rx_queue_release(void *queue)
{
	struct sfc_dp_rxq *dp_rxq = queue;
	struct sfc_rxq *rxq;
	struct sfc_adapter *sa;
	unsigned int sw_index;

	if (dp_rxq == NULL)
		return;

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	sa = rxq->evq->sa;
	sfc_adapter_lock(sa);

	sw_index = dp_rxq->dpq.queue_id;

	sfc_log_init(sa, "RxQ=%u", sw_index);

	sfc_rx_qfini(sa, sw_index);

	sfc_adapter_unlock(sa);
}

static int
sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		   uint16_t nb_tx_desc, unsigned int socket_id,
		   const struct rte_eth_txconf *tx_conf)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
		     tx_queue_id, nb_tx_desc, socket_id);

	sfc_adapter_lock(sa);

	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
	if (rc != 0)
		goto fail_tx_qinit;

	dev->data->tx_queues[tx_queue_id] = sas->txq_info[tx_queue_id].dp;

	sfc_adapter_unlock(sa);
	return 0;

fail_tx_qinit:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static void
sfc_tx_queue_release(void *queue)
{
	struct sfc_dp_txq *dp_txq = queue;
	struct sfc_txq *txq;
	unsigned int sw_index;
	struct sfc_adapter *sa;

	if (dp_txq == NULL)
		return;

	txq = sfc_txq_by_dp_txq(dp_txq);
	sw_index = dp_txq->dpq.queue_id;

	SFC_ASSERT(txq->evq != NULL);
	sa = txq->evq->sa;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	sfc_adapter_lock(sa);

	sfc_tx_qfini(sa, sw_index);

	sfc_adapter_unlock(sa);
}

/*
 * Some statistics are computed as A - B where A and B each increase
 * monotonically with some hardware counter(s) and the counters are read
 * asynchronously.
 *
 * If packet X is counted in A, but not counted in B yet, the computed
 * value is greater than the real one.
 *
 * If packet X is not counted in A at the moment of reading the counter,
 * but counted in B at the moment of reading the counter, the computed
 * value is less than the real one.
 *
 * However, a counter which goes backward is a worse evil than a slightly
 * inaccurate value, so let's try to guarantee that it never happens
 * except perhaps when the MAC stats are zeroed as a result of a NIC reset.
 */
static void
sfc_update_diff_stat(uint64_t *stat, uint64_t newval)
{
	if ((int64_t)(newval - *stat) > 0 || newval == 0)
		*stat = newval;
}
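
/*
 * Illustration of the check above: the subtraction is done in unsigned
 * 64-bit arithmetic and the result is reinterpreted as signed, so a
 * reading slightly older than the stored value yields a small negative
 * delta and is ignored instead of making the counter go backward.
 * E.g. with *stat == 1000 and newval == 998:
 *
 *	(int64_t)(998 - 1000) == -2, which is not > 0, so the stale
 *	value is dropped;
 *
 * while newval == 0 is always accepted to follow a NIC reset.
 */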

static int
sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	int ret;

	rte_spinlock_lock(&port->mac_stats_lock);

	ret = sfc_port_update_mac_stats(sa);
	if (ret != 0)
		goto unlock;

	mac_stats = port->mac_stats_buf;

	if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
				   EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
		stats->ipackets =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
		stats->opackets =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
		stats->ibytes =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
		stats->obytes =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
		stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
		stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];

		/* CRC is included in these stats, but shouldn't be */
		stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
		stats->obytes -= stats->opackets * RTE_ETHER_CRC_LEN;
	} else {
		stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
		stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
		stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];

		/* CRC is included in these stats, but shouldn't be */
		stats->ibytes -= mac_stats[EFX_MAC_RX_PKTS] * RTE_ETHER_CRC_LEN;
		stats->obytes -= mac_stats[EFX_MAC_TX_PKTS] * RTE_ETHER_CRC_LEN;

		/*
		 * Take into account all the stats which may be supported
		 * on EF10. If some stat is not supported by the current
		 * firmware variant or HW revision, it is guaranteed
		 * to be zero in mac_stats.
		 */
		stats->imissed =
			mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
			mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_TRUNC_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
			mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
			mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
		stats->ierrors =
			mac_stats[EFX_MAC_RX_FCS_ERRORS] +
			mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
			mac_stats[EFX_MAC_RX_JABBER_PKTS];
		/* no oerrors counters supported on EF10 */

		/* Exclude missed, errors and pauses from Rx packets */
		sfc_update_diff_stat(&port->ipackets,
			mac_stats[EFX_MAC_RX_PKTS] -
			mac_stats[EFX_MAC_RX_PAUSE_PKTS] -
			stats->imissed - stats->ierrors);
		stats->ipackets = port->ipackets;
	}

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);
	SFC_ASSERT(ret >= 0);
	return -ret;
}

static int
sfc_stats_reset(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	int rc;

	if (sa->state != SFC_ADAPTER_STARTED) {
		/*
		 * The operation cannot be done if port is not started; it
		 * will be scheduled to be done during the next port start
		 */
		port->mac_stats_reset_pending = B_TRUE;
		return 0;
	}

	rc = sfc_port_reset_mac_stats(sa);
	if (rc != 0)
		sfc_err(sa, "failed to reset statistics (rc = %d)", rc);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int xstats_count)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	int rc;
	unsigned int i;
	int nstats = 0;

	rte_spinlock_lock(&port->mac_stats_lock);

	rc = sfc_port_update_mac_stats(sa);
	if (rc != 0) {
		SFC_ASSERT(rc > 0);
		nstats = -rc;
		goto unlock;
	}

	mac_stats = port->mac_stats_buf;

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats != NULL && nstats < (int)xstats_count) {
				xstats[nstats].id = nstats;
				xstats[nstats].value = mac_stats[i];
			}
			nstats++;
		}
	}

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);

	return nstats;
}

static int
sfc_xstats_get_names(struct rte_eth_dev *dev,
		     struct rte_eth_xstat_name *xstats_names,
		     unsigned int xstats_count)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	unsigned int i;
	unsigned int nstats = 0;

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats_names != NULL && nstats < xstats_count)
				strlcpy(xstats_names[nstats].name,
					efx_mac_stat_name(sa->nic, i),
					sizeof(xstats_names[0].name));
			nstats++;
		}
	}

	return nstats;
}

static int
sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		     uint64_t *values, unsigned int n)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	unsigned int nb_supported = 0;
	unsigned int nb_written = 0;
	unsigned int i;
	int ret;
	int rc;

	if (unlikely(values == NULL) ||
	    unlikely((ids == NULL) && (n < port->mac_stats_nb_supported)))
		return port->mac_stats_nb_supported;

	rte_spinlock_lock(&port->mac_stats_lock);

	rc = sfc_port_update_mac_stats(sa);
	if (rc != 0) {
		SFC_ASSERT(rc > 0);
		ret = -rc;
		goto unlock;
	}

	mac_stats = port->mac_stats_buf;

	for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < n); ++i) {
		if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
			continue;

		if ((ids == NULL) || (ids[nb_written] == nb_supported))
			values[nb_written++] = mac_stats[i];

		++nb_supported;
	}

	ret = nb_written;

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);

	return ret;
}

static int
sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   const uint64_t *ids, unsigned int size)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	unsigned int nb_supported = 0;
	unsigned int nb_written = 0;
	unsigned int i;

	if (unlikely(xstats_names == NULL) ||
	    unlikely((ids == NULL) && (size < port->mac_stats_nb_supported)))
		return port->mac_stats_nb_supported;

	for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < size); ++i) {
		if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
			continue;

		if ((ids == NULL) || (ids[nb_written] == nb_supported)) {
			char *name = xstats_names[nb_written++].name;

			strlcpy(name, efx_mac_stat_name(sa->nic, i),
				sizeof(xstats_names[0].name));
		}

		++nb_supported;
	}

	return nb_written;
}
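
/*
 * Both by_id lookups above effectively require the ids array to be sorted
 * in ascending order of the global xstat index, since ids[nb_written] is
 * compared against the running nb_supported counter during a single
 * forward scan. A caller-side sketch (illustrative only):
 *
 *	uint64_t ids[] = { 0, 3, 7 };
 *	uint64_t vals[RTE_DIM(ids)];
 *
 *	rte_eth_xstats_get_by_id(port_id, ids, vals, RTE_DIM(ids));
 */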

static int
sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	unsigned int wanted_fc, link_fc;

	memset(fc_conf, 0, sizeof(*fc_conf));

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED)
		efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
	else
		link_fc = sa->port.flow_ctrl;

	switch (link_fc) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case EFX_FCNTL_RESPOND:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case EFX_FCNTL_GENERATE:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
		fc_conf->mode = RTE_FC_FULL;
		break;
	default:
		sfc_err(sa, "%s: unexpected flow control value %#x",
			__func__, link_fc);
	}

	fc_conf->autoneg = sa->port.flow_ctrl_autoneg;

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	unsigned int fcntl;
	int rc;

	if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
	    fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
	    fc_conf->mac_ctrl_frame_fwd != 0) {
		sfc_err(sa, "unsupported flow control settings specified");
		rc = EINVAL;
		goto fail_inval;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		fcntl = 0;
		break;
	case RTE_FC_RX_PAUSE:
		fcntl = EFX_FCNTL_RESPOND;
		break;
	case RTE_FC_TX_PAUSE:
		fcntl = EFX_FCNTL_GENERATE;
		break;
	case RTE_FC_FULL:
		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
		break;
	default:
		rc = EINVAL;
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
		if (rc != 0)
			goto fail_mac_fcntl_set;
	}

	port->flow_ctrl = fcntl;
	port->flow_ctrl_autoneg = fc_conf->autoneg;

	sfc_adapter_unlock(sa);

	return 0;

fail_mac_fcntl_set:
	sfc_adapter_unlock(sa);
fail_inval:
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_check_scatter_on_all_rx_queues(struct sfc_adapter *sa, size_t pdu)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	boolean_t scatter_enabled;
	const char *error;
	unsigned int i;

	for (i = 0; i < sas->rxq_count; i++) {
		if ((sas->rxq_info[i].state & SFC_RXQ_INITIALIZED) == 0)
			continue;

		scatter_enabled = (sas->rxq_info[i].type_flags &
				   EFX_RXQ_FLAG_SCATTER);

		if (!sfc_rx_check_scatter(pdu, sa->rxq_ctrl[i].buf_size,
					  encp->enc_rx_prefix_size,
					  scatter_enabled,
					  encp->enc_rx_scatter_max, &error)) {
			sfc_err(sa, "MTU check for RxQ %u failed: %s", i,
				error);
			return EINVAL;
		}
	}

	return 0;
}

static int
sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	size_t pdu = EFX_MAC_PDU(mtu);
	size_t old_pdu;
	int rc;

	sfc_log_init(sa, "mtu=%u", mtu);

	rc = EINVAL;
	if (pdu < EFX_MAC_PDU_MIN) {
		sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MIN);
		goto fail_inval;
	}
	if (pdu > EFX_MAC_PDU_MAX) {
		sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			(unsigned int)EFX_MAC_PDU_MAX);
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	rc = sfc_check_scatter_on_all_rx_queues(sa, pdu);
	if (rc != 0)
		goto fail_check_scatter;

	if (pdu != sa->port.pdu) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			sfc_stop(sa);

			old_pdu = sa->port.pdu;
			sa->port.pdu = pdu;
			rc = sfc_start(sa);
			if (rc != 0)
				goto fail_start;
		} else {
			sa->port.pdu = pdu;
		}
	}

	/*
	 * The driver does not use it, but other PMDs update jumbo frame
	 * flag and max_rx_pkt_len when MTU is set.
	 */
	if (mtu > RTE_ETHER_MTU) {
		struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	}

	dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_start:
	sa->port.pdu = old_pdu;
	if (sfc_start(sa) != 0)
		sfc_err(sa, "cannot start with either new (%u) or old (%u) "
			"PDU max size - port is stopped",
			(unsigned int)pdu, (unsigned int)old_pdu);

fail_check_scatter:
	sfc_adapter_unlock(sa);

fail_inval:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}
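
/*
 * Illustrative note on the MTU/PDU conversion above (the exact overhead
 * is an assumption about the EFX_MAC_PDU() macro, see efx.h): the macro
 * converts an L3 MTU into the on-wire frame size checked against the MAC
 * limits by adding Ethernet framing overhead (header, optional VLAN tag
 * and FCS) plus any padding/rounding done inside the macro, so e.g. a
 * 1500-byte MTU maps to a PDU somewhat above 1518 bytes.
 */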

static int
sfc_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_port *port = &sa->port;
	struct rte_ether_addr *old_addr = &dev->data->mac_addrs[0];
	int rc = 0;

	sfc_adapter_lock(sa);

	if (rte_is_same_ether_addr(mac_addr, &port->default_mac_addr))
		goto unlock;

	/*
	 * Copy the address to the device private data so that
	 * it could be recalled in the case of adapter restart.
	 */
	rte_ether_addr_copy(mac_addr, &port->default_mac_addr);

	/*
	 * Neither of the two following checks can return
	 * an error. The new MAC address is preserved in
	 * the device private data and can be activated
	 * on the next port start if the user prevents
	 * isolated mode from being enabled.
	 */
	if (sfc_sa2shared(sa)->isolated) {
		sfc_warn(sa, "isolated mode is active on the port");
		sfc_warn(sa, "will not set MAC address");
		goto unlock;
	}

	if (sa->state != SFC_ADAPTER_STARTED) {
		sfc_notice(sa, "the port is not started");
		sfc_notice(sa, "the new MAC address will be set on port start");

		goto unlock;
	}

	if (encp->enc_allow_set_mac_with_installed_filters) {
		rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
		if (rc != 0) {
			sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
			goto unlock;
		}

		/*
		 * Changing the MAC address by means of MCDI request
		 * has no effect on received traffic, therefore
		 * we also need to update unicast filters
		 */
		rc = sfc_set_rx_mode_unchecked(sa);
		if (rc != 0) {
			sfc_err(sa, "cannot set filter (rc = %u)", rc);
			/* Rollback the old address */
			(void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes);
			(void)sfc_set_rx_mode_unchecked(sa);
		}
	} else {
		sfc_warn(sa, "cannot set MAC address with filters installed");
		sfc_warn(sa, "adapter will be restarted to pick the new MAC");
		sfc_warn(sa, "(some traffic may be dropped)");

		/*
		 * Since setting MAC address with filters installed is not
		 * allowed on the adapter, the new MAC address will be set
		 * by means of adapter restart. sfc_start() shall retrieve
		 * the new address from the device private data and set it.
		 */
		sfc_stop(sa);
		rc = sfc_start(sa);
		if (rc != 0)
			sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
	}

unlock:
	if (rc != 0)
		rte_ether_addr_copy(old_addr, &port->default_mac_addr);

	sfc_adapter_unlock(sa);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_set_mc_addr_list(struct rte_eth_dev *dev,
		     struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	uint8_t *mc_addrs = port->mcast_addrs;
	int rc;
	unsigned int i;

	if (sfc_sa2shared(sa)->isolated) {
		sfc_err(sa, "isolated mode is active on the port");
		sfc_err(sa, "will not set multicast address list");
		return -ENOTSUP;
	}

	if (mc_addrs == NULL)
		return -ENOBUFS;

	if (nb_mc_addr > port->max_mcast_addrs) {
		sfc_err(sa, "too many multicast addresses: %u > %u",
			nb_mc_addr, port->max_mcast_addrs);
		return -EINVAL;
	}

	for (i = 0; i < nb_mc_addr; ++i) {
		rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
			   EFX_MAC_ADDR_LEN);
		mc_addrs += EFX_MAC_ADDR_LEN;
	}

	port->nb_mcast_addrs = nb_mc_addr;

	if (sa->state != SFC_ADAPTER_STARTED)
		return 0;

	rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
					port->nb_mcast_addrs);
	if (rc != 0)
		sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static void
sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		      struct rte_eth_rxq_info *qinfo)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rxq_info *rxq_info;

	SFC_ASSERT(rx_queue_id < sas->rxq_count);

	rxq_info = &sas->rxq_info[rx_queue_id];

	qinfo->mp = rxq_info->refill_mb_pool;
	qinfo->conf.rx_free_thresh = rxq_info->refill_threshold;
	qinfo->conf.rx_drop_en = 1;
	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
	if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
		qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
		qinfo->scattered_rx = 1;
	}
	qinfo->nb_desc = rxq_info->entries;
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static void
sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		      struct rte_eth_txq_info *qinfo)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_txq_info *txq_info;

	SFC_ASSERT(tx_queue_id < sas->txq_count);

	txq_info = &sas->txq_info[tx_queue_id];

	memset(qinfo, 0, sizeof(*qinfo));

	qinfo->conf.offloads = txq_info->offloads;
	qinfo->conf.tx_free_thresh = txq_info->free_thresh;
	qinfo->conf.tx_deferred_start = txq_info->deferred_start;
	qinfo->nb_desc = txq_info->entries;
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static uint32_t
sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rxq_info *rxq_info;

	SFC_ASSERT(rx_queue_id < sas->rxq_count);
	rxq_info = &sas->rxq_info[rx_queue_id];

	if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
		return 0;

	return sap->dp_rx->qdesc_npending(rxq_info->dp);
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_rx_descriptor_done(void *queue, uint16_t offset)
{
	struct sfc_dp_rxq *dp_rxq = queue;
	const struct sfc_dp_rx *dp_rx;

	dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);

	return offset < dp_rx->qdesc_npending(dp_rxq);
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_rx_descriptor_status(void *queue, uint16_t offset)
{
	struct sfc_dp_rxq *dp_rxq = queue;
	const struct sfc_dp_rx *dp_rx;

	dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);

	return dp_rx->qdesc_status(dp_rxq, offset);
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_tx_descriptor_status(void *queue, uint16_t offset)
{
	struct sfc_dp_txq *dp_txq = queue;
	const struct sfc_dp_tx *dp_tx;

	dp_tx = sfc_dp_tx_by_dp_txq(dp_txq);

	return dp_tx->qdesc_status(dp_txq, offset);
}

static int
sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	sfc_adapter_lock(sa);

	rc = EINVAL;
	if (sa->state != SFC_ADAPTER_STARTED)
		goto fail_not_started;

	if (sas->rxq_info[rx_queue_id].state != SFC_RXQ_INITIALIZED)
		goto fail_not_setup;

	rc = sfc_rx_qstart(sa, rx_queue_id);
	if (rc != 0)
		goto fail_rx_qstart;

	sas->rxq_info[rx_queue_id].deferred_started = B_TRUE;

	sfc_adapter_unlock(sa);

	return 0;

fail_rx_qstart:
fail_not_setup:
fail_not_started:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	sfc_adapter_lock(sa);
	sfc_rx_qstop(sa, rx_queue_id);

	sas->rxq_info[rx_queue_id].deferred_started = B_FALSE;

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "TxQ = %u", tx_queue_id);

	sfc_adapter_lock(sa);

	rc = EINVAL;
	if (sa->state != SFC_ADAPTER_STARTED)
		goto fail_not_started;

	if (sas->txq_info[tx_queue_id].state != SFC_TXQ_INITIALIZED)
		goto fail_not_setup;

	rc = sfc_tx_qstart(sa, tx_queue_id);
	if (rc != 0)
		goto fail_tx_qstart;

	sas->txq_info[tx_queue_id].deferred_started = B_TRUE;

	sfc_adapter_unlock(sa);
	return 0;

fail_tx_qstart:

fail_not_setup:
fail_not_started:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "TxQ = %u", tx_queue_id);

	sfc_adapter_lock(sa);

	sfc_tx_qstop(sa, tx_queue_id);

	sas->txq_info[tx_queue_id].deferred_started = B_FALSE;

	sfc_adapter_unlock(sa);
	return 0;
}
|
|
|
|
|
2017-12-24 10:46:39 +00:00
|
|
|
static efx_tunnel_protocol_t
sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
{
	switch (rte_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		return EFX_TUNNEL_PROTOCOL_VXLAN;
	case RTE_TUNNEL_TYPE_GENEVE:
		return EFX_TUNNEL_PROTOCOL_GENEVE;
	default:
		return EFX_TUNNEL_NPROTOS;
	}
}

enum sfc_udp_tunnel_op_e {
	SFC_UDP_TUNNEL_ADD_PORT,
	SFC_UDP_TUNNEL_DEL_PORT,
};

static int
sfc_dev_udp_tunnel_op(struct rte_eth_dev *dev,
		      struct rte_eth_udp_tunnel *tunnel_udp,
		      enum sfc_udp_tunnel_op_e op)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	efx_tunnel_protocol_t tunnel_proto;
	int rc;

	sfc_log_init(sa, "%s udp_port=%u prot_type=%u",
		     (op == SFC_UDP_TUNNEL_ADD_PORT) ? "add" :
		     (op == SFC_UDP_TUNNEL_DEL_PORT) ? "delete" : "unknown",
		     tunnel_udp->udp_port, tunnel_udp->prot_type);

	tunnel_proto =
		sfc_tunnel_rte_type_to_efx_udp_proto(tunnel_udp->prot_type);
	if (tunnel_proto >= EFX_TUNNEL_NPROTOS) {
		rc = ENOTSUP;
		goto fail_bad_proto;
	}

	sfc_adapter_lock(sa);

	switch (op) {
	case SFC_UDP_TUNNEL_ADD_PORT:
		rc = efx_tunnel_config_udp_add(sa->nic,
					       tunnel_udp->udp_port,
					       tunnel_proto);
		break;
	case SFC_UDP_TUNNEL_DEL_PORT:
		rc = efx_tunnel_config_udp_remove(sa->nic,
						  tunnel_udp->udp_port,
						  tunnel_proto);
		break;
	default:
		rc = EINVAL;
		goto fail_bad_op;
	}

	if (rc != 0)
		goto fail_op;

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_tunnel_reconfigure(sa->nic);
		if (rc == EAGAIN) {
			/*
			 * Configuration is accepted by FW and MC reboot
			 * is initiated to apply the changes. MC reboot
			 * will be handled in the usual way (MC reboot
			 * event on the management event queue and adapter
			 * restart).
			 */
			rc = 0;
		} else if (rc != 0) {
			goto fail_reconfigure;
		}
	}

	sfc_adapter_unlock(sa);
	return 0;

fail_reconfigure:
	/* Remove/restore the entry since the change caused the failure */
	switch (op) {
	case SFC_UDP_TUNNEL_ADD_PORT:
		(void)efx_tunnel_config_udp_remove(sa->nic,
						   tunnel_udp->udp_port,
						   tunnel_proto);
		break;
	case SFC_UDP_TUNNEL_DEL_PORT:
		(void)efx_tunnel_config_udp_add(sa->nic,
						tunnel_udp->udp_port,
						tunnel_proto);
		break;
	}

fail_op:
fail_bad_op:
	sfc_adapter_unlock(sa);

fail_bad_proto:
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *tunnel_udp)
{
	return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_ADD_PORT);
}

static int
sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *tunnel_udp)
{
	return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT);
}

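/*
 * Illustrative sketch (not compiled): registering a VXLAN UDP port from an
 * application; rte_eth_dev_udp_tunnel_port_add() routes into
 * sfc_dev_udp_tunnel_port_add() above. The helper name is hypothetical.
 */
#if 0
static int
example_add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,	/* IANA-assigned VXLAN port */
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
#endif
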
/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rss *rss = &sas->rss;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE)
		return -ENOTSUP;

	/*
	 * Mapping of hash configuration between RTE and EFX is not one-to-one;
	 * hence, conversion is done here to derive the correct set of ETH_RSS
	 * flags which corresponds to the active EFX configuration stored
	 * locally in 'sfc_adapter' and kept up-to-date.
	 */
	rss_conf->rss_hf = sfc_rx_hf_efx_to_rte(rss, rss->hash_types);
	rss_conf->rss_key_len = EFX_RSS_KEY_SIZE;
	if (rss_conf->rss_key != NULL)
		rte_memcpy(rss_conf->rss_key, rss->key, EFX_RSS_KEY_SIZE);

	return 0;
}

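/*
 * Illustrative sketch (not compiled): reading back the active RSS
 * configuration through rte_eth_dev_rss_hash_conf_get(), which ends up in
 * sfc_dev_rss_hash_conf_get() above. The 40-byte key buffer matches
 * EFX_RSS_KEY_SIZE reported by this PMD; the helper name is hypothetical.
 */
#if 0
static int
example_get_rss_conf(uint16_t port_id)
{
	uint8_t key[40];
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = key,
		.rss_key_len = sizeof(key),
	};
	int rc;

	rc = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	/* On success, rss_conf.rss_hf holds the active ETH_RSS_* flags */
	return rc;
}
#endif
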
static int
sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	unsigned int efx_hash_types;
	uint32_t contexts[] = {EFX_RSS_CONTEXT_DEFAULT, rss->dummy_rss_context};
	unsigned int n_contexts;
	unsigned int mode_i = 0;
	unsigned int key_i = 0;
	unsigned int i = 0;
	int rc = 0;

	n_contexts = rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT ? 1 : 2;

	if (sfc_sa2shared(sa)->isolated)
		return -ENOTSUP;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
		sfc_err(sa, "RSS is not available");
		return -ENOTSUP;
	}

	if (rss->channels == 0) {
		sfc_err(sa, "RSS is not configured");
		return -EINVAL;
	}

	if ((rss_conf->rss_key != NULL) &&
	    (rss_conf->rss_key_len != sizeof(rss->key))) {
		sfc_err(sa, "RSS key size is wrong (should be %zu)",
			sizeof(rss->key));
		return -EINVAL;
	}

	sfc_adapter_lock(sa);

	rc = sfc_rx_hf_rte_to_efx(sa, rss_conf->rss_hf, &efx_hash_types);
	if (rc != 0)
		goto fail_rx_hf_rte_to_efx;

	for (mode_i = 0; mode_i < n_contexts; mode_i++) {
		rc = efx_rx_scale_mode_set(sa->nic, contexts[mode_i],
					   rss->hash_alg, efx_hash_types,
					   B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;
	}

	if (rss_conf->rss_key != NULL) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			for (key_i = 0; key_i < n_contexts; key_i++) {
				rc = efx_rx_scale_key_set(sa->nic,
							  contexts[key_i],
							  rss_conf->rss_key,
							  sizeof(rss->key));
				if (rc != 0)
					goto fail_scale_key_set;
			}
		}

		rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key));
	}

	rss->hash_types = efx_hash_types;

	sfc_adapter_unlock(sa);

	return 0;

fail_scale_key_set:
	for (i = 0; i < key_i; i++) {
		if (efx_rx_scale_key_set(sa->nic, contexts[i], rss->key,
					 sizeof(rss->key)) != 0)
			sfc_err(sa, "failed to restore RSS key");
	}

fail_scale_mode_set:
	for (i = 0; i < mode_i; i++) {
		if (efx_rx_scale_mode_set(sa->nic, contexts[i],
					  EFX_RX_HASHALG_TOEPLITZ,
					  rss->hash_types, B_TRUE) != 0)
			sfc_err(sa, "failed to restore RSS mode");
	}

fail_rx_hf_rte_to_efx:
	sfc_adapter_unlock(sa);
	return -rc;
}

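/*
 * Illustrative sketch (not compiled): updating the RSS hash types at
 * runtime; rte_eth_dev_rss_hash_update() routes into
 * sfc_dev_rss_hash_update() above. Passing rss_key == NULL keeps the
 * current key, as the code above shows. The helper name is hypothetical.
 */
#if 0
static int
example_set_rss_ip_only(uint16_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* keep the current key */
		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
#endif
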
/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rss *rss = &sas->rss;
	int entry;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || sas->isolated)
		return -ENOTSUP;

	if (rss->channels == 0)
		return -EINVAL;

	if (reta_size != EFX_RSS_TBL_SIZE)
		return -EINVAL;

	for (entry = 0; entry < reta_size; entry++) {
		int grp = entry / RTE_RETA_GROUP_SIZE;
		int grp_idx = entry % RTE_RETA_GROUP_SIZE;

		if ((reta_conf[grp].mask >> grp_idx) & 1)
			reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
	}

	return 0;
}

static int
sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	unsigned int *rss_tbl_new;
	uint16_t entry;
	int rc = 0;

	if (sfc_sa2shared(sa)->isolated)
		return -ENOTSUP;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
		sfc_err(sa, "RSS is not available");
		return -ENOTSUP;
	}

	if (rss->channels == 0) {
		sfc_err(sa, "RSS is not configured");
		return -EINVAL;
	}

	if (reta_size != EFX_RSS_TBL_SIZE) {
		sfc_err(sa, "RETA size is wrong (should be %u)",
			EFX_RSS_TBL_SIZE);
		return -EINVAL;
	}

	rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(rss->tbl), 0);
	if (rss_tbl_new == NULL)
		return -ENOMEM;

	sfc_adapter_lock(sa);

	rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));

	for (entry = 0; entry < reta_size; entry++) {
		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
		struct rte_eth_rss_reta_entry64 *grp;

		grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];

		if (grp->mask & (1ull << grp_idx)) {
			if (grp->reta[grp_idx] >= rss->channels) {
				rc = EINVAL;
				goto bad_reta_entry;
			}
			rss_tbl_new[entry] = grp->reta[grp_idx];
		}
	}

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  rss_tbl_new, EFX_RSS_TBL_SIZE);
		if (rc != 0)
			goto fail_scale_tbl_set;
	}

	rte_memcpy(rss->tbl, rss_tbl_new, sizeof(rss->tbl));

fail_scale_tbl_set:
bad_reta_entry:
	sfc_adapter_unlock(sa);

	rte_free(rss_tbl_new);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

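/*
 * Illustrative sketch (not compiled): steering all traffic to queue 0 by
 * rewriting the full redirection table; rte_eth_dev_rss_reta_update()
 * routes into sfc_dev_rss_reta_update() above, and EFX_RSS_TBL_SIZE is the
 * only reta_size this PMD accepts. The helper name is hypothetical.
 */
#if 0
static int
example_reta_all_to_queue0(uint16_t port_id)
{
	struct rte_eth_rss_reta_entry64 reta_conf[EFX_RSS_TBL_SIZE /
						  RTE_RETA_GROUP_SIZE];
	unsigned int i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < RTE_DIM(reta_conf); i++) {
		reta_conf[i].mask = UINT64_MAX;	/* update all 64 entries */
		/* reta[] is already zeroed, i.e. queue 0 */
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
					   EFX_RSS_TBL_SIZE);
}
#endif
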
static int
sfc_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
		     const struct rte_flow_ops **ops)
{
	*ops = &sfc_flow_ops;
	return 0;
}

static int
sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);

	/*
	 * If Rx datapath does not provide callback to check mempool,
	 * all pools are supported.
	 */
	if (sap->dp_rx->pool_ops_supported == NULL)
		return 1;

	return sap->dp_rx->pool_ops_supported(pool);
}

static int
sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rxq_info *rxq_info;

	SFC_ASSERT(queue_id < sas->rxq_count);
	rxq_info = &sas->rxq_info[queue_id];

	return sap->dp_rx->intr_enable(rxq_info->dp);
}

static int
sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rxq_info *rxq_info;

	SFC_ASSERT(queue_id < sas->rxq_count);
	rxq_info = &sas->rxq_info[queue_id];

	return sap->dp_rx->intr_disable(rxq_info->dp);
}

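/*
 * Illustrative sketch (not compiled): event-driven Rx from an application;
 * requires the port to be configured with intr_conf.rxq = 1 so that the
 * intr_enable/intr_disable callbacks above are wired up. In a real
 * application this also needs rte_ethdev.h and rte_interrupts.h; the
 * helper name and identifiers are hypothetical.
 */
#if 0
static void
example_rx_intr_wait(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event event;

	/* Register the queue event with the thread-local epoll (per queue) */
	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);

	/* Re-arm the interrupt, then sleep until traffic arrives */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
	rte_eth_dev_rx_intr_disable(port_id, queue_id);

	/* Poll the queue with rte_eth_rx_burst() here */
}
#endif
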
static const struct eth_dev_ops sfc_eth_dev_ops = {
	.dev_configure = sfc_dev_configure,
	.dev_start = sfc_dev_start,
	.dev_stop = sfc_dev_stop,
	.dev_set_link_up = sfc_dev_set_link_up,
	.dev_set_link_down = sfc_dev_set_link_down,
	.dev_close = sfc_dev_close,
	.promiscuous_enable = sfc_dev_promisc_enable,
	.promiscuous_disable = sfc_dev_promisc_disable,
	.allmulticast_enable = sfc_dev_allmulti_enable,
	.allmulticast_disable = sfc_dev_allmulti_disable,
	.link_update = sfc_dev_link_update,
	.stats_get = sfc_stats_get,
	.stats_reset = sfc_stats_reset,
	.xstats_get = sfc_xstats_get,
	.xstats_reset = sfc_stats_reset,
	.xstats_get_names = sfc_xstats_get_names,
	.dev_infos_get = sfc_dev_infos_get,
	.dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
	.mtu_set = sfc_dev_set_mtu,
	.rx_queue_start = sfc_rx_queue_start,
	.rx_queue_stop = sfc_rx_queue_stop,
	.tx_queue_start = sfc_tx_queue_start,
	.tx_queue_stop = sfc_tx_queue_stop,
	.rx_queue_setup = sfc_rx_queue_setup,
	.rx_queue_release = sfc_rx_queue_release,
	.rx_queue_intr_enable = sfc_rx_queue_intr_enable,
	.rx_queue_intr_disable = sfc_rx_queue_intr_disable,
	.tx_queue_setup = sfc_tx_queue_setup,
	.tx_queue_release = sfc_tx_queue_release,
	.flow_ctrl_get = sfc_flow_ctrl_get,
	.flow_ctrl_set = sfc_flow_ctrl_set,
	.mac_addr_set = sfc_mac_addr_set,
	.udp_tunnel_port_add = sfc_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del = sfc_dev_udp_tunnel_port_del,
	.reta_update = sfc_dev_rss_reta_update,
	.reta_query = sfc_dev_rss_reta_query,
	.rss_hash_update = sfc_dev_rss_hash_update,
	.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
	.flow_ops_get = sfc_dev_flow_ops_get,
	.set_mc_addr_list = sfc_set_mc_addr_list,
	.rxq_info_get = sfc_rx_queue_info_get,
	.txq_info_get = sfc_tx_queue_info_get,
	.fw_version_get = sfc_fw_version_get,
	.xstats_get_by_id = sfc_xstats_get_by_id,
	.xstats_get_names_by_id = sfc_xstats_get_names_by_id,
	.pool_ops_supported = sfc_pool_ops_supported,
};

/**
 * Duplicate a string in potentially shared memory required for
 * multi-process support.
 *
 * strdup() allocates from process-local heap/memory.
 */
static char *
sfc_strdup(const char *str)
{
	size_t size;
	char *copy;

	if (str == NULL)
		return NULL;

	size = strlen(str) + 1;
	copy = rte_malloc(__func__, size, 0);
	if (copy != NULL)
		rte_memcpy(copy, str, size);

	return copy;
}

static int
sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	const struct sfc_dp_rx *dp_rx;
	const struct sfc_dp_tx *dp_tx;
	const efx_nic_cfg_t *encp;
	unsigned int avail_caps = 0;
	const char *rx_name = NULL;
	const char *tx_name = NULL;
	int rc;

	switch (sa->family) {
	case EFX_FAMILY_HUNTINGTON:
	case EFX_FAMILY_MEDFORD:
	case EFX_FAMILY_MEDFORD2:
		avail_caps |= SFC_DP_HW_FW_CAP_EF10;
		avail_caps |= SFC_DP_HW_FW_CAP_RX_EFX;
		avail_caps |= SFC_DP_HW_FW_CAP_TX_EFX;
		break;
	case EFX_FAMILY_RIVERHEAD:
		avail_caps |= SFC_DP_HW_FW_CAP_EF100;
		break;
	default:
		break;
	}

	encp = efx_nic_cfg_get(sa->nic);
	if (encp->enc_rx_es_super_buffer_supported)
		avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER;

	rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
				sfc_kvarg_string_handler, &rx_name);
	if (rc != 0)
		goto fail_kvarg_rx_datapath;

	if (rx_name != NULL) {
		dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
		if (dp_rx == NULL) {
			sfc_err(sa, "Rx datapath %s not found", rx_name);
			rc = ENOENT;
			goto fail_dp_rx;
		}
		if (!sfc_dp_match_hw_fw_caps(&dp_rx->dp, avail_caps)) {
			sfc_err(sa,
				"Insufficient Hw/FW capabilities to use Rx datapath %s",
				rx_name);
			rc = EINVAL;
			goto fail_dp_rx_caps;
		}
	} else {
		dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
		if (dp_rx == NULL) {
			sfc_err(sa, "Rx datapath by caps %#x not found",
				avail_caps);
			rc = ENOENT;
			goto fail_dp_rx;
		}
	}

	sas->dp_rx_name = sfc_strdup(dp_rx->dp.name);
	if (sas->dp_rx_name == NULL) {
		rc = ENOMEM;
		goto fail_dp_rx_name;
	}

	sfc_notice(sa, "use %s Rx datapath", sas->dp_rx_name);

	rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
				sfc_kvarg_string_handler, &tx_name);
	if (rc != 0)
		goto fail_kvarg_tx_datapath;

	if (tx_name != NULL) {
		dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
		if (dp_tx == NULL) {
			sfc_err(sa, "Tx datapath %s not found", tx_name);
			rc = ENOENT;
			goto fail_dp_tx;
		}
		if (!sfc_dp_match_hw_fw_caps(&dp_tx->dp, avail_caps)) {
			sfc_err(sa,
				"Insufficient Hw/FW capabilities to use Tx datapath %s",
				tx_name);
			rc = EINVAL;
			goto fail_dp_tx_caps;
		}
	} else {
		dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
		if (dp_tx == NULL) {
			sfc_err(sa, "Tx datapath by caps %#x not found",
				avail_caps);
			rc = ENOENT;
			goto fail_dp_tx;
		}
	}

	sas->dp_tx_name = sfc_strdup(dp_tx->dp.name);
	if (sas->dp_tx_name == NULL) {
		rc = ENOMEM;
		goto fail_dp_tx_name;
	}

	sfc_notice(sa, "use %s Tx datapath", sas->dp_tx_name);

	sa->priv.dp_rx = dp_rx;
	sa->priv.dp_tx = dp_tx;

	dev->rx_pkt_burst = dp_rx->pkt_burst;
	dev->tx_pkt_prepare = dp_tx->pkt_prepare;
	dev->tx_pkt_burst = dp_tx->pkt_burst;

	dev->rx_queue_count = sfc_rx_queue_count;
	dev->rx_descriptor_done = sfc_rx_descriptor_done;
	dev->rx_descriptor_status = sfc_rx_descriptor_status;
	dev->tx_descriptor_status = sfc_tx_descriptor_status;
	dev->dev_ops = &sfc_eth_dev_ops;

	return 0;

fail_dp_tx_name:
fail_dp_tx_caps:
fail_dp_tx:
fail_kvarg_tx_datapath:
	rte_free(sas->dp_rx_name);
	sas->dp_rx_name = NULL;

fail_dp_rx_name:
fail_dp_rx_caps:
fail_dp_rx:
fail_kvarg_rx_datapath:
	return rc;
}

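/*
 * Illustrative sketch (not compiled): forcing a specific datapath instead
 * of the capability-based default chosen in sfc_eth_dev_set_ops() above.
 * The PCI address is hypothetical; the kvarg names are the ones this file
 * registers with RTE_PMD_REGISTER_PARAM_STRING() below.
 */
#if 0
static const char *example_eal_argv[] = {
	"app",
	"-a", "0000:01:00.0,rx_datapath=ef10,tx_datapath=ef10",
};
#endif
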
static void
sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);

	dev->dev_ops = NULL;
	dev->tx_pkt_prepare = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(sas->dp_tx_name);
	sas->dp_tx_name = NULL;
	sa->priv.dp_tx = NULL;

	rte_free(sas->dp_rx_name);
	sas->dp_rx_name = NULL;
	sa->priv.dp_rx = NULL;
}

static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
	.dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
	.reta_query = sfc_dev_rss_reta_query,
	.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
	.rxq_info_get = sfc_rx_queue_info_get,
	.txq_info_get = sfc_tx_queue_info_get,
};

static int
sfc_eth_dev_secondary_init(struct rte_eth_dev *dev, uint32_t logtype_main)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter_priv *sap;
	const struct sfc_dp_rx *dp_rx;
	const struct sfc_dp_tx *dp_tx;
	int rc;

	/*
	 * Allocate process private data from heap, since it should not
	 * be located in shared memory allocated using rte_malloc() API.
	 */
	sap = calloc(1, sizeof(*sap));
	if (sap == NULL) {
		rc = ENOMEM;
		goto fail_alloc_priv;
	}

	sap->logtype_main = logtype_main;

	dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sas->dp_rx_name);
	if (dp_rx == NULL) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"cannot find %s Rx datapath", sas->dp_rx_name);
		rc = ENOENT;
		goto fail_dp_rx;
	}
	if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"%s Rx datapath does not support multi-process",
			sas->dp_rx_name);
		rc = EINVAL;
		goto fail_dp_rx_multi_process;
	}

	dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sas->dp_tx_name);
	if (dp_tx == NULL) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"cannot find %s Tx datapath", sas->dp_tx_name);
		rc = ENOENT;
		goto fail_dp_tx;
	}
	if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"%s Tx datapath does not support multi-process",
			sas->dp_tx_name);
		rc = EINVAL;
		goto fail_dp_tx_multi_process;
	}

	sap->dp_rx = dp_rx;
	sap->dp_tx = dp_tx;

	dev->process_private = sap;
	dev->rx_pkt_burst = dp_rx->pkt_burst;
	dev->tx_pkt_prepare = dp_tx->pkt_prepare;
	dev->tx_pkt_burst = dp_tx->pkt_burst;
	dev->rx_queue_count = sfc_rx_queue_count;
	dev->rx_descriptor_done = sfc_rx_descriptor_done;
	dev->rx_descriptor_status = sfc_rx_descriptor_status;
	dev->tx_descriptor_status = sfc_tx_descriptor_status;
	dev->dev_ops = &sfc_eth_dev_secondary_ops;

	return 0;

fail_dp_tx_multi_process:
fail_dp_tx:
fail_dp_rx_multi_process:
fail_dp_rx:
	free(sap);

fail_alloc_priv:
	return rc;
}

static void
sfc_register_dp(void)
{
	/* Register once */
	if (TAILQ_EMPTY(&sfc_dp_head)) {
		/* Prefer EF100 and EF10 datapaths over the generic efx one */
		sfc_dp_register(&sfc_dp_head, &sfc_ef100_rx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);

		sfc_dp_register(&sfc_dp_head, &sfc_ef100_tx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
	}
}

static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint32_t logtype_main;
	struct sfc_adapter *sa;
	int rc;
	const efx_nic_cfg_t *encp;
	const struct rte_ether_addr *from;
	int ret;

	if (sfc_efx_dev_class_get(pci_dev->device.devargs) !=
	    SFC_EFX_DEV_CLASS_NET) {
		SFC_GENERIC_LOG(DEBUG,
			"Incompatible device class: skip probing, should be probed by other sfc driver.");
		return 1;
	}

	sfc_register_dp();

	logtype_main = sfc_register_logtype(&pci_dev->addr,
					    SFC_LOGTYPE_MAIN_STR,
					    RTE_LOG_NOTICE);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -sfc_eth_dev_secondary_init(dev, logtype_main);

	/* Required for logging */
	ret = snprintf(sas->log_prefix, sizeof(sas->log_prefix),
		       "PMD: sfc_efx " PCI_PRI_FMT " #%" PRIu16 ": ",
		       pci_dev->addr.domain, pci_dev->addr.bus,
		       pci_dev->addr.devid, pci_dev->addr.function,
		       dev->data->port_id);
	if (ret < 0 || ret >= (int)sizeof(sas->log_prefix)) {
		SFC_GENERIC_LOG(ERR,
			"reserved log prefix is too short for " PCI_PRI_FMT,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);
		return -EINVAL;
	}
	sas->pci_addr = pci_dev->addr;
	sas->port_id = dev->data->port_id;

	/*
	 * Allocate process private data from heap, since it should not
	 * be located in shared memory allocated using rte_malloc() API.
	 */
	sa = calloc(1, sizeof(*sa));
	if (sa == NULL) {
		rc = ENOMEM;
		goto fail_alloc_sa;
	}

	dev->process_private = sa;

	/* Required for logging */
	sa->priv.shared = sas;
	sa->priv.logtype_main = logtype_main;

	sa->eth_dev = dev;

	/* Copy PCI device info to the dev->data */
	rte_eth_copy_pci_info(dev, pci_dev);
	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
	dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;

	rc = sfc_kvargs_parse(sa);
	if (rc != 0)
		goto fail_kvargs_parse;

	sfc_log_init(sa, "entry");

	dev->data->mac_addrs = rte_zmalloc("sfc", RTE_ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		rc = ENOMEM;
		goto fail_mac_addrs;
	}

	sfc_adapter_lock_init(sa);
	sfc_adapter_lock(sa);

	sfc_log_init(sa, "probing");
	rc = sfc_probe(sa);
	if (rc != 0)
		goto fail_probe;

	sfc_log_init(sa, "set device ops");
	rc = sfc_eth_dev_set_ops(dev);
	if (rc != 0)
		goto fail_set_ops;

	sfc_log_init(sa, "attaching");
	rc = sfc_attach(sa);
	if (rc != 0)
		goto fail_attach;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * The arguments are in reverse order compared to the Linux
	 * kernel. Copy from NIC config to Ethernet device data.
	 */
	from = (const struct rte_ether_addr *)(encp->enc_mac_addr);
	rte_ether_addr_copy(from, &dev->data->mac_addrs[0]);

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_attach:
	sfc_eth_dev_clear_ops(dev);

fail_set_ops:
	sfc_unprobe(sa);

fail_probe:
	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

fail_mac_addrs:
	sfc_kvargs_cleanup(sa);

fail_kvargs_parse:
	sfc_log_init(sa, "failed %d", rc);
	dev->process_private = NULL;
	free(sa);

fail_alloc_sa:
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
	sfc_dev_close(dev);

	return 0;
}

static const struct rte_pci_id pci_id_sfc_efx_map[] = {
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_XILINX, EFX_PCI_DEVID_RIVERHEAD) },
	{ .vendor_id = 0 /* sentinel */ }
};

static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct sfc_adapter_shared), sfc_eth_dev_init);
}

static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit);
}

static struct rte_pci_driver sfc_efx_pmd = {
	.id_table = pci_id_sfc_efx_map,
	.drv_flags =
		RTE_PCI_DRV_INTR_LSC |
		RTE_PCI_DRV_NEED_MAPPING,
	.probe = sfc_eth_dev_pci_probe,
	.remove = sfc_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
	SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
	SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
	SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
	SFC_KVARG_FW_VARIANT "=" SFC_KVARG_VALUES_FW_VARIANT " "
	SFC_KVARG_RXD_WAIT_TIMEOUT_NS "=<long> "
	SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long>");

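/*
 * Illustrative sketch (not compiled): combining several of the parameters
 * registered above in one devargs string. The PCI address and the chosen
 * values are hypothetical.
 */
#if 0
static const char *example_devargs =
	"0000:01:00.0,perf_profile=throughput,stats_update_period_ms=500";
#endif
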
RTE_INIT(sfc_driver_register_logtype)
{
	int ret;

	ret = rte_log_register_type_and_pick_level(SFC_LOGTYPE_PREFIX "driver",
						   RTE_LOG_NOTICE);
	sfc_logtype_driver = (ret < 0) ? RTE_LOGTYPE_PMD : ret;
}