net/ice: add DCF hardware initialization
Introduce the DCF (Device Config Function) feature in the ice PMD, it works as a standalone PMD which doesn't handle the packet Rx/Tx related things. Its hardware entity is the VF. Add the basic DCF hardware initialization, this is specified by devarg 'cap=dcf'. Signed-off-by: Haiyue Wang <haiyue.wang@intel.com> Acked-by: Qi Zhang <qi.z.zhang@intel.com>
This commit is contained in:
parent
4cce7422dd
commit
7564d55096
@ -240,6 +240,53 @@ report a MDD event and drop the packets.
|
||||
|
||||
The APPs based on DPDK should avoid providing such packets.
|
||||
|
||||
Device Config Function (DCF)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This section demonstrates ICE DCF PMD, which shares the core module with ICE
|
||||
PMD and iAVF PMD.
|
||||
|
||||
A DCF (Device Config Function) PMD bounds to the device's trusted VF with ID 0,
|
||||
it can act as a sole controlling entity to exercise advanced functionality (such
|
||||
as switch, ACL) for the rest of the VFs.
|
||||
|
||||
The DCF PMD needs to advertise and acquire DCF capability which allows DCF to
|
||||
send AdminQ commands that it would like to execute over to the PF and receive
|
||||
responses for the same from PF.
|
||||
|
||||
.. _figure_ice_dcf:
|
||||
|
||||
.. figure:: img/ice_dcf.*
|
||||
|
||||
DCF Communication flow.
|
||||
|
||||
#. Create the VFs::
|
||||
|
||||
echo 4 > /sys/bus/pci/devices/0000\:18\:00.0/sriov_numvfs
|
||||
|
||||
#. Enable the VF0 trust on::
|
||||
|
||||
ip link set dev enp24s0f0 vf 0 trust on
|
||||
|
||||
#. Bind the VF0, and run testpmd with 'cap=dcf' devarg::
|
||||
|
||||
testpmd -l 22-25 -n 4 -w 18:01.0,cap=dcf -- -i
|
||||
|
||||
#. Monitor the VF2 interface network traffic::
|
||||
|
||||
tcpdump -e -nn -i enp24s1f2
|
||||
|
||||
#. Create one flow to redirect the traffic to VF2 by DCF::
|
||||
|
||||
flow create 0 priority 0 ingress pattern eth / ipv4 src is 192.168.0.2 \
|
||||
dst is 192.168.0.3 / end actions vf id 2 / end
|
||||
|
||||
#. Send the packet, and it should be displayed on tcpdump::
|
||||
|
||||
sendp(Ether(src='3c:fd:fe:aa:bb:78', dst='00:00:00:01:02:03')/IP(src=' \
|
||||
192.168.0.2', dst="192.168.0.3")/TCP(flags='S')/Raw(load='XXXXXXXXXX'), \
|
||||
iface="enp24s0f0", count=10)
|
||||
|
||||
Sample Application Notes
|
||||
------------------------
|
||||
|
||||
|
516
doc/guides/nics/img/ice_dcf.svg
Normal file
516
doc/guides/nics/img/ice_dcf.svg
Normal file
File diff suppressed because one or more lines are too long
After Width: | Height: | Size: 53 KiB |
@ -82,6 +82,12 @@ New Features
|
||||
(enqueue/dequeue start; enqueue/dequeue finish). That allows user to inspect
|
||||
objects in the ring without removing them from it (aka MT safe peek).
|
||||
|
||||
* **Updated the Intel ice driver.**
|
||||
|
||||
Updated the Intel ice driver with new features and improvements, including:
|
||||
|
||||
* Added support for DCF (Device Config Function) feature.
|
||||
|
||||
* **Updated Mellanox mlx5 driver.**
|
||||
|
||||
Updated Mellanox mlx5 driver with new features and improvements, including:
|
||||
|
@ -31,6 +31,7 @@ DIRS-y += dpaax
|
||||
endif
|
||||
|
||||
IAVF-y := $(CONFIG_RTE_LIBRTE_IAVF_PMD)
|
||||
IAVF-y += $(CONFIG_RTE_LIBRTE_ICE_PMD)
|
||||
ifneq (,$(findstring y,$(IAVF-y)))
|
||||
DIRS-y += iavf
|
||||
endif
|
||||
|
@ -10,9 +10,11 @@ LIB = librte_pmd_ice.a
|
||||
|
||||
CFLAGS += -O3
|
||||
CFLAGS += $(WERROR_FLAGS)
|
||||
CFLAGS += -I$(RTE_SDK)/drivers/common/iavf
|
||||
|
||||
LDLIBS += -lrte_eal -lrte_mbuf -lrte_ethdev -lrte_kvargs
|
||||
LDLIBS += -lrte_bus_pci -lrte_mempool -lrte_hash
|
||||
LDLIBS += -lrte_net -lrte_common_iavf
|
||||
|
||||
EXPORT_MAP := rte_pmd_ice_version.map
|
||||
|
||||
@ -83,6 +85,9 @@ ifeq ($(CC_AVX2_SUPPORT), 1)
|
||||
endif
|
||||
SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
|
||||
|
||||
SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_dcf.c
|
||||
SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_dcf_ethdev.c
|
||||
|
||||
# install this header file
|
||||
SYMLINK-$(CONFIG_RTE_LIBRTE_ICE_PMD)-include := rte_pmd_ice.h
|
||||
|
||||
|
474
drivers/net/ice/ice_dcf.c
Normal file
474
drivers/net/ice/ice_dcf.c
Normal file
@ -0,0 +1,474 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause
|
||||
* Copyright(c) 2020 Intel Corporation
|
||||
*/
|
||||
|
||||
#include <sys/queue.h>
|
||||
#include <stdio.h>
|
||||
#include <errno.h>
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
#include <stdarg.h>
|
||||
#include <inttypes.h>
|
||||
#include <rte_byteorder.h>
|
||||
#include <rte_common.h>
|
||||
|
||||
#include <rte_pci.h>
|
||||
#include <rte_atomic.h>
|
||||
#include <rte_eal.h>
|
||||
#include <rte_ether.h>
|
||||
#include <rte_ethdev_driver.h>
|
||||
#include <rte_ethdev_pci.h>
|
||||
#include <rte_malloc.h>
|
||||
#include <rte_memzone.h>
|
||||
#include <rte_dev.h>
|
||||
|
||||
#include "ice_dcf.h"
|
||||
|
||||
#define ICE_DCF_AQ_LEN 32
|
||||
#define ICE_DCF_AQ_BUF_SZ 4096
|
||||
|
||||
#define ICE_DCF_ARQ_MAX_RETRIES 200
|
||||
#define ICE_DCF_ARQ_CHECK_TIME 2 /* msecs */
|
||||
|
||||
#define ICE_DCF_VF_RES_BUF_SZ \
|
||||
(sizeof(struct virtchnl_vf_resource) + \
|
||||
IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource))
|
||||
|
||||
/* Send a virtchnl request to the PF over the AdminQ without using the
 * interrupt path; the caller is expected to poll for the response with
 * ice_dcf_recv_cmd_rsp_no_irq().  Used during early init, before IRQ0
 * is enabled.
 */
static __rte_always_inline int
ice_dcf_send_cmd_req_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
			    uint8_t *req_msg, uint16_t req_msglen)
{
	return iavf_aq_send_msg_to_pf(&hw->avf, op, IAVF_SUCCESS,
				      req_msg, req_msglen, NULL);
}
|
||||
|
||||
/* Poll the AdminQ receive ring for the response to opcode @op.
 *
 * The response payload is copied by iavf_clean_arq_element() into
 * @rsp_msgbuf (at most @rsp_buflen bytes); the actual length is stored
 * in @rsp_msglen when non-NULL.  Events carrying a different opcode are
 * skipped and polling continues.
 *
 * Returns the PF status code from the event descriptor on success, or
 * -EIO after ICE_DCF_ARQ_MAX_RETRIES polls without a matching response.
 */
static int
ice_dcf_recv_cmd_rsp_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
			    uint8_t *rsp_msgbuf, uint16_t rsp_buflen,
			    uint16_t *rsp_msglen)
{
	struct iavf_arq_event_info event;
	enum virtchnl_ops v_op;
	int i = 0;
	int err;

	event.buf_len = rsp_buflen;
	event.msg_buf = rsp_msgbuf;

	do {
		err = iavf_clean_arq_element(&hw->avf, &event, NULL);
		if (err != IAVF_SUCCESS)
			goto again;

		/* Not the opcode we are waiting for - keep polling. */
		v_op = rte_le_to_cpu_32(event.desc.cookie_high);
		if (v_op != op)
			goto again;

		if (rsp_msglen != NULL)
			*rsp_msglen = event.msg_len;
		/* cookie_low carries the PF's status for this request. */
		return rte_le_to_cpu_32(event.desc.cookie_low);

again:
		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

	return -EIO;
}
|
||||
|
||||
/* Remove a command from the pending virtchnl queue once the caller is
 * done with it (completed or timed out).
 */
static __rte_always_inline void
ice_dcf_aq_cmd_clear(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_REMOVE(&hw->vc_cmd_queue, cmd, next);

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}
|
||||
|
||||
/* Initialize a command's completion state and queue it so the interrupt
 * handler (ice_dcf_aq_cmd_handle) can match the PF's response to it.
 */
static __rte_always_inline void
ice_dcf_vc_cmd_set(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	/* Assume failure until the IRQ handler fills in the real status. */
	cmd->v_ret = IAVF_ERR_NOT_READY;
	cmd->rsp_msglen = 0;
	cmd->pending = 1;

	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_INSERT_TAIL(&hw->vc_cmd_queue, cmd, next);

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}
|
||||
|
||||
/* Hand the prepared command's request buffer to the PF via the AdminQ. */
static __rte_always_inline int
ice_dcf_vc_cmd_send(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
{
	return iavf_aq_send_msg_to_pf(&hw->avf,
				      cmd->v_op, IAVF_SUCCESS,
				      cmd->req_msg, cmd->req_msglen, NULL);
}
|
||||
|
||||
/* Match one received AdminQ event against the pending command queue and
 * complete the first command waiting on the same virtchnl opcode.
 *
 * Runs from the interrupt handler; the queue lock serializes access
 * with ice_dcf_execute_virtchnl_cmd() on the submitting thread.
 */
static __rte_always_inline void
ice_dcf_aq_cmd_handle(struct ice_dcf_hw *hw, struct iavf_arq_event_info *info)
{
	struct dcf_virtchnl_cmd *cmd;
	enum virtchnl_ops v_op;
	enum iavf_status v_ret;
	uint16_t aq_op;

	aq_op = rte_le_to_cpu_16(info->desc.opcode);
	if (unlikely(aq_op != iavf_aqc_opc_send_msg_to_vf)) {
		PMD_DRV_LOG(ERR,
			    "Request %u is not supported yet", aq_op);
		return;
	}

	/* Unsolicited PF events are not consumed by any pending command. */
	v_op = rte_le_to_cpu_32(info->desc.cookie_high);
	if (unlikely(v_op == VIRTCHNL_OP_EVENT))
		return;

	v_ret = rte_le_to_cpu_32(info->desc.cookie_low);

	rte_spinlock_lock(&hw->vc_cmd_queue_lock);

	TAILQ_FOREACH(cmd, &hw->vc_cmd_queue, next) {
		if (cmd->v_op == v_op && cmd->pending) {
			cmd->v_ret = v_ret;
			/* Truncate the response to the caller's buffer. */
			cmd->rsp_msglen = RTE_MIN(info->msg_len,
						  cmd->rsp_buflen);
			if (likely(cmd->rsp_msglen != 0))
				rte_memcpy(cmd->rsp_msgbuf, info->msg_buf,
					   cmd->rsp_msglen);

			/* prevent compiler reordering: response data must
			 * be written before the waiter observes pending==0
			 */
			rte_compiler_barrier();
			cmd->pending = 0;
			break;
		}
	}

	rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
}
|
||||
|
||||
/* Drain every available event from the AdminQ receive ring and let
 * ice_dcf_aq_cmd_handle() complete any matching pending command.
 */
static void
ice_dcf_handle_virtchnl_msg(struct ice_dcf_hw *hw)
{
	struct iavf_arq_event_info info;
	uint16_t pending = 1;
	int ret;

	info.buf_len = ICE_DCF_AQ_BUF_SZ;
	info.msg_buf = hw->arq_buf;

	/* iavf_clean_arq_element() updates 'pending' with the number of
	 * events still queued; loop until the ring is empty or errors.
	 */
	while (pending) {
		ret = iavf_clean_arq_element(&hw->avf, &info, &pending);
		if (ret != IAVF_SUCCESS)
			break;

		ice_dcf_aq_cmd_handle(hw, &info);
	}
}
|
||||
|
||||
static int
|
||||
ice_dcf_init_check_api_version(struct ice_dcf_hw *hw)
|
||||
{
|
||||
#define ICE_CPF_VIRTCHNL_VERSION_MAJOR_START 1
|
||||
#define ICE_CPF_VIRTCHNL_VERSION_MINOR_START 1
|
||||
struct virtchnl_version_info version, *pver;
|
||||
int err;
|
||||
|
||||
version.major = VIRTCHNL_VERSION_MAJOR;
|
||||
version.minor = VIRTCHNL_VERSION_MINOR;
|
||||
err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_VERSION,
|
||||
(uint8_t *)&version, sizeof(version));
|
||||
if (err) {
|
||||
PMD_INIT_LOG(ERR, "Failed to send OP_VERSION");
|
||||
return err;
|
||||
}
|
||||
|
||||
pver = &hw->virtchnl_version;
|
||||
err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_VERSION,
|
||||
(uint8_t *)pver, sizeof(*pver), NULL);
|
||||
if (err) {
|
||||
PMD_INIT_LOG(ERR, "Failed to get response of OP_VERSION");
|
||||
return -1;
|
||||
}
|
||||
|
||||
PMD_INIT_LOG(DEBUG,
|
||||
"Peer PF API version: %u.%u", pver->major, pver->minor);
|
||||
|
||||
if (pver->major < ICE_CPF_VIRTCHNL_VERSION_MAJOR_START ||
|
||||
(pver->major == ICE_CPF_VIRTCHNL_VERSION_MAJOR_START &&
|
||||
pver->minor < ICE_CPF_VIRTCHNL_VERSION_MINOR_START)) {
|
||||
PMD_INIT_LOG(ERR,
|
||||
"VIRTCHNL API version should not be lower than (%u.%u)",
|
||||
ICE_CPF_VIRTCHNL_VERSION_MAJOR_START,
|
||||
ICE_CPF_VIRTCHNL_VERSION_MAJOR_START);
|
||||
return -1;
|
||||
} else if (pver->major > VIRTCHNL_VERSION_MAJOR ||
|
||||
(pver->major == VIRTCHNL_VERSION_MAJOR &&
|
||||
pver->minor > VIRTCHNL_VERSION_MINOR)) {
|
||||
PMD_INIT_LOG(ERR,
|
||||
"PF/VF API version mismatch:(%u.%u)-(%u.%u)",
|
||||
pver->major, pver->minor,
|
||||
VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
|
||||
return -1;
|
||||
}
|
||||
|
||||
PMD_INIT_LOG(DEBUG, "Peer is supported PF host");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Request the VF resources (VSIs, capabilities) from the PF and locate
 * the LAN (SRIOV) VSI this DCF will use.
 *
 * Polled exchange (no IRQ): sends VIRTCHNL_OP_GET_VF_RESOURCES with the
 * capability flags this PMD wants, then parses the reply into
 * hw->vf_res / hw->vsi_res / hw->vsi_id.
 *
 * Returns 0 on success, negative on send/receive failure or if no
 * SRIOV VSI is reported.
 */
static int
ice_dcf_get_vf_resource(struct ice_dcf_hw *hw)
{
	uint32_t caps;
	int err, i;

	caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING |
	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
	       VF_BASE_MODE_OFFLOADS;

	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
					  (uint8_t *)&caps, sizeof(caps));
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to send msg OP_GET_VF_RESOURCE");
		return err;
	}

	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
					  (uint8_t *)hw->vf_res,
					  ICE_DCF_VF_RES_BUF_SZ, NULL);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to get response of OP_GET_VF_RESOURCE");
		return -1;
	}

	iavf_vf_parse_hw_config(&hw->avf, hw->vf_res);

	/* Scan all reported VSIs; the last SRIOV-type VSI wins. */
	hw->vsi_res = NULL;
	for (i = 0; i < hw->vf_res->num_vsis; i++) {
		if (hw->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			hw->vsi_res = &hw->vf_res->vsi_res[i];
	}

	if (!hw->vsi_res) {
		PMD_DRV_LOG(ERR, "no LAN VSI found");
		return -1;
	}

	hw->vsi_id = hw->vsi_res->vsi_id;
	PMD_DRV_LOG(DEBUG, "VSI ID is %u", hw->vsi_id);

	return 0;
}
|
||||
|
||||
static int
|
||||
ice_dcf_check_reset_done(struct ice_dcf_hw *hw)
|
||||
{
|
||||
#define ICE_DCF_RESET_WAIT_CNT 50
|
||||
struct iavf_hw *avf = &hw->avf;
|
||||
int i, reset;
|
||||
|
||||
for (i = 0; i < ICE_DCF_RESET_WAIT_CNT; i++) {
|
||||
reset = IAVF_READ_REG(avf, IAVF_VFGEN_RSTAT) &
|
||||
IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
|
||||
reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;
|
||||
|
||||
if (reset == VIRTCHNL_VFR_VFACTIVE ||
|
||||
reset == VIRTCHNL_VFR_COMPLETED)
|
||||
break;
|
||||
|
||||
rte_delay_ms(20);
|
||||
}
|
||||
|
||||
if (i >= ICE_DCF_RESET_WAIT_CNT)
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Enable the AdminQ (misc/IRQ0) interrupt so PF responses are delivered
 * through ice_dcf_dev_interrupt_handler().
 */
static inline void
ice_dcf_enable_irq0(struct ice_dcf_hw *hw)
{
	struct iavf_hw *avf = &hw->avf;

	/* Enable admin queue interrupt trigger */
	IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1,
		       IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
	IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
		       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
		       IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);

	/* Flush the register writes to hardware. */
	IAVF_WRITE_FLUSH(avf);
}
|
||||
|
||||
/* Mask the AdminQ (IRQ0) interrupt; used around interrupt handling and
 * on teardown.
 */
static inline void
ice_dcf_disable_irq0(struct ice_dcf_hw *hw)
{
	struct iavf_hw *avf = &hw->avf;

	/* Disable all interrupt types */
	IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1, 0);
	IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
		       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);

	/* Flush the register writes to hardware. */
	IAVF_WRITE_FLUSH(avf);
}
|
||||
|
||||
/* IRQ0 callback registered with the EAL interrupt thread: mask the
 * interrupt, drain pending AdminQ events, then re-enable it.
 */
static void
ice_dcf_dev_interrupt_handler(void *param)
{
	struct ice_dcf_hw *hw = param;

	ice_dcf_disable_irq0(hw);

	ice_dcf_handle_virtchnl_msg(hw);

	ice_dcf_enable_irq0(hw);
}
|
||||
|
||||
/* Execute one virtchnl command synchronously: enqueue it, send it to
 * the PF, and busy-wait (with ms sleeps) until the interrupt handler
 * marks it complete or the retry budget expires.
 *
 * @cmd supplies the opcode and request/response buffers; on success the
 * response (if any) is in cmd->rsp_msgbuf / cmd->rsp_msglen.
 *
 * Returns 0 on success, -EINVAL on inconsistent buffer/length pairs,
 * -1 on send failure, timeout, or a PF error status.
 */
int
ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
			     struct dcf_virtchnl_cmd *cmd)
{
	int i = 0;
	int err;

	/* A buffer pointer and its length must be both set or both zero. */
	if ((cmd->req_msg && !cmd->req_msglen) ||
	    (!cmd->req_msg && cmd->req_msglen) ||
	    (cmd->rsp_msgbuf && !cmd->rsp_buflen) ||
	    (!cmd->rsp_msgbuf && cmd->rsp_buflen))
		return -EINVAL;

	/* Only one command in flight at a time. */
	rte_spinlock_lock(&hw->vc_cmd_send_lock);
	ice_dcf_vc_cmd_set(hw, cmd);

	err = ice_dcf_vc_cmd_send(hw, cmd);
	if (err) {
		PMD_DRV_LOG(ERR, "fail to send cmd %d", cmd->v_op);
		goto ret;
	}

	/* Wait for the IRQ handler to clear cmd->pending. */
	do {
		if (!cmd->pending)
			break;

		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);

	/* v_ret stays IAVF_ERR_NOT_READY on timeout (see vc_cmd_set). */
	if (cmd->v_ret != IAVF_SUCCESS) {
		err = -1;
		PMD_DRV_LOG(ERR,
			    "No response (%d times) or return failure (%d) for cmd %d",
			    i, cmd->v_ret, cmd->v_op);
	}

ret:
	ice_dcf_aq_cmd_clear(hw, cmd);
	rte_spinlock_unlock(&hw->vc_cmd_send_lock);
	return err;
}
|
||||
|
||||
/* Bring up the DCF hardware path for a trusted VF.
 *
 * Fills the iavf base HW structure from the PCI device, initializes the
 * AdminQ, negotiates the virtchnl API version, fetches the VF
 * resources, then registers and enables the IRQ0 interrupt handler.
 *
 * Teardown on failure is staged via the err_alloc/err_api/err labels,
 * unwinding exactly what has been set up so far.
 *
 * Returns 0 on success, -1 on any failure.
 */
int
ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int ret;

	hw->avf.hw_addr = pci_dev->mem_resource[0].addr;
	hw->avf.back = hw;

	hw->avf.bus.bus_id = pci_dev->addr.bus;
	hw->avf.bus.device = pci_dev->addr.devid;
	hw->avf.bus.func = pci_dev->addr.function;

	hw->avf.device_id = pci_dev->id.device_id;
	hw->avf.vendor_id = pci_dev->id.vendor_id;
	hw->avf.subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->avf.subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	hw->avf.aq.num_arq_entries = ICE_DCF_AQ_LEN;
	hw->avf.aq.num_asq_entries = ICE_DCF_AQ_LEN;
	hw->avf.aq.arq_buf_size = ICE_DCF_AQ_BUF_SZ;
	hw->avf.aq.asq_buf_size = ICE_DCF_AQ_BUF_SZ;

	rte_spinlock_init(&hw->vc_cmd_send_lock);
	rte_spinlock_init(&hw->vc_cmd_queue_lock);
	TAILQ_INIT(&hw->vc_cmd_queue);

	hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
	if (hw->arq_buf == NULL) {
		PMD_INIT_LOG(ERR, "unable to allocate AdminQ buffer memory");
		/* rte_free(NULL) at 'err' is a harmless no-op. */
		goto err;
	}

	ret = iavf_set_mac_type(&hw->avf);
	if (ret) {
		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", ret);
		goto err;
	}

	/* The AdminQ cannot be used while the VF reset is in progress. */
	ret = ice_dcf_check_reset_done(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "VF is still resetting");
		goto err;
	}

	ret = iavf_init_adminq(&hw->avf);
	if (ret) {
		PMD_INIT_LOG(ERR, "init_adminq failed: %d", ret);
		goto err;
	}

	if (ice_dcf_init_check_api_version(hw)) {
		PMD_INIT_LOG(ERR, "check_api version failed");
		goto err_api;
	}

	hw->vf_res = rte_zmalloc("vf_res", ICE_DCF_VF_RES_BUF_SZ, 0);
	if (hw->vf_res == NULL) {
		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
		goto err_api;
	}

	if (ice_dcf_get_vf_resource(hw)) {
		PMD_INIT_LOG(ERR, "Failed to get VF resource");
		goto err_alloc;
	}

	/* From here on, AdminQ responses arrive via the IRQ handler. */
	rte_intr_callback_register(&pci_dev->intr_handle,
				   ice_dcf_dev_interrupt_handler, hw);
	rte_intr_enable(&pci_dev->intr_handle);
	ice_dcf_enable_irq0(hw);

	return 0;

err_alloc:
	rte_free(hw->vf_res);
err_api:
	iavf_shutdown_adminq(&hw->avf);
err:
	rte_free(hw->arq_buf);

	return -1;
}
|
||||
|
||||
/* Tear down everything ice_dcf_init_hw() set up: disable and unregister
 * the interrupt, shut down the AdminQ, and free the buffers.
 */
void
ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	ice_dcf_disable_irq0(hw);
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ice_dcf_dev_interrupt_handler, hw);

	iavf_shutdown_adminq(&hw->avf);

	rte_free(hw->arq_buf);
	rte_free(hw->vf_res);
}
|
52
drivers/net/ice/ice_dcf.h
Normal file
52
drivers/net/ice/ice_dcf.h
Normal file
@ -0,0 +1,52 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause
|
||||
* Copyright(c) 2020 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef _ICE_DCF_H_
|
||||
#define _ICE_DCF_H_
|
||||
|
||||
#include <rte_ethdev_driver.h>
|
||||
|
||||
#include <iavf_prototype.h>
|
||||
#include <iavf_adminq_cmd.h>
|
||||
#include <iavf_type.h>
|
||||
|
||||
#include "ice_logs.h"
|
||||
|
||||
/* One outstanding virtchnl request tracked on the DCF command queue. */
struct dcf_virtchnl_cmd {
	TAILQ_ENTRY(dcf_virtchnl_cmd) next;

	enum virtchnl_ops v_op;		/* virtchnl opcode sent to the PF */
	enum iavf_status v_ret;		/* completion status from the PF */

	uint16_t req_msglen;		/* request payload length */
	uint8_t *req_msg;		/* request payload (caller-owned) */

	uint16_t rsp_msglen;		/* actual response length received */
	uint16_t rsp_buflen;		/* capacity of rsp_msgbuf */
	uint8_t *rsp_msgbuf;		/* response buffer (caller-owned) */

	volatile int pending;		/* cleared by IRQ path on completion */
};
|
||||
|
||||
/* Per-device DCF state built on the iavf base HW abstraction. */
struct ice_dcf_hw {
	struct iavf_hw avf;		/* iavf AdminQ/register access */

	rte_spinlock_t vc_cmd_send_lock; /* serializes command submission */
	rte_spinlock_t vc_cmd_queue_lock; /* protects vc_cmd_queue */
	TAILQ_HEAD(, dcf_virtchnl_cmd) vc_cmd_queue; /* pending commands */
	uint8_t *arq_buf;		/* rx buffer for AdminQ events */

	struct virtchnl_version_info virtchnl_version;
	struct virtchnl_vf_resource *vf_res; /* VF resource */
	struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
	uint16_t vsi_id;		/* id of the LAN VSI above */
};
|
||||
|
||||
int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
|
||||
struct dcf_virtchnl_cmd *cmd);
|
||||
|
||||
int ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
|
||||
void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
|
||||
|
||||
#endif /* _ICE_DCF_H_ */
|
317
drivers/net/ice/ice_dcf_ethdev.c
Normal file
317
drivers/net/ice/ice_dcf_ethdev.c
Normal file
@ -0,0 +1,317 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause
|
||||
* Copyright(c) 2020 Intel Corporation
|
||||
*/
|
||||
|
||||
#include <errno.h>
|
||||
#include <stdbool.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include <rte_interrupts.h>
|
||||
#include <rte_debug.h>
|
||||
#include <rte_pci.h>
|
||||
#include <rte_atomic.h>
|
||||
#include <rte_eal.h>
|
||||
#include <rte_ether.h>
|
||||
#include <rte_ethdev_pci.h>
|
||||
#include <rte_kvargs.h>
|
||||
#include <rte_malloc.h>
|
||||
#include <rte_memzone.h>
|
||||
#include <rte_dev.h>
|
||||
|
||||
#include <iavf_devids.h>
|
||||
|
||||
#include "ice_generic_flow.h"
|
||||
#include "ice_dcf_ethdev.h"
|
||||
|
||||
/* Rx burst stub: the DCF PMD does not handle packet Rx (see commit
 * description); always reports zero packets received.
 */
static uint16_t
ice_dcf_recv_pkts(__rte_unused void *rx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}
|
||||
|
||||
/* Tx burst stub: the DCF PMD does not handle packet Tx; always reports
 * zero packets sent.
 */
static uint16_t
ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
		  __rte_unused struct rte_mbuf **bufs,
		  __rte_unused uint16_t nb_pkts)
{
	return 0;
}
|
||||
|
||||
/* Start the (Rx/Tx-less) DCF port: only flips the link status up. */
static int
ice_dcf_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}
|
||||
|
||||
/* Stop the DCF port: only flips the link status down. */
static void
ice_dcf_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
|
||||
|
||||
/* No device-level configuration is needed for the control-only port. */
static int
ice_dcf_dev_configure(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
|
||||
|
||||
/* Report minimal device info: one MAC address and the dummy queue
 * counts sized by the adapter's rxqs/txqs arrays.
 */
static int
ice_dcf_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	dev_info->max_mac_addrs = 1;
	/* (uint32_t)-1: presumably "no Rx length limit" - no datapath. */
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(adapter->rxqs);
	dev_info->max_tx_queues = RTE_DIM(adapter->txqs);

	return 0;
}
|
||||
|
||||
/* Stats stub: no datapath, nothing to report. */
static int
ice_dcf_stats_get(__rte_unused struct rte_eth_dev *dev,
		  __rte_unused struct rte_eth_stats *igb_stats)
{
	return 0;
}
|
||||
|
||||
/* Stats-reset stub: no datapath, nothing to reset. */
static int
ice_dcf_stats_reset(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
|
||||
|
||||
/* Promiscuous-enable stub; accepted but has no effect. */
static int
ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
|
||||
|
||||
/* Promiscuous-disable stub; accepted but has no effect. */
static int
ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
|
||||
|
||||
/* All-multicast-enable stub; accepted but has no effect. */
static int
ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
|
||||
|
||||
/* All-multicast-disable stub; accepted but has no effect. */
static int
ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
{
	return 0;
}
|
||||
|
||||
/* Generic filter control entry point.
 *
 * No filter type is supported yet, so every request fails with
 * -EINVAL; the switch is kept so types can be added case by case.
 */
static int
ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
			enum rte_filter_type filter_type,
			__rte_unused enum rte_filter_op filter_op,
			__rte_unused void *arg)
{
	int ret = 0;

	if (!dev)
		return -EINVAL;

	switch (filter_type) {
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
|
||||
|
||||
/* Close the DCF port: detach ethdev callbacks and tear down the DCF
 * hardware state.  Primary process only.
 */
static void
ice_dcf_dev_close(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;
	/* mac_addrs points into the adapter private struct (set in
	 * ice_dcf_dev_init), not a separate allocation - clear it so
	 * ethdev teardown does not try to free it.
	 */
	dev->data->mac_addrs = NULL;

	ice_dcf_uninit_hw(dev, &adapter->real_hw);
}
|
||||
|
||||
/* Queue-release stub: queues live inside the adapter struct, so there
 * is nothing to free.
 */
static void
ice_dcf_queue_release(__rte_unused void *q)
{
}
|
||||
|
||||
/* Link-update stub: link status is driven by dev_start/dev_stop. */
static int
ice_dcf_link_update(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused int wait_to_complete)
{
	return 0;
}
|
||||
|
||||
/* Point the ethdev Rx queue slot at the adapter's dummy queue; no ring
 * or mempool resources are set up (DCF has no Rx datapath).
 */
static int
ice_dcf_rx_queue_setup(struct rte_eth_dev *dev,
		       uint16_t rx_queue_id,
		       __rte_unused uint16_t nb_rx_desc,
		       __rte_unused unsigned int socket_id,
		       __rte_unused const struct rte_eth_rxconf *rx_conf,
		       __rte_unused struct rte_mempool *mb_pool)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	dev->data->rx_queues[rx_queue_id] = &adapter->rxqs[rx_queue_id];

	return 0;
}
|
||||
|
||||
/* Point the ethdev Tx queue slot at the adapter's dummy queue; no ring
 * resources are set up (DCF has no Tx datapath).
 */
static int
ice_dcf_tx_queue_setup(struct rte_eth_dev *dev,
		       uint16_t tx_queue_id,
		       __rte_unused uint16_t nb_tx_desc,
		       __rte_unused unsigned int socket_id,
		       __rte_unused const struct rte_eth_txconf *tx_conf)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &adapter->txqs[tx_queue_id];

	return 0;
}
|
||||
|
||||
/* ethdev ops table - mostly stubs; the DCF port exists as a control
 * entity, not a datapath port.
 */
static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
	.dev_start               = ice_dcf_dev_start,
	.dev_stop                = ice_dcf_dev_stop,
	.dev_close               = ice_dcf_dev_close,
	.dev_configure           = ice_dcf_dev_configure,
	.dev_infos_get           = ice_dcf_dev_info_get,
	.rx_queue_setup          = ice_dcf_rx_queue_setup,
	.tx_queue_setup          = ice_dcf_tx_queue_setup,
	.rx_queue_release        = ice_dcf_queue_release,
	.tx_queue_release        = ice_dcf_queue_release,
	.link_update             = ice_dcf_link_update,
	.stats_get               = ice_dcf_stats_get,
	.stats_reset             = ice_dcf_stats_reset,
	.promiscuous_enable      = ice_dcf_dev_promiscuous_enable,
	.promiscuous_disable     = ice_dcf_dev_promiscuous_disable,
	.allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
	.allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
	.filter_ctrl             = ice_dcf_dev_filter_ctrl,
};
|
||||
|
||||
/* ethdev init callback: install the stub ops/burst functions, bring up
 * the DCF hardware (primary process only) and publish a random MAC
 * address stored inside the adapter struct.
 */
static int
ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;

	eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
	eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
	eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;

	/* Secondary processes share the primary's initialized state. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
		PMD_INIT_LOG(ERR, "Failed to init DCF hardware");
		return -1;
	}

	rte_eth_random_addr(adapter->mac_addr.addr_bytes);
	eth_dev->data->mac_addrs = &adapter->mac_addr;

	return 0;
}
|
||||
|
||||
/* ethdev uninit callback: close handles the full teardown. */
static int
ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	ice_dcf_dev_close(eth_dev);

	return 0;
}
|
||||
|
||||
static int
|
||||
ice_dcf_cap_check_handler(__rte_unused const char *key,
|
||||
const char *value, __rte_unused void *opaque)
|
||||
{
|
||||
if (strcmp(value, "dcf"))
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Decide whether this device should be claimed by the DCF PMD.
 *
 * Returns 1 only when the devargs contain the key-value pair cap=dcf;
 * 0 otherwise (including missing devargs or parse failure), letting
 * the regular iavf PMD take the device.
 */
static int
ice_dcf_cap_selected(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *key = "cap";
	int ret = 0;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, key))
		goto exit;

	/* dcf capability selected when there's a key-value pair: cap=dcf */
	if (rte_kvargs_process(kvlist, key,
			       ice_dcf_cap_check_handler, NULL) < 0)
		goto exit;

	ret = 1;

exit:
	rte_kvargs_free(kvlist);
	return ret;
}
|
||||
|
||||
/* PCI probe: claim the VF only when the user passed cap=dcf; returning
 * a positive value tells the bus layer to let another driver try.
 */
static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
				 struct rte_pci_device *pci_dev)
{
	if (!ice_dcf_cap_selected(pci_dev->device.devargs))
		return 1;

	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_dcf_adapter),
					     ice_dcf_dev_init);
}
|
||||
|
||||
/* PCI remove: delegate teardown to the generic helper + dev_uninit. */
static int eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dcf_dev_uninit);
}
|
||||
|
||||
/* The DCF PMD binds to the adaptive VF device ID, same as iavf; the
 * cap=dcf devarg (checked in probe) disambiguates which PMD wins.
 */
static const struct rte_pci_id pci_id_ice_dcf_map[] = {
	{ RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_ice_dcf_pmd = {
	.id_table = pci_id_ice_dcf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_ice_dcf_pci_probe,
	.remove = eth_ice_dcf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");
|
24
drivers/net/ice/ice_dcf_ethdev.h
Normal file
24
drivers/net/ice/ice_dcf_ethdev.h
Normal file
@ -0,0 +1,24 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause
|
||||
* Copyright(c) 2020 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef _ICE_DCF_ETHDEV_H_
|
||||
#define _ICE_DCF_ETHDEV_H_
|
||||
|
||||
#include "ice_ethdev.h"
|
||||
#include "ice_dcf.h"
|
||||
|
||||
#define ICE_DCF_MAX_RINGS 1
|
||||
|
||||
/* Placeholder queue object; the DCF port has no real Rx/Tx rings. */
struct ice_dcf_queue {
	uint64_t dummy;
};
|
||||
|
||||
/* Per-port private data for the DCF ethdev. */
struct ice_dcf_adapter {
	struct ice_dcf_hw real_hw;	/* DCF hardware/virtchnl state */
	struct rte_ether_addr mac_addr;	/* random MAC published at init */

	struct ice_dcf_queue rxqs[ICE_DCF_MAX_RINGS]; /* dummy Rx queues */
	struct ice_dcf_queue txqs[ICE_DCF_MAX_RINGS]; /* dummy Tx queues */
};
|
||||
|
||||
#endif /* _ICE_DCF_ETHDEV_H_ */
|
@ -13,8 +13,8 @@ sources = files(
|
||||
'ice_hash.c'
|
||||
)
|
||||
|
||||
deps += ['hash']
|
||||
includes += include_directories('base')
|
||||
deps += ['hash', 'net', 'common_iavf']
|
||||
includes += include_directories('base', '../../common/iavf')
|
||||
|
||||
if arch_subdir == 'x86'
|
||||
sources += files('ice_rxtx_vec_sse.c')
|
||||
@ -35,4 +35,7 @@ if arch_subdir == 'x86'
|
||||
endif
|
||||
endif
|
||||
|
||||
sources += files('ice_dcf.c',
|
||||
'ice_dcf_ethdev.c')
|
||||
|
||||
install_headers('rte_pmd_ice.h')
|
||||
|
@ -185,6 +185,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += -lrte_pmd_i40e
|
||||
_LDLIBS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += -lrte_pmd_iavf
|
||||
_LDLIBS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += -lrte_pmd_ice
|
||||
IAVF-y := $(CONFIG_RTE_LIBRTE_IAVF_PMD)
|
||||
IAVF-y += $(CONFIG_RTE_LIBRTE_ICE_PMD)
|
||||
ifeq ($(findstring y,$(IAVF-y)),y)
|
||||
_LDLIBS-y += -lrte_common_iavf
|
||||
endif
|
||||
|
Loading…
Reference in New Issue
Block a user