net/idpf: support device initialization

Support device init and add the following dev ops:
 - dev_configure
 - dev_close
 - dev_infos_get
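
As a rough usage sketch (not part of this patch), an application reaches
these ops through the generic ethdev API. The function name, port id and
queue counts below are illustrative only:

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper: query, configure and close one idpf port. */
static int
example_idpf_port_roundtrip(uint16_t port_id)
{
    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf conf;
    int ret;

    /* dev_infos_get: queue limits and MTU range reported by the PMD */
    ret = rte_eth_dev_info_get(port_id, &dev_info);
    if (ret != 0)
        return ret;

    /* dev_configure: one Rx and one Tx queue, default modes and offloads */
    memset(&conf, 0, sizeof(conf));
    ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
    if (ret != 0)
        return ret;

    /* dev_close: destroy the vport and release the port */
    return rte_eth_dev_close(port_id);
}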

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Commit 549343c25d (parent fb4ac04e9b)
Authored by Junfeng Guo on 2022-10-31 08:33:30 +00:00, committed by Thomas Monjalon
12 changed files with 1662 additions and 0 deletions

MAINTAINERS

@@ -769,6 +769,15 @@ F: drivers/net/ice/
F: doc/guides/nics/ice.rst
F: doc/guides/nics/features/ice.ini
Intel idpf
M: Jingjing Wu <jingjing.wu@intel.com>
M: Beilei Xing <beilei.xing@intel.com>
T: git://dpdk.org/next/dpdk-next-net-intel
F: drivers/net/idpf/
F: drivers/common/idpf/
F: doc/guides/nics/idpf.rst
F: doc/guides/nics/features/idpf.ini
Intel igc
M: Junfeng Guo <junfeng.guo@intel.com>
M: Simei Su <simei.su@intel.com>

doc/guides/nics/features/idpf.ini (new file)

@@ -0,0 +1,9 @@
;
; Supported features of the 'idpf' network poll mode driver.
;
; Refer to default.ini for the full list of available PMD features.
;
[Features]
Linux = Y
x86-32 = Y
x86-64 = Y

doc/guides/nics/idpf.rst (new file)

@@ -0,0 +1,66 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2022 Intel Corporation.
.. include:: <isonum.txt>
IDPF Poll Mode Driver
=====================
The [*EXPERIMENTAL*] idpf PMD (**librte_net_idpf**) provides poll mode driver support
for Intel\ |reg| Infrastructure Processing Unit (Intel\ |reg| IPU) E2000.
Linux Prerequisites
-------------------
Follow the DPDK :doc:`../linux_gsg/index` to set up the basic DPDK environment.
To get better performance on Intel platforms,
please follow the :doc:`../linux_gsg/nic_perf_intel_platform`.
Pre-Installation Configuration
------------------------------
Runtime Config Options
~~~~~~~~~~~~~~~~~~~~~~
- ``vport`` (default ``0``)
The PMD supports creating multiple vports for one PCI device,
each vport corresponding to a single ethdev.
The user can specify which vports (by ID) to create, for example::
-a ca:00.0,vport=[0,2,3]
Then the PMD will create 3 vports (ethdevs) for device ``ca:00.0``.
If the parameter is not provided, vport 0 is created by default.
- ``rx_single`` (default ``0``)
The Intel\ |reg| IPU Ethernet ES2000 Series supports two Rx queue modes,
single queue mode and split queue mode.
The user can choose the Rx queue mode, for example::
-a ca:00.0,rx_single=1
Then the PMD will configure the Rx queues in single queue mode.
Otherwise, split queue mode is chosen by default.
- ``tx_single`` (default ``0``)
The Intel\ |reg| IPU Ethernet ES2000 Series supports two Tx queue modes,
single queue mode and split queue mode.
The user can choose the Tx queue mode, for example::
-a ca:00.0,tx_single=1
Then the PMD will configure the Tx queues in single queue mode.
Otherwise, split queue mode is chosen by default.
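These devargs can be combined in a single allowlist entry.
An illustrative example (not taken from this patch)::
-a ca:00.0,vport=[0-2],rx_single=1,tx_single=1
This would create vports 0, 1 and 2 on device ``ca:00.0``
and configure both Rx and Tx queues in single queue mode.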
Driver compilation and testing
------------------------------
Refer to the document :doc:`build_and_test` for details.

doc/guides/nics/index.rst

@@ -34,6 +34,7 @@ Network Interface Controller Drivers
hns3
i40e
ice
idpf
igb
igc
ionic

doc/guides/rel_notes/release_22_11.rst

@@ -158,6 +158,12 @@ New Features
* Added protocol based buffer split support in scalar path.
* **Added Intel idpf driver.**
Added the new ``idpf`` net driver
for Intel\ |reg| Infrastructure Processing Unit (Intel\ |reg| IPU) E2000.
See the :doc:`../nics/idpf` NIC guide for more details on this new driver.
* **Updated Marvell cnxk driver.**
* Added support for flow action REPRESENTED_PORT.

drivers/net/idpf/idpf_ethdev.c (new file)

@@ -0,0 +1,891 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2022 Intel Corporation
*/
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_dev.h>
#include <errno.h>
#include "idpf_ethdev.h"
#define IDPF_TX_SINGLE_Q "tx_single"
#define IDPF_RX_SINGLE_Q "rx_single"
#define IDPF_VPORT "vport"
rte_spinlock_t idpf_adapter_lock;
/* A list for all adapters, one adapter matches one PCI device */
struct idpf_adapter_list idpf_adapter_list;
bool idpf_adapter_list_init;
static const char * const idpf_valid_args[] = {
IDPF_TX_SINGLE_Q,
IDPF_RX_SINGLE_Q,
IDPF_VPORT,
NULL
};
static int
idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct idpf_vport *vport = dev->data->dev_private;
struct idpf_adapter *adapter = vport->adapter;
dev_info->max_rx_queues = adapter->caps->max_rx_q;
dev_info->max_tx_queues = adapter->caps->max_tx_q;
dev_info->min_rx_bufsize = IDPF_MIN_BUF_SIZE;
dev_info->max_rx_pktlen = IDPF_MAX_FRAME_SIZE;
dev_info->max_mtu = dev_info->max_rx_pktlen - IDPF_ETH_OVERHEAD;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
return 0;
}
static int
idpf_init_vport_req_info(struct rte_eth_dev *dev)
{
struct idpf_vport *vport = dev->data->dev_private;
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_create_vport *vport_info;
uint16_t idx = adapter->cur_vport_idx;
if (idx == IDPF_INVALID_VPORT_IDX) {
PMD_INIT_LOG(ERR, "Invalid vport index.");
return -EINVAL;
}
if (adapter->vport_req_info[idx] == NULL) {
adapter->vport_req_info[idx] = rte_zmalloc(NULL,
sizeof(struct virtchnl2_create_vport), 0);
if (adapter->vport_req_info[idx] == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate vport_req_info");
return -ENOMEM;
}
}
vport_info =
(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];
vport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
if (adapter->txq_model == 0) {
vport_info->txq_model =
rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
vport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;
vport_info->num_tx_complq =
IDPF_DEFAULT_TXQ_NUM * IDPF_TX_COMPLQ_PER_GRP;
} else {
vport_info->txq_model =
rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
vport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;
vport_info->num_tx_complq = 0;
}
if (adapter->rxq_model == 0) {
vport_info->rxq_model =
rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
vport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;
vport_info->num_rx_bufq =
IDPF_DEFAULT_RXQ_NUM * IDPF_RX_BUFQ_PER_GRP;
} else {
vport_info->rxq_model =
rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
vport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;
vport_info->num_rx_bufq = 0;
}
return 0;
}
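/* Extract the numeric vport id from an ethdev name of the form
 * "idpf_<pci_address>_vport_<id>", as generated in idpf_pci_probe().
 */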
static int
idpf_parse_devarg_id(char *name)
{
uint16_t val;
char *p;
p = strstr(name, "vport_");
if (p == NULL)
return -EINVAL;
p += sizeof("vport_") - 1;
val = strtoul(p, NULL, 10);
return val;
}
static int
idpf_init_vport(struct rte_eth_dev *dev)
{
struct idpf_vport *vport = dev->data->dev_private;
struct idpf_adapter *adapter = vport->adapter;
uint16_t idx = adapter->cur_vport_idx;
struct virtchnl2_create_vport *vport_info =
(struct virtchnl2_create_vport *)adapter->vport_recv_info[idx];
int i, type, ret;
vport->vport_id = vport_info->vport_id;
vport->txq_model = vport_info->txq_model;
vport->rxq_model = vport_info->rxq_model;
vport->num_tx_q = vport_info->num_tx_q;
vport->num_tx_complq = vport_info->num_tx_complq;
vport->num_rx_q = vport_info->num_rx_q;
vport->num_rx_bufq = vport_info->num_rx_bufq;
vport->max_mtu = vport_info->max_mtu;
rte_memcpy(vport->default_mac_addr,
vport_info->default_mac_addr, ETH_ALEN);
vport->sw_idx = idx;
for (i = 0; i < vport_info->chunks.num_chunks; i++) {
type = vport_info->chunks.chunks[i].type;
switch (type) {
case VIRTCHNL2_QUEUE_TYPE_TX:
vport->chunks_info.tx_start_qid =
vport_info->chunks.chunks[i].start_queue_id;
vport->chunks_info.tx_qtail_start =
vport_info->chunks.chunks[i].qtail_reg_start;
vport->chunks_info.tx_qtail_spacing =
vport_info->chunks.chunks[i].qtail_reg_spacing;
break;
case VIRTCHNL2_QUEUE_TYPE_RX:
vport->chunks_info.rx_start_qid =
vport_info->chunks.chunks[i].start_queue_id;
vport->chunks_info.rx_qtail_start =
vport_info->chunks.chunks[i].qtail_reg_start;
vport->chunks_info.rx_qtail_spacing =
vport_info->chunks.chunks[i].qtail_reg_spacing;
break;
case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
vport->chunks_info.tx_compl_start_qid =
vport_info->chunks.chunks[i].start_queue_id;
vport->chunks_info.tx_compl_qtail_start =
vport_info->chunks.chunks[i].qtail_reg_start;
vport->chunks_info.tx_compl_qtail_spacing =
vport_info->chunks.chunks[i].qtail_reg_spacing;
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
vport->chunks_info.rx_buf_start_qid =
vport_info->chunks.chunks[i].start_queue_id;
vport->chunks_info.rx_buf_qtail_start =
vport_info->chunks.chunks[i].qtail_reg_start;
vport->chunks_info.rx_buf_qtail_spacing =
vport_info->chunks.chunks[i].qtail_reg_spacing;
break;
default:
PMD_INIT_LOG(ERR, "Unsupported queue type");
break;
}
}
ret = idpf_parse_devarg_id(dev->data->name);
if (ret < 0) {
PMD_INIT_LOG(ERR, "Failed to parse devarg id.");
return -EINVAL;
}
vport->devarg_id = ret;
vport->dev_data = dev->data;
adapter->vports[idx] = vport;
return 0;
}
static int
idpf_dev_configure(struct rte_eth_dev *dev)
{
struct rte_eth_conf *conf = &dev->data->dev_conf;
if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
PMD_INIT_LOG(ERR, "Setting link speed is not supported");
return -ENOTSUP;
}
if (dev->data->nb_rx_queues == 1 && conf->rxmode.mq_mode != RTE_ETH_MQ_RX_NONE) {
PMD_INIT_LOG(ERR, "Multi-queue packet distribution mode %d is not supported",
conf->rxmode.mq_mode);
return -ENOTSUP;
}
if (conf->txmode.mq_mode != RTE_ETH_MQ_TX_NONE) {
PMD_INIT_LOG(ERR, "Multi-queue TX mode %d is not supported",
conf->txmode.mq_mode);
return -ENOTSUP;
}
if (conf->lpbk_mode != 0) {
PMD_INIT_LOG(ERR, "Loopback operation mode %d is not supported",
conf->lpbk_mode);
return -ENOTSUP;
}
if (conf->dcb_capability_en != 0) {
PMD_INIT_LOG(ERR, "Priority Flow Control(PFC) if not supported");
return -ENOTSUP;
}
if (conf->intr_conf.lsc != 0) {
PMD_INIT_LOG(ERR, "LSC interrupt is not supported");
return -ENOTSUP;
}
if (conf->intr_conf.rxq != 0) {
PMD_INIT_LOG(ERR, "RXQ interrupt is not supported");
return -ENOTSUP;
}
if (conf->intr_conf.rmv != 0) {
PMD_INIT_LOG(ERR, "RMV interrupt is not supported");
return -ENOTSUP;
}
return 0;
}
static int
idpf_dev_close(struct rte_eth_dev *dev)
{
struct idpf_vport *vport = dev->data->dev_private;
struct idpf_adapter *adapter = vport->adapter;
idpf_vc_destroy_vport(vport);
adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
rte_free(vport);
dev->data->dev_private = NULL;
return 0;
}
static int
insert_value(struct idpf_adapter *adapter, uint16_t id)
{
uint16_t i;
for (i = 0; i < adapter->req_vport_nb; i++) {
if (adapter->req_vports[i] == id)
return 0;
}
if (adapter->req_vport_nb >= RTE_DIM(adapter->req_vports)) {
PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
IDPF_MAX_VPORT_NUM);
return -EINVAL;
}
adapter->req_vports[adapter->req_vport_nb] = id;
adapter->req_vport_nb++;
return 0;
}
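/* Parse one element of the "vport" devarg: either a single id ("2") or an
 * inclusive range ("0-3"). Returns a pointer just past the parsed element,
 * or NULL on a malformed or out-of-range value.
 */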
static const char *
parse_range(const char *value, struct idpf_adapter *adapter)
{
uint16_t lo, hi, i;
int n = 0;
int result;
const char *pos = value;
result = sscanf(value, "%hu%n-%hu%n", &lo, &n, &hi, &n);
if (result == 1) {
if (lo >= IDPF_MAX_VPORT_NUM)
return NULL;
if (insert_value(adapter, lo) != 0)
return NULL;
} else if (result == 2) {
if (lo > hi || hi >= IDPF_MAX_VPORT_NUM)
return NULL;
for (i = lo; i <= hi; i++) {
if (insert_value(adapter, i) != 0)
return NULL;
}
} else {
return NULL;
}
return pos + n;
}
static int
parse_vport(const char *key, const char *value, void *args)
{
struct idpf_adapter *adapter = args;
const char *pos = value;
int i;
adapter->req_vport_nb = 0;
if (*pos == '[')
pos++;
while (1) {
pos = parse_range(pos, adapter);
if (pos == NULL) {
PMD_INIT_LOG(ERR, "invalid value:\"%s\" for key:\"%s\", ",
value, key);
return -EINVAL;
}
if (*pos != ',')
break;
pos++;
}
if (*value == '[' && *pos != ']') {
PMD_INIT_LOG(ERR, "invalid value:\"%s\" for key:\"%s\", ",
value, key);
return -EINVAL;
}
if (adapter->cur_vport_nb + adapter->req_vport_nb >
IDPF_MAX_VPORT_NUM) {
PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
IDPF_MAX_VPORT_NUM);
return -EINVAL;
}
for (i = 0; i < adapter->req_vport_nb; i++) {
if ((adapter->cur_vports & RTE_BIT32(adapter->req_vports[i])) == 0) {
adapter->cur_vports |= RTE_BIT32(adapter->req_vports[i]);
adapter->cur_vport_nb++;
} else {
PMD_INIT_LOG(ERR, "Vport %d has been created",
adapter->req_vports[i]);
return -EINVAL;
}
}
return 0;
}
static int
parse_bool(const char *key, const char *value, void *args)
{
int *i = args;
char *end;
int num;
errno = 0;
num = strtoul(value, &end, 10);
if (errno == ERANGE || (num != 0 && num != 1)) {
PMD_INIT_LOG(ERR, "invalid value:\"%s\" for key:\"%s\", value must be 0 or 1",
value, key);
return -EINVAL;
}
*i = num;
return 0;
}
static int
idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
{
struct rte_devargs *devargs = pci_dev->device.devargs;
struct rte_kvargs *kvlist;
int ret;
if (devargs == NULL)
return 0;
kvlist = rte_kvargs_parse(devargs->args, idpf_valid_args);
if (kvlist == NULL) {
PMD_INIT_LOG(ERR, "invalid kvargs key");
return -EINVAL;
}
ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
adapter);
if (ret != 0)
goto bail;
ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
&adapter->txq_model);
if (ret != 0)
goto bail;
ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
&adapter->rxq_model);
if (ret != 0)
goto bail;
bail:
rte_kvargs_free(kvlist);
return ret;
}
static void
idpf_reset_pf(struct idpf_hw *hw)
{
uint32_t reg;
reg = IDPF_READ_REG(hw, PFGEN_CTRL);
IDPF_WRITE_REG(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR));
}
#define IDPF_RESET_WAIT_CNT 100
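/* Poll PFGEN_RSTAT until the PF reset completes, waiting one second between
 * reads and giving up after IDPF_RESET_WAIT_CNT attempts.
 */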
static int
idpf_check_pf_reset_done(struct idpf_hw *hw)
{
uint32_t reg;
int i;
for (i = 0; i < IDPF_RESET_WAIT_CNT; i++) {
reg = IDPF_READ_REG(hw, PFGEN_RSTAT);
if (reg != 0xFFFFFFFF && (reg & PFGEN_RSTAT_PFR_STATE_M))
return 0;
rte_delay_ms(1000);
}
PMD_INIT_LOG(ERR, "IDPF reset timeout");
return -EBUSY;
}
#define CTLQ_NUM 2
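/* Create the mailbox control queue pair (Tx and Rx) and cache the resulting
 * queues as hw->asq and hw->arq for the virtchnl message exchange.
 */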
static int
idpf_init_mbx(struct idpf_hw *hw)
{
struct idpf_ctlq_create_info ctlq_info[CTLQ_NUM] = {
{
.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
.id = IDPF_CTLQ_ID,
.len = IDPF_CTLQ_LEN,
.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
.reg = {
.head = PF_FW_ATQH,
.tail = PF_FW_ATQT,
.len = PF_FW_ATQLEN,
.bah = PF_FW_ATQBAH,
.bal = PF_FW_ATQBAL,
.len_mask = PF_FW_ATQLEN_ATQLEN_M,
.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M,
.head_mask = PF_FW_ATQH_ATQH_M,
}
},
{
.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
.id = IDPF_CTLQ_ID,
.len = IDPF_CTLQ_LEN,
.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
.reg = {
.head = PF_FW_ARQH,
.tail = PF_FW_ARQT,
.len = PF_FW_ARQLEN,
.bah = PF_FW_ARQBAH,
.bal = PF_FW_ARQBAL,
.len_mask = PF_FW_ARQLEN_ARQLEN_M,
.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M,
.head_mask = PF_FW_ARQH_ARQH_M,
}
}
};
struct idpf_ctlq_info *ctlq;
int ret;
ret = idpf_ctlq_init(hw, CTLQ_NUM, ctlq_info);
if (ret != 0)
return ret;
LIST_FOR_EACH_ENTRY_SAFE(ctlq, NULL, &hw->cq_list_head,
struct idpf_ctlq_info, cq_list) {
if (ctlq->q_id == IDPF_CTLQ_ID &&
ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
hw->asq = ctlq;
if (ctlq->q_id == IDPF_CTLQ_ID &&
ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX)
hw->arq = ctlq;
}
if (hw->asq == NULL || hw->arq == NULL) {
idpf_ctlq_deinit(hw);
ret = -ENOENT;
}
return ret;
}
static int
idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
{
struct idpf_hw *hw = &adapter->hw;
int ret = 0;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->hw_addr_len = pci_dev->mem_resource[0].len;
hw->back = adapter;
hw->vendor_id = pci_dev->id.vendor_id;
hw->device_id = pci_dev->id.device_id;
hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
strncpy(adapter->name, pci_dev->device.name, PCI_PRI_STR_SIZE);
idpf_reset_pf(hw);
ret = idpf_check_pf_reset_done(hw);
if (ret != 0) {
PMD_INIT_LOG(ERR, "IDPF is still resetting");
goto err;
}
ret = idpf_init_mbx(hw);
if (ret != 0) {
PMD_INIT_LOG(ERR, "Failed to init mailbox");
goto err;
}
adapter->mbx_resp = rte_zmalloc("idpf_adapter_mbx_resp",
IDPF_DFLT_MBX_BUF_SIZE, 0);
if (adapter->mbx_resp == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate idpf_adapter_mbx_resp memory");
ret = -ENOMEM;
goto err_mbx;
}
ret = idpf_vc_check_api_version(adapter);
if (ret != 0) {
PMD_INIT_LOG(ERR, "Failed to check api version");
goto err_api;
}
adapter->caps = rte_zmalloc("idpf_caps",
sizeof(struct virtchnl2_get_capabilities), 0);
if (adapter->caps == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate idpf_caps memory");
ret = -ENOMEM;
goto err_api;
}
ret = idpf_vc_get_caps(adapter);
if (ret != 0) {
PMD_INIT_LOG(ERR, "Failed to get capabilities");
goto err_caps;
}
adapter->max_vport_nb = adapter->caps->max_vports;
adapter->vport_req_info = rte_zmalloc("vport_req_info",
adapter->max_vport_nb *
sizeof(*adapter->vport_req_info),
0);
if (adapter->vport_req_info == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate vport_req_info memory");
ret = -ENOMEM;
goto err_caps;
}
adapter->vport_recv_info = rte_zmalloc("vport_recv_info",
adapter->max_vport_nb *
sizeof(*adapter->vport_recv_info),
0);
if (adapter->vport_recv_info == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate vport_recv_info memory");
ret = -ENOMEM;
goto err_vport_recv_info;
}
adapter->vports = rte_zmalloc("vports",
adapter->max_vport_nb *
sizeof(*adapter->vports),
0);
if (adapter->vports == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate vports memory");
ret = -ENOMEM;
goto err_vports;
}
adapter->max_rxq_per_msg = (IDPF_DFLT_MBX_BUF_SIZE -
sizeof(struct virtchnl2_config_rx_queues)) /
sizeof(struct virtchnl2_rxq_info);
adapter->max_txq_per_msg = (IDPF_DFLT_MBX_BUF_SIZE -
sizeof(struct virtchnl2_config_tx_queues)) /
sizeof(struct virtchnl2_txq_info);
adapter->cur_vports = 0;
adapter->cur_vport_nb = 0;
return ret;
err_vports:
rte_free(adapter->vport_recv_info);
adapter->vport_recv_info = NULL;
err_vport_recv_info:
rte_free(adapter->vport_req_info);
adapter->vport_req_info = NULL;
err_caps:
rte_free(adapter->caps);
adapter->caps = NULL;
err_api:
rte_free(adapter->mbx_resp);
adapter->mbx_resp = NULL;
err_mbx:
idpf_ctlq_deinit(hw);
err:
return ret;
}
static const struct eth_dev_ops idpf_eth_dev_ops = {
.dev_configure = idpf_dev_configure,
.dev_close = idpf_dev_close,
.dev_infos_get = idpf_dev_info_get,
};
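/* Return the index of the first unused slot in the vports array, or
 * IDPF_INVALID_VPORT_IDX if all slots are taken.
 */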
static uint16_t
idpf_get_vport_idx(struct idpf_vport **vports, uint16_t max_vport_nb)
{
uint16_t vport_idx;
uint16_t i;
for (i = 0; i < max_vport_nb; i++) {
if (vports[i] == NULL)
break;
}
if (i == max_vport_nb)
vport_idx = IDPF_INVALID_VPORT_IDX;
else
vport_idx = i;
return vport_idx;
}
static int
idpf_dev_init(struct rte_eth_dev *dev, void *init_params)
{
struct idpf_vport *vport = dev->data->dev_private;
struct idpf_adapter *adapter = init_params;
int ret = 0;
dev->dev_ops = &idpf_eth_dev_ops;
vport->adapter = adapter;
ret = idpf_init_vport_req_info(dev);
if (ret != 0) {
PMD_INIT_LOG(ERR, "Failed to init vport req_info.");
goto err;
}
ret = idpf_vc_create_vport(adapter);
if (ret != 0) {
PMD_INIT_LOG(ERR, "Failed to create vport.");
goto err_create_vport;
}
ret = idpf_init_vport(dev);
if (ret != 0) {
PMD_INIT_LOG(ERR, "Failed to init vports.");
goto err_init_vport;
}
adapter->cur_vport_idx = idpf_get_vport_idx(adapter->vports,
adapter->max_vport_nb);
dev->data->mac_addrs = rte_zmalloc(NULL, RTE_ETHER_ADDR_LEN, 0);
if (dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate mac_addr memory.");
ret = -ENOMEM;
goto err_init_vport;
}
rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
&dev->data->mac_addrs[0]);
return 0;
err_init_vport:
idpf_vc_destroy_vport(vport);
err_create_vport:
rte_free(vport->adapter->vport_req_info[vport->adapter->cur_vport_idx]);
err:
return ret;
}
static const struct rte_pci_id pci_id_idpf_map[] = {
{ RTE_PCI_DEVICE(IDPF_INTEL_VENDOR_ID, IDPF_DEV_ID_PF) },
{ .vendor_id = 0, /* sentinel */ },
};
struct idpf_adapter *
idpf_find_adapter(struct rte_pci_device *pci_dev)
{
struct idpf_adapter *adapter;
int found = 0;
if (pci_dev == NULL)
return NULL;
rte_spinlock_lock(&idpf_adapter_lock);
TAILQ_FOREACH(adapter, &idpf_adapter_list, next) {
if (strncmp(adapter->name, pci_dev->device.name, PCI_PRI_STR_SIZE) == 0) {
found = 1;
break;
}
}
rte_spinlock_unlock(&idpf_adapter_lock);
if (found == 0)
return NULL;
return adapter;
}
static void
idpf_adapter_rel(struct idpf_adapter *adapter)
{
struct idpf_hw *hw = &adapter->hw;
int i;
idpf_ctlq_deinit(hw);
rte_free(adapter->caps);
adapter->caps = NULL;
rte_free(adapter->mbx_resp);
adapter->mbx_resp = NULL;
if (adapter->vport_req_info != NULL) {
for (i = 0; i < adapter->max_vport_nb; i++) {
rte_free(adapter->vport_req_info[i]);
adapter->vport_req_info[i] = NULL;
}
rte_free(adapter->vport_req_info);
adapter->vport_req_info = NULL;
}
if (adapter->vport_recv_info != NULL) {
for (i = 0; i < adapter->max_vport_nb; i++) {
rte_free(adapter->vport_recv_info[i]);
adapter->vport_recv_info[i] = NULL;
}
rte_free(adapter->vport_recv_info);
adapter->vport_recv_info = NULL;
}
rte_free(adapter->vports);
adapter->vports = NULL;
}
static int
idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
struct idpf_adapter *adapter;
char name[RTE_ETH_NAME_MAX_LEN];
int i, retval;
bool first_probe = false;
if (!idpf_adapter_list_init) {
rte_spinlock_init(&idpf_adapter_lock);
TAILQ_INIT(&idpf_adapter_list);
idpf_adapter_list_init = true;
}
adapter = idpf_find_adapter(pci_dev);
if (adapter == NULL) {
first_probe = true;
adapter = rte_zmalloc("idpf_adapter",
sizeof(struct idpf_adapter), 0);
if (adapter == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate adapter.");
return -ENOMEM;
}
retval = idpf_adapter_init(pci_dev, adapter);
if (retval != 0) {
PMD_INIT_LOG(ERR, "Failed to init adapter.");
return retval;
}
rte_spinlock_lock(&idpf_adapter_lock);
TAILQ_INSERT_TAIL(&idpf_adapter_list, adapter, next);
rte_spinlock_unlock(&idpf_adapter_lock);
}
retval = idpf_parse_devargs(pci_dev, adapter);
if (retval != 0) {
PMD_INIT_LOG(ERR, "Failed to parse private devargs");
goto err;
}
if (adapter->req_vport_nb == 0) {
/* If no vport devarg, create vport 0 by default. */
snprintf(name, sizeof(name), "idpf_%s_vport_0",
pci_dev->device.name);
retval = rte_eth_dev_create(&pci_dev->device, name,
sizeof(struct idpf_vport),
NULL, NULL, idpf_dev_init,
adapter);
if (retval != 0)
PMD_DRV_LOG(ERR, "Failed to create default vport 0");
adapter->cur_vports |= RTE_BIT32(0);
adapter->cur_vport_nb++;
} else {
for (i = 0; i < adapter->req_vport_nb; i++) {
snprintf(name, sizeof(name), "idpf_%s_vport_%d",
pci_dev->device.name,
adapter->req_vports[i]);
retval = rte_eth_dev_create(&pci_dev->device, name,
sizeof(struct idpf_vport),
NULL, NULL, idpf_dev_init,
adapter);
if (retval != 0)
PMD_DRV_LOG(ERR, "Failed to create vport %d",
adapter->req_vports[i]);
}
}
return 0;
err:
if (first_probe) {
rte_spinlock_lock(&idpf_adapter_lock);
TAILQ_REMOVE(&idpf_adapter_list, adapter, next);
rte_spinlock_unlock(&idpf_adapter_lock);
idpf_adapter_rel(adapter);
rte_free(adapter);
}
return retval;
}
static int
idpf_pci_remove(struct rte_pci_device *pci_dev)
{
struct idpf_adapter *adapter = idpf_find_adapter(pci_dev);
uint16_t port_id;
/* Any ethdev created on this PCI device can be found via RTE_ETH_FOREACH_DEV_OF through rte_device */
RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
rte_eth_dev_close(port_id);
}
rte_spinlock_lock(&idpf_adapter_lock);
TAILQ_REMOVE(&idpf_adapter_list, adapter, next);
rte_spinlock_unlock(&idpf_adapter_lock);
idpf_adapter_rel(adapter);
rte_free(adapter);
return 0;
}
static struct rte_pci_driver rte_idpf_pmd = {
.id_table = pci_id_idpf_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
.probe = idpf_pci_probe,
.remove = idpf_pci_remove,
};
/**
* Driver initialization routine.
* Invoked once at EAL init time.
* Register itself as the [Poll Mode] Driver of PCI devices.
*/
RTE_PMD_REGISTER_PCI(net_idpf, rte_idpf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_idpf, pci_id_idpf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_idpf, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_LOG_REGISTER_SUFFIX(idpf_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(idpf_logtype_driver, driver, NOTICE);

drivers/net/idpf/idpf_ethdev.h (new file)

@@ -0,0 +1,189 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2022 Intel Corporation
*/
#ifndef _IDPF_ETHDEV_H_
#define _IDPF_ETHDEV_H_
#include <stdint.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_ethdev.h>
#include <rte_kvargs.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include "idpf_logs.h"
#include <base/idpf_prototype.h>
#include <base/virtchnl2.h>
#define IDPF_MAX_VPORT_NUM 8
#define IDPF_DEFAULT_RXQ_NUM 16
#define IDPF_DEFAULT_TXQ_NUM 16
#define IDPF_INVALID_VPORT_IDX 0xffff
#define IDPF_TX_COMPLQ_PER_GRP 1
#define IDPF_RX_BUFQ_PER_GRP 2
#define IDPF_CTLQ_ID -1
#define IDPF_CTLQ_LEN 64
#define IDPF_DFLT_MBX_BUF_SIZE 4096
#define IDPF_MIN_BUF_SIZE 1024
#define IDPF_MAX_FRAME_SIZE 9728
#define IDPF_NUM_MACADDR_MAX 64
#define IDPF_VLAN_TAG_SIZE 4
#define IDPF_ETH_OVERHEAD \
(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IDPF_VLAN_TAG_SIZE * 2)
#define IDPF_ADAPTER_NAME_LEN (PCI_PRI_STR_SIZE + 1)
/* Message type read in virtual channel from PF */
enum idpf_vc_result {
IDPF_MSG_ERR = -1, /* Error when accessing admin queue */
IDPF_MSG_NON, /* Read nothing from admin queue */
IDPF_MSG_SYS, /* Read system msg from admin queue */
IDPF_MSG_CMD, /* Read async command result */
};
struct idpf_chunks_info {
uint32_t tx_start_qid;
uint32_t rx_start_qid;
/* Valid only if split queue model */
uint32_t tx_compl_start_qid;
uint32_t rx_buf_start_qid;
uint64_t tx_qtail_start;
uint32_t tx_qtail_spacing;
uint64_t rx_qtail_start;
uint32_t rx_qtail_spacing;
uint64_t tx_compl_qtail_start;
uint32_t tx_compl_qtail_spacing;
uint64_t rx_buf_qtail_start;
uint32_t rx_buf_qtail_spacing;
};
struct idpf_vport {
struct idpf_adapter *adapter; /* Backreference to associated adapter */
uint16_t vport_id;
uint32_t txq_model;
uint32_t rxq_model;
uint16_t num_tx_q;
/* valid only if txq_model is split Q */
uint16_t num_tx_complq;
uint16_t num_rx_q;
/* valid only if rxq_model is split Q */
uint16_t num_rx_bufq;
uint16_t max_mtu;
uint8_t default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
uint16_t sw_idx; /* SW idx */
struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
uint16_t max_pkt_len; /* Maximum packet length */
/* Chunk info */
struct idpf_chunks_info chunks_info;
uint16_t devarg_id;
};
struct idpf_adapter {
TAILQ_ENTRY(idpf_adapter) next;
struct idpf_hw hw;
char name[IDPF_ADAPTER_NAME_LEN];
struct virtchnl2_version_info virtchnl_version;
struct virtchnl2_get_capabilities *caps;
volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
uint32_t cmd_retval; /* return value of the cmd response from ipf */
uint8_t *mbx_resp; /* buffer to store the mailbox response from ipf */
uint32_t txq_model; /* 0 - split queue model, non-0 - single queue model */
uint32_t rxq_model; /* 0 - split queue model, non-0 - single queue model */
/* Vport info */
uint8_t **vport_req_info;
uint8_t **vport_recv_info;
struct idpf_vport **vports;
uint16_t max_vport_nb;
uint16_t req_vports[IDPF_MAX_VPORT_NUM];
uint16_t req_vport_nb;
uint16_t cur_vports;
uint16_t cur_vport_nb;
uint16_t cur_vport_idx;
/* Max config queue number per VC message */
uint32_t max_rxq_per_msg;
uint32_t max_txq_per_msg;
};
TAILQ_HEAD(idpf_adapter_list, idpf_adapter);
#define IDPF_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
/* structure used for sending and checking response of virtchnl ops */
struct idpf_cmd_info {
uint32_t ops;
uint8_t *in_args; /* buffer for sending */
uint32_t in_args_size; /* buffer size for sending */
uint8_t *out_buffer; /* buffer for response */
uint32_t out_size; /* buffer size for response */
};
/* notify current command done. Only call in case execute
* _atomic_set_cmd successfully.
*/
static inline void
notify_cmd(struct idpf_adapter *adapter, int msg_ret)
{
adapter->cmd_retval = msg_ret;
/* Return value may be checked in another thread, need to ensure the coherence. */
rte_wmb();
adapter->pend_cmd = VIRTCHNL_OP_UNKNOWN;
}
/* clear current command. Only call in case execute
* _atomic_set_cmd successfully.
*/
static inline void
clear_cmd(struct idpf_adapter *adapter)
{
/* Return value may be checked in another thread, need to ensure the coherence. */
rte_wmb();
adapter->pend_cmd = VIRTCHNL_OP_UNKNOWN;
adapter->cmd_retval = VIRTCHNL_STATUS_SUCCESS;
}
/* Check there is pending cmd in execution. If none, set new command. */
static inline bool
atomic_set_cmd(struct idpf_adapter *adapter, enum virtchnl_ops ops)
{
enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
bool ret = __atomic_compare_exchange(&adapter->pend_cmd, &op_unk, &ops,
0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
if (!ret)
PMD_DRV_LOG(ERR, "There is incomplete cmd %d", adapter->pend_cmd);
return !ret;
}
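/* The three helpers above implement the command handshake used by
 * idpf_execute_vc_cmd() in idpf_vchnl.c: atomic_set_cmd() marks a command
 * as pending before it is sent, and notify_cmd() or clear_cmd() returns
 * pend_cmd to VIRTCHNL_OP_UNKNOWN once the command is finished or abandoned.
 */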
struct idpf_adapter *idpf_find_adapter(struct rte_pci_device *pci_dev);
void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev);
int idpf_vc_check_api_version(struct idpf_adapter *adapter);
int idpf_vc_get_caps(struct idpf_adapter *adapter);
int idpf_vc_create_vport(struct idpf_adapter *adapter);
int idpf_vc_destroy_vport(struct idpf_vport *vport);
int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);
int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
uint16_t buf_len, uint8_t *buf);
#endif /* _IDPF_ETHDEV_H_ */

drivers/net/idpf/idpf_logs.h (new file)

@@ -0,0 +1,56 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2022 Intel Corporation
*/
#ifndef _IDPF_LOGS_H_
#define _IDPF_LOGS_H_
#include <rte_log.h>
extern int idpf_logtype_init;
extern int idpf_logtype_driver;
#define PMD_INIT_LOG(level, ...) \
rte_log(RTE_LOG_ ## level, \
idpf_logtype_init, \
RTE_FMT("%s(): " \
RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
__func__, \
RTE_FMT_TAIL(__VA_ARGS__,)))
#define PMD_DRV_LOG_RAW(level, ...) \
rte_log(RTE_LOG_ ## level, \
idpf_logtype_driver, \
RTE_FMT("%s(): " \
RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
__func__, \
RTE_FMT_TAIL(__VA_ARGS__,)))
#define PMD_DRV_LOG(level, fmt, args...) \
PMD_DRV_LOG_RAW(level, fmt, ## args)
#ifdef RTE_LIBRTE_IDPF_DEBUG_RX
#define PMD_RX_LOG(level, ...) \
RTE_LOG(level, \
PMD, \
RTE_FMT("%s(): " \
RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
__func__, \
RTE_FMT_TAIL(__VA_ARGS__,)))
#else
#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
#endif
#ifdef RTE_LIBRTE_IDPF_DEBUG_TX
#define PMD_TX_LOG(level, ...) \
RTE_LOG(level, \
PMD, \
RTE_FMT("%s(): " \
RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
__func__, \
RTE_FMT_TAIL(__VA_ARGS__,)))
#else
#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
#endif
#endif /* _IDPF_LOGS_H_ */

drivers/net/idpf/idpf_vchnl.c (new file)

@@ -0,0 +1,416 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2022 Intel Corporation
*/
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_dev.h>
#include "idpf_ethdev.h"
static int
idpf_vc_clean(struct idpf_adapter *adapter)
{
struct idpf_ctlq_msg *q_msg[IDPF_CTLQ_LEN];
uint16_t num_q_msg = IDPF_CTLQ_LEN;
struct idpf_dma_mem *dma_mem;
int err;
uint32_t i;
for (i = 0; i < 10; i++) {
err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
msleep(20);
if (num_q_msg > 0)
break;
}
if (err != 0)
return err;
/* Empty queue is not an error */
for (i = 0; i < num_q_msg; i++) {
dma_mem = q_msg[i]->ctx.indirect.payload;
if (dma_mem != NULL) {
idpf_free_dma_mem(&adapter->hw, dma_mem);
rte_free(dma_mem);
}
rte_free(q_msg[i]);
}
return 0;
}
static int
idpf_send_vc_msg(struct idpf_adapter *adapter, enum virtchnl_ops op,
uint16_t msg_size, uint8_t *msg)
{
struct idpf_ctlq_msg *ctlq_msg;
struct idpf_dma_mem *dma_mem;
int err;
err = idpf_vc_clean(adapter);
if (err != 0)
goto err;
ctlq_msg = rte_zmalloc(NULL, sizeof(struct idpf_ctlq_msg), 0);
if (ctlq_msg == NULL) {
err = -ENOMEM;
goto err;
}
dma_mem = rte_zmalloc(NULL, sizeof(struct idpf_dma_mem), 0);
if (dma_mem == NULL) {
err = -ENOMEM;
goto dma_mem_error;
}
dma_mem->size = IDPF_DFLT_MBX_BUF_SIZE;
idpf_alloc_dma_mem(&adapter->hw, dma_mem, dma_mem->size);
if (dma_mem->va == NULL) {
err = -ENOMEM;
goto dma_alloc_error;
}
memcpy(dma_mem->va, msg, msg_size);
ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_pf;
ctlq_msg->func_id = 0;
ctlq_msg->data_len = msg_size;
ctlq_msg->cookie.mbx.chnl_opcode = op;
ctlq_msg->cookie.mbx.chnl_retval = VIRTCHNL_STATUS_SUCCESS;
ctlq_msg->ctx.indirect.payload = dma_mem;
err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
if (err != 0)
goto send_error;
return 0;
send_error:
idpf_free_dma_mem(&adapter->hw, dma_mem);
dma_alloc_error:
rte_free(dma_mem);
dma_mem_error:
rte_free(ctlq_msg);
err:
return err;
}
static enum idpf_vc_result
idpf_read_msg_from_cp(struct idpf_adapter *adapter, uint16_t buf_len,
uint8_t *buf)
{
struct idpf_hw *hw = &adapter->hw;
struct idpf_ctlq_msg ctlq_msg;
struct idpf_dma_mem *dma_mem = NULL;
enum idpf_vc_result result = IDPF_MSG_NON;
enum virtchnl_ops opcode;
uint16_t pending = 1;
int ret;
ret = idpf_ctlq_recv(hw->arq, &pending, &ctlq_msg);
if (ret != 0) {
PMD_DRV_LOG(DEBUG, "Can't read msg from AQ");
if (ret != -ENOMSG)
result = IDPF_MSG_ERR;
return result;
}
rte_memcpy(buf, ctlq_msg.ctx.indirect.payload->va, buf_len);
opcode = (enum virtchnl_ops)rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_opcode);
adapter->cmd_retval =
(enum virtchnl_status_code)rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_retval);
PMD_DRV_LOG(DEBUG, "CQ from CP carries opcode %u, retval %d",
opcode, adapter->cmd_retval);
if (opcode == VIRTCHNL2_OP_EVENT) {
struct virtchnl2_event *ve =
(struct virtchnl2_event *)ctlq_msg.ctx.indirect.payload->va;
result = IDPF_MSG_SYS;
switch (ve->event) {
case VIRTCHNL2_EVENT_LINK_CHANGE:
/* TBD */
break;
default:
PMD_DRV_LOG(ERR, "%s: Unknown event %d from CP",
__func__, ve->event);
break;
}
} else {
/* Async reply msg for a command previously issued by this PF driver */
result = IDPF_MSG_CMD;
if (opcode != adapter->pend_cmd) {
PMD_DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
adapter->pend_cmd, opcode);
result = IDPF_MSG_ERR;
}
}
if (ctlq_msg.data_len != 0)
dma_mem = ctlq_msg.ctx.indirect.payload;
else
pending = 0;
ret = idpf_ctlq_post_rx_buffs(hw, hw->arq, &pending, &dma_mem);
if (ret != 0 && dma_mem != NULL)
idpf_free_dma_mem(hw, dma_mem);
return result;
}
#define MAX_TRY_TIMES 200
#define ASQ_DELAY_MS 10
int
idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops, uint16_t buf_len,
uint8_t *buf)
{
int err = 0;
int i = 0;
int ret;
do {
ret = idpf_read_msg_from_cp(adapter, buf_len, buf);
if (ret == IDPF_MSG_CMD)
break;
rte_delay_ms(ASQ_DELAY_MS);
} while (i++ < MAX_TRY_TIMES);
if (i >= MAX_TRY_TIMES ||
adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
err = -EBUSY;
PMD_DRV_LOG(ERR, "No response or return failure (%d) for cmd %d",
adapter->cmd_retval, ops);
}
return err;
}
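/* Send a virtchnl command and wait for completion: init-time ops poll the
 * mailbox for a direct reply, while other ops wait for the pending command
 * flag to be cleared.
 */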
static int
idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
{
int err = 0;
int i = 0;
int ret;
if (atomic_set_cmd(adapter, args->ops))
return -EINVAL;
ret = idpf_send_vc_msg(adapter, args->ops, args->in_args_size, args->in_args);
if (ret != 0) {
PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
clear_cmd(adapter);
return ret;
}
switch (args->ops) {
case VIRTCHNL_OP_VERSION:
case VIRTCHNL2_OP_GET_CAPS:
case VIRTCHNL2_OP_CREATE_VPORT:
case VIRTCHNL2_OP_DESTROY_VPORT:
case VIRTCHNL2_OP_ENABLE_VPORT:
case VIRTCHNL2_OP_DISABLE_VPORT:
/* for init virtchnl ops, need to poll the response */
err = idpf_read_one_msg(adapter, args->ops, args->out_size, args->out_buffer);
clear_cmd(adapter);
break;
default:
/* For other virtchnl ops in running time,
* wait for the cmd done flag.
*/
do {
if (adapter->pend_cmd == VIRTCHNL_OP_UNKNOWN)
break;
rte_delay_ms(ASQ_DELAY_MS);
/* If no msg is read, or only a system event is read, continue polling */
} while (i++ < MAX_TRY_TIMES);
/* If no response is received, clear the command */
if (i >= MAX_TRY_TIMES ||
adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
err = -EBUSY;
PMD_DRV_LOG(ERR, "No response or return failure (%d) for cmd %d",
adapter->cmd_retval, args->ops);
clear_cmd(adapter);
}
break;
}
return err;
}
int
idpf_vc_check_api_version(struct idpf_adapter *adapter)
{
struct virtchnl2_version_info version, *pver;
struct idpf_cmd_info args;
int err;
memset(&version, 0, sizeof(version));
version.major = VIRTCHNL2_VERSION_MAJOR_2;
version.minor = VIRTCHNL2_VERSION_MINOR_0;
args.ops = VIRTCHNL_OP_VERSION;
args.in_args = (uint8_t *)&version;
args.in_args_size = sizeof(version);
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
err = idpf_execute_vc_cmd(adapter, &args);
if (err != 0) {
PMD_DRV_LOG(ERR,
"Failed to execute command of VIRTCHNL_OP_VERSION");
return err;
}
pver = (struct virtchnl2_version_info *)args.out_buffer;
adapter->virtchnl_version = *pver;
if (adapter->virtchnl_version.major != VIRTCHNL2_VERSION_MAJOR_2 ||
adapter->virtchnl_version.minor != VIRTCHNL2_VERSION_MINOR_0) {
PMD_INIT_LOG(ERR, "VIRTCHNL API version mismatch:(%u.%u)-(%u.%u)",
adapter->virtchnl_version.major,
adapter->virtchnl_version.minor,
VIRTCHNL2_VERSION_MAJOR_2,
VIRTCHNL2_VERSION_MINOR_0);
return -EINVAL;
}
return 0;
}
int
idpf_vc_get_caps(struct idpf_adapter *adapter)
{
struct virtchnl2_get_capabilities caps_msg;
struct idpf_cmd_info args;
int err;
memset(&caps_msg, 0, sizeof(struct virtchnl2_get_capabilities));
args.ops = VIRTCHNL2_OP_GET_CAPS;
args.in_args = (uint8_t *)&caps_msg;
args.in_args_size = sizeof(caps_msg);
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
err = idpf_execute_vc_cmd(adapter, &args);
if (err != 0) {
PMD_DRV_LOG(ERR,
"Failed to execute command of VIRTCHNL2_OP_GET_CAPS");
return err;
}
rte_memcpy(adapter->caps, args.out_buffer, sizeof(caps_msg));
return 0;
}
int
idpf_vc_create_vport(struct idpf_adapter *adapter)
{
uint16_t idx = adapter->cur_vport_idx;
struct virtchnl2_create_vport *vport_req_info =
(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];
struct virtchnl2_create_vport vport_msg;
struct idpf_cmd_info args;
int err = -1;
memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
vport_msg.vport_type = vport_req_info->vport_type;
vport_msg.txq_model = vport_req_info->txq_model;
vport_msg.rxq_model = vport_req_info->rxq_model;
vport_msg.num_tx_q = vport_req_info->num_tx_q;
vport_msg.num_tx_complq = vport_req_info->num_tx_complq;
vport_msg.num_rx_q = vport_req_info->num_rx_q;
vport_msg.num_rx_bufq = vport_req_info->num_rx_bufq;
memset(&args, 0, sizeof(args));
args.ops = VIRTCHNL2_OP_CREATE_VPORT;
args.in_args = (uint8_t *)&vport_msg;
args.in_args_size = sizeof(vport_msg);
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
err = idpf_execute_vc_cmd(adapter, &args);
if (err != 0) {
PMD_DRV_LOG(ERR,
"Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
return err;
}
if (adapter->vport_recv_info[idx] == NULL) {
adapter->vport_recv_info[idx] = rte_zmalloc(NULL,
IDPF_DFLT_MBX_BUF_SIZE, 0);
if (adapter->vport_recv_info[idx] == NULL) {
PMD_INIT_LOG(ERR, "Failed to alloc vport_recv_info.");
return -ENOMEM;
}
}
rte_memcpy(adapter->vport_recv_info[idx], args.out_buffer,
IDPF_DFLT_MBX_BUF_SIZE);
return 0;
}
int
idpf_vc_destroy_vport(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_vport vc_vport;
struct idpf_cmd_info args;
int err;
vc_vport.vport_id = vport->vport_id;
memset(&args, 0, sizeof(args));
args.ops = VIRTCHNL2_OP_DESTROY_VPORT;
args.in_args = (uint8_t *)&vc_vport;
args.in_args_size = sizeof(vc_vport);
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
err = idpf_execute_vc_cmd(adapter, &args);
if (err != 0)
PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DESTROY_VPORT");
return err;
}
int
idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_vport vc_vport;
struct idpf_cmd_info args;
int err;
vc_vport.vport_id = vport->vport_id;
args.ops = enable ? VIRTCHNL2_OP_ENABLE_VPORT :
VIRTCHNL2_OP_DISABLE_VPORT;
args.in_args = (uint8_t *)&vc_vport;
args.in_args_size = sizeof(vc_vport);
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
err = idpf_execute_vc_cmd(adapter, &args);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_VPORT",
enable ? "ENABLE" : "DISABLE");
}
return err;
}

drivers/net/idpf/meson.build (new file)

@@ -0,0 +1,15 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2022 Intel Corporation
if is_windows
build = false
reason = 'not supported on Windows'
subdir_done()
endif
deps += ['common_idpf']
sources = files(
'idpf_ethdev.c',
'idpf_vchnl.c',
)

drivers/net/idpf/version.map (new file)

@@ -0,0 +1,3 @@
DPDK_23 {
local: *;
};

drivers/net/meson.build

@@ -29,6 +29,7 @@ drivers = [
'i40e',
'iavf',
'ice',
'idpf',
'igc',
'ionic',
'ipn3ke',