numam-dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c
Jiawen Wu b7311360fb net/txgbe: support VF interrupt
Add a VF device interrupt handler, support enabling and disabling RX
queue interrupts, and configure the MSI-X interrupt.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
2021-02-26 14:13:03 +01:00


/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2015-2020
*/
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <rte_log.h>
#include <ethdev_pci.h>
#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"
static int txgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int txgbevf_dev_close(struct rte_eth_dev *dev);
static void txgbevf_intr_disable(struct rte_eth_dev *dev);
static void txgbevf_intr_enable(struct rte_eth_dev *dev);
static void txgbevf_configure_msix(struct rte_eth_dev *dev);
static void txgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static void txgbevf_dev_interrupt_handler(void *param);
/*
* The set of PCI devices this driver supports (for VF)
*/
static const struct rte_pci_id pci_id_txgbevf_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_VF_HV) },
{ .vendor_id = 0, /* sentinel */ },
};
static const struct rte_eth_desc_lim rx_desc_lim = {
.nb_max = TXGBE_RING_DESC_MAX,
.nb_min = TXGBE_RING_DESC_MIN,
.nb_align = TXGBE_RXD_ALIGN,
};
static const struct rte_eth_desc_lim tx_desc_lim = {
.nb_max = TXGBE_RING_DESC_MAX,
.nb_min = TXGBE_RING_DESC_MIN,
.nb_align = TXGBE_TXD_ALIGN,
.nb_seg_max = TXGBE_TX_MAX_SEG,
.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};
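/* Forward declaration of the VF dev_ops table defined at the end of this file. */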
static const struct eth_dev_ops txgbevf_eth_dev_ops;
/*
* Negotiate mailbox API version with the PF.
* After reset, the API version is always set to the basic one (txgbe_mbox_api_10).
* We then try to negotiate, starting with the most recent version.
* If all negotiation attempts fail, we proceed with
* the default one (txgbe_mbox_api_10).
*/
static void
txgbevf_negotiate_api(struct txgbe_hw *hw)
{
int32_t i;
/* start with highest supported, proceed down */
static const int sup_ver[] = {
txgbe_mbox_api_13,
txgbe_mbox_api_12,
txgbe_mbox_api_11,
txgbe_mbox_api_10,
};
for (i = 0; i < ARRAY_SIZE(sup_ver); i++) {
if (txgbevf_negotiate_api_version(hw, sup_ver[i]) == 0)
break;
}
}
static void
generate_random_mac_addr(struct rte_ether_addr *mac_addr)
{
uint64_t random;
/* Set Organizationally Unique Identifier (OUI) prefix. */
mac_addr->addr_bytes[0] = 0x00;
mac_addr->addr_bytes[1] = 0x09;
mac_addr->addr_bytes[2] = 0xC0;
/* Force indication of locally assigned MAC address. */
mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
/* Generate the last 3 bytes of the MAC address with a random number. */
random = rte_rand();
memcpy(&mac_addr->addr_bytes[3], &random, 3);
}
/*
* Virtual Function device init
*/
static int
eth_txgbevf_dev_init(struct rte_eth_dev *eth_dev)
{
int err;
uint32_t tc, tcs;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
struct rte_ether_addr *perm_addr =
(struct rte_ether_addr *)hw->mac.perm_addr;
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = &txgbevf_eth_dev_ops;
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
* RX function
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
struct txgbe_tx_queue *txq;
uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
/* TX queue function in primary is set by the last queue initialized;
* a Tx queue may not have been initialized by the primary process
*/
if (eth_dev->data->tx_queues) {
txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
txgbe_set_tx_function(eth_dev, txq);
} else {
/* Use default TX function if we get here */
PMD_INIT_LOG(NOTICE,
"No TX queues configured yet. Using default TX function.");
}
txgbe_set_rx_function(eth_dev);
return 0;
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
/* Initialize the shared code (base driver) */
err = txgbe_init_shared_code(hw);
if (err != 0) {
PMD_INIT_LOG(ERR,
"Shared code init failed for txgbevf: %d", err);
return -EIO;
}
/* init_mailbox_params */
hw->mbx.init_params(hw);
/* Disable the interrupts for VF */
txgbevf_intr_disable(eth_dev);
hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
err = hw->mac.reset_hw(hw);
/*
* The VF reset operation returns the TXGBE_ERR_INVALID_MAC_ADDR when
* the underlying PF driver has not assigned a MAC address to the VF.
* In this case, assign a random MAC address.
*/
if (err != 0 && err != TXGBE_ERR_INVALID_MAC_ADDR) {
PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err);
/*
* This error code will be propagated to the app by
* rte_eth_dev_reset, so use a public error code rather than
* the internal-only TXGBE_ERR_RESET_FAILED
*/
return -EAGAIN;
}
/* negotiate mailbox API version to use with the PF. */
txgbevf_negotiate_api(hw);
/* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
txgbevf_get_queues(hw, &tcs, &tc);
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("txgbevf", RTE_ETHER_ADDR_LEN *
hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
"Failed to allocate %u bytes needed to store "
"MAC addresses",
RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
/* Generate a random MAC address, if none was assigned by PF. */
if (rte_is_zero_ether_addr(perm_addr)) {
generate_random_mac_addr(perm_addr);
err = txgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
if (err) {
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
return err;
}
PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
"%02x:%02x:%02x:%02x:%02x:%02x",
perm_addr->addr_bytes[0],
perm_addr->addr_bytes[1],
perm_addr->addr_bytes[2],
perm_addr->addr_bytes[3],
perm_addr->addr_bytes[4],
perm_addr->addr_bytes[5]);
}
/* Copy the permanent MAC address */
rte_ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
/* start the hardware with the new settings */
err = hw->mac.start_hw(hw);
if (err) {
PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err);
return -EIO;
}
rte_intr_callback_register(intr_handle,
txgbevf_dev_interrupt_handler, eth_dev);
rte_intr_enable(intr_handle);
txgbevf_intr_enable(eth_dev);
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id, "txgbe_mac_raptor_vf");
return 0;
}
/* Virtual Function device uninit */
static int
eth_txgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
{
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
txgbevf_dev_close(eth_dev);
return 0;
}
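/* Probe/remove callbacks: create or destroy an ethdev for a matching VF PCI
* device using the generic ethdev PCI helpers.
*/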
static int eth_txgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
return rte_eth_dev_pci_generic_probe(pci_dev,
sizeof(struct txgbe_adapter), eth_txgbevf_dev_init);
}
static int eth_txgbevf_pci_remove(struct rte_pci_device *pci_dev)
{
return rte_eth_dev_pci_generic_remove(pci_dev, eth_txgbevf_dev_uninit);
}
/*
* virtual function driver struct
*/
static struct rte_pci_driver rte_txgbevf_pmd = {
.id_table = pci_id_txgbevf_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
.probe = eth_txgbevf_pci_probe,
.remove = eth_txgbevf_pci_remove,
};
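/* Report VF device capabilities: queue counts, offload flags, default ring
* thresholds and descriptor limits.
*/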
static int
txgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
dev_info->min_rx_bufsize = 1024;
dev_info->max_rx_pktlen = TXGBE_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = hw->mac.num_rar_entries;
dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
dev_info->max_vfs = pci_dev->max_vfs;
dev_info->max_vmdq_pools = ETH_64_POOLS;
dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
dev_info->rx_queue_offload_capa);
dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
.pthresh = TXGBE_DEFAULT_RX_PTHRESH,
.hthresh = TXGBE_DEFAULT_RX_HTHRESH,
.wthresh = TXGBE_DEFAULT_RX_WTHRESH,
},
.rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
.offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_thresh = {
.pthresh = TXGBE_DEFAULT_TX_PTHRESH,
.hthresh = TXGBE_DEFAULT_TX_HTHRESH,
.wthresh = TXGBE_DEFAULT_TX_WTHRESH,
},
.tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
.offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
dev_info->tx_desc_lim = tx_desc_lim;
return 0;
}
/*
* Virtual Function operations
*/
static void
txgbevf_intr_disable(struct rte_eth_dev *dev)
{
struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
PMD_INIT_FUNC_TRACE();
/* Mask all interrupts to stop them from being generated */
wr32(hw, TXGBE_VFIMS, TXGBE_VFIMS_MASK);
txgbe_flush(hw);
/* Remember that all interrupts are masked. */
intr->mask_misc = TXGBE_VFIMS_MASK;
}
static void
txgbevf_intr_enable(struct rte_eth_dev *dev)
{
struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
PMD_INIT_FUNC_TRACE();
/* Enable VF interrupts by clearing the interrupt mask (VFIMC) */
wr32(hw, TXGBE_VFIMC, TXGBE_VFIMC_MASK);
txgbe_flush(hw);
intr->mask_misc = 0;
}
static int
txgbevf_dev_close(struct rte_eth_dev *dev)
{
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
hw->mac.reset_hw(hw);
txgbe_dev_free_queues(dev);
/**
* Remove the VF MAC address to ensure
* that the VF traffic goes to the PF
* after stop, close and detach of the VF
**/
txgbevf_remove_mac_addr(dev, 0);
/* Disable the interrupts for VF */
txgbevf_intr_disable(dev);
rte_free(dev->data->mac_addrs);
dev->data->mac_addrs = NULL;
rte_intr_disable(intr_handle);
rte_intr_callback_unregister(intr_handle,
txgbevf_dev_interrupt_handler, dev);
return 0;
}
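/* Enable the RX queue interrupt by unmasking its MSI-X vector;
* all RX queues share a single vector on the VF.
*/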
static int
txgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
uint32_t vec = TXGBE_MISC_VEC_ID;
if (rte_intr_allow_others(intr_handle))
vec = TXGBE_RX_VEC_START;
intr->mask_misc &= ~(1 << vec);
RTE_SET_USED(queue_id);
wr32(hw, TXGBE_VFIMC, ~intr->mask_misc);
rte_intr_enable(intr_handle);
return 0;
}
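/* Disable the RX queue interrupt by masking its MSI-X vector in VFIMS. */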
static int
txgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t vec = TXGBE_MISC_VEC_ID;
if (rte_intr_allow_others(intr_handle))
vec = TXGBE_RX_VEC_START;
intr->mask_misc |= (1 << vec);
RTE_SET_USED(queue_id);
wr32(hw, TXGBE_VFIMS, intr->mask_misc);
return 0;
}
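/* Program the VF IVAR registers to map an interrupt cause to an MSI-X vector:
* direction -1 selects the misc ("other causes") entry, otherwise the RX/TX
* entry of the given queue is selected.
*/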
static void
txgbevf_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector)
{
uint32_t tmp, idx;
if (direction == -1) {
/* other causes */
msix_vector |= TXGBE_VFIVAR_VLD;
tmp = rd32(hw, TXGBE_VFIVARMISC);
tmp &= ~0xFF;
tmp |= msix_vector;
wr32(hw, TXGBE_VFIVARMISC, tmp);
} else {
/* rx or tx cause */
/* Workaround for lost ICR */
idx = ((16 * (queue & 1)) + (8 * direction));
tmp = rd32(hw, TXGBE_VFIVAR(queue >> 1));
tmp &= ~(0xFF << idx);
tmp |= (msix_vector << idx);
wr32(hw, TXGBE_VFIVAR(queue >> 1), tmp);
}
}
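/* Set up the MSI-X vector mapping for the misc cause and for every RX queue
* of the VF.
*/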
static void
txgbevf_configure_msix(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
uint32_t q_idx;
uint32_t vector_idx = TXGBE_MISC_VEC_ID;
uint32_t base = TXGBE_MISC_VEC_ID;
/* Configure VF other cause ivar */
txgbevf_set_ivar_map(hw, -1, 1, vector_idx);
/* Don't configure the MSI-X register if no mapping has been done
* between the interrupt vector and the event fd.
*/
if (!rte_intr_dp_is_en(intr_handle))
return;
if (rte_intr_allow_others(intr_handle)) {
base = TXGBE_RX_VEC_START;
vector_idx = TXGBE_RX_VEC_START;
}
/* Configure all RX queues of VF */
for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
/* Force all queues to use vector 0,
* as TXGBE_VF_MAXMSIVECOTR = 1
*/
txgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
intr_handle->intr_vec[q_idx] = vector_idx;
if (vector_idx < base + intr_handle->nb_efd - 1)
vector_idx++;
}
/* As the RX queue setup above shows, all queues use vector 0.
* Set only the ITR value of TXGBE_MISC_VEC_ID.
*/
wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
TXGBE_ITR_IVAL(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
| TXGBE_ITR_WRDSA);
}
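/* Add an extra unicast MAC address for the VF through the PF mailbox. */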
static int
txgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
__rte_unused uint32_t index,
__rte_unused uint32_t pool)
{
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
int err;
/*
* On a VF, adding the same MAC address again is not an idempotent
* operation. Trap this case to avoid exhausting the [very limited]
* set of PF resources used to store VF MAC addresses.
*/
if (memcmp(hw->mac.perm_addr, mac_addr,
sizeof(struct rte_ether_addr)) == 0)
return -1;
err = txgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
if (err != 0)
PMD_DRV_LOG(ERR, "Unable to add MAC address "
"%02x:%02x:%02x:%02x:%02x:%02x - err=%d",
mac_addr->addr_bytes[0],
mac_addr->addr_bytes[1],
mac_addr->addr_bytes[2],
mac_addr->addr_bytes[3],
mac_addr->addr_bytes[4],
mac_addr->addr_bytes[5],
err);
return err;
}
static void
txgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
struct rte_ether_addr *perm_addr =
(struct rte_ether_addr *)hw->mac.perm_addr;
struct rte_ether_addr *mac_addr;
uint32_t i;
int err;
/*
* The TXGBE_VF_SET_MACVLAN command of the txgbe-pf driver does
* not support the deletion of a given MAC address.
* Instead, it requires deleting all MAC addresses, then adding back
* all MAC addresses except the one to be deleted.
*/
(void)txgbevf_set_uc_addr_vf(hw, 0, NULL);
/*
* Add back all MAC addresses, except the deleted one
* and the permanent MAC address.
*/
for (i = 0, mac_addr = dev->data->mac_addrs;
i < hw->mac.num_rar_entries; i++, mac_addr++) {
/* Skip the deleted MAC address */
if (i == index)
continue;
/* Skip NULL MAC addresses */
if (rte_is_zero_ether_addr(mac_addr))
continue;
/* Skip the permanent MAC address */
if (memcmp(perm_addr, mac_addr,
sizeof(struct rte_ether_addr)) == 0)
continue;
err = txgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
if (err != 0)
PMD_DRV_LOG(ERR,
"Adding again MAC address "
"%02x:%02x:%02x:%02x:%02x:%02x failed "
"err=%d",
mac_addr->addr_bytes[0],
mac_addr->addr_bytes[1],
mac_addr->addr_bytes[2],
mac_addr->addr_bytes[3],
mac_addr->addr_bytes[4],
mac_addr->addr_bytes[5],
err);
}
}
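/* Set the default MAC address (RAR entry 0) of the VF. */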
static int
txgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
struct rte_ether_addr *addr)
{
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
hw->mac.set_rar(hw, 0, (void *)addr, 0, 0);
return 0;
}
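/* Handle a pending PF-to-VF mailbox message: a PF control message signals a
* PF-initiated reset, which is reported to the application as
* RTE_ETH_EVENT_INTR_RESET.
*/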
static void txgbevf_mbx_process(struct rte_eth_dev *dev)
{
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
u32 in_msg = 0;
/* peek the message first */
in_msg = rd32(hw, TXGBE_VFMBX);
/* PF reset VF event */
if (in_msg == TXGBE_PF_CONTROL_MSG) {
/* dummy mbx read to ack pf */
if (txgbe_read_mbx(hw, &in_msg, 1, 0))
return;
rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
NULL);
}
}
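/* Read the VF interrupt cause register and record the causes to be handled
* (only the mailbox cause is supported on the VF).
*/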
static int
txgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
uint32_t eicr;
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
txgbevf_intr_disable(dev);
/* read the clear-on-read NIC registers here */
eicr = rd32(hw, TXGBE_VFICR);
intr->flags = 0;
/* only one misc vector supported - mailbox */
eicr &= TXGBE_VFICR_MASK;
/* Workaround for lost ICR */
intr->flags |= TXGBE_FLAG_MAILBOX;
return 0;
}
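/* Handle the causes recorded by txgbevf_dev_interrupt_get_status() and
* re-enable interrupts.
*/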
static int
txgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
{
struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
if (intr->flags & TXGBE_FLAG_MAILBOX) {
txgbevf_mbx_process(dev);
intr->flags &= ~TXGBE_FLAG_MAILBOX;
}
txgbevf_intr_enable(dev);
return 0;
}
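/* VF interrupt handler registered with the EAL: retrieve the pending causes
* and dispatch them.
*/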
static void
txgbevf_dev_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
txgbevf_dev_interrupt_get_status(dev);
txgbevf_dev_interrupt_action(dev);
}
/*
* dev_ops for the virtual function; only the bare necessities for basic
* VF operation have been implemented
*/
static const struct eth_dev_ops txgbevf_eth_dev_ops = {
.dev_infos_get = txgbevf_dev_info_get,
.rx_queue_intr_enable = txgbevf_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = txgbevf_dev_rx_queue_intr_disable,
.mac_addr_add = txgbevf_add_mac_addr,
.mac_addr_remove = txgbevf_remove_mac_addr,
.rxq_info_get = txgbe_rxq_info_get,
.txq_info_get = txgbe_txq_info_get,
.mac_addr_set = txgbevf_set_default_mac_addr,
};
RTE_PMD_REGISTER_PCI(net_txgbe_vf, rte_txgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe_vf, pci_id_txgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe_vf, "* igb_uio | vfio-pci");