net/iavf: fix multi-process shared data
When the iavf_adapter instance is not completely initialized in the primary process and the secondary process accesses its "rte_eth_dev" member, the secondary process crashes. This patch replaces eth_dev with eth_dev_data in iavf_adapter so that only process-shared data is referenced.

Fixes: f978c1c9b3 ("net/iavf: add RSS hash parsing in AVX path")
Fixes: 9c9aa00403 ("net/iavf: add offload path for Rx AVX512 flex descriptor")
Fixes: 63660ea3ee ("net/iavf: add RSS hash parsing in SSE path")
Cc: stable@dpdk.org

Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
This commit is contained in:
parent
dbabb7b157
commit
435d523112
@ -228,6 +228,8 @@ struct iavf_info {
|
||||
struct virtchnl_qos_cap_list *qos_cap;
|
||||
struct iavf_qtc_map *qtc_map;
|
||||
struct iavf_tm_conf tm_conf;
|
||||
|
||||
struct rte_eth_dev *eth_dev;
|
||||
};
|
||||
|
||||
#define IAVF_MAX_PKT_TYPE 1024
|
||||
@ -256,7 +258,7 @@ struct iavf_devargs {
|
||||
/* Structure to store private data for each VF instance. */
|
||||
struct iavf_adapter {
|
||||
struct iavf_hw hw;
|
||||
struct rte_eth_dev *eth_dev;
|
||||
struct rte_eth_dev_data *dev_data;
|
||||
struct iavf_info vf;
|
||||
|
||||
bool rx_bulk_alloc_allowed;
|
||||
@ -282,8 +284,6 @@ struct iavf_adapter {
|
||||
(&(((struct iavf_vsi *)vsi)->adapter->hw))
|
||||
#define IAVF_VSI_TO_VF(vsi) \
|
||||
(&(((struct iavf_vsi *)vsi)->adapter->vf))
|
||||
#define IAVF_VSI_TO_ETH_DEV(vsi) \
|
||||
(((struct iavf_vsi *)vsi)->adapter->eth_dev)
|
||||
|
||||
static inline void
|
||||
iavf_init_adminq_parameter(struct iavf_hw *hw)
|
||||
@ -397,7 +397,7 @@ int iavf_rss_hash_set(struct iavf_adapter *ad, uint64_t rss_hf, bool add);
|
||||
int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
|
||||
struct rte_ether_addr *mc_addrs,
|
||||
uint32_t mc_addrs_num, bool add);
|
||||
int iavf_request_queues(struct iavf_adapter *adapter, uint16_t num);
|
||||
int iavf_request_queues(struct rte_eth_dev *dev, uint16_t num);
|
||||
int iavf_get_max_rss_queue_region(struct iavf_adapter *adapter);
|
||||
int iavf_get_qos_cap(struct iavf_adapter *adapter);
|
||||
int iavf_set_q_tc_map(struct rte_eth_dev *dev,
|
||||
|
@ -383,8 +383,8 @@ iavf_init_rss(struct iavf_adapter *adapter)
|
||||
uint16_t i, j, nb_q;
|
||||
int ret;
|
||||
|
||||
rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
|
||||
nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
|
||||
rss_conf = &adapter->dev_data->dev_conf.rx_adv_conf.rss_conf;
|
||||
nb_q = RTE_MIN(adapter->dev_data->nb_rx_queues,
|
||||
vf->max_rss_qregion);
|
||||
|
||||
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
|
||||
@ -438,7 +438,7 @@ iavf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num)
|
||||
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
|
||||
int ret;
|
||||
|
||||
ret = iavf_request_queues(ad, num);
|
||||
ret = iavf_request_queues(dev, num);
|
||||
if (ret) {
|
||||
PMD_DRV_LOG(ERR, "request queues from PF failed");
|
||||
return ret;
|
||||
@ -1388,7 +1388,7 @@ iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
|
||||
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
|
||||
int ret;
|
||||
|
||||
adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
|
||||
adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
|
||||
|
||||
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
|
||||
return -ENOTSUP;
|
||||
@ -2087,6 +2087,8 @@ iavf_init_vf(struct rte_eth_dev *dev)
|
||||
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
||||
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
|
||||
|
||||
vf->eth_dev = dev;
|
||||
|
||||
err = iavf_parse_devargs(dev);
|
||||
if (err) {
|
||||
PMD_INIT_LOG(ERR, "Failed to parse devargs");
|
||||
@ -2352,7 +2354,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
|
||||
hw->bus.func = pci_dev->addr.function;
|
||||
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
|
||||
hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
|
||||
adapter->eth_dev = eth_dev;
|
||||
adapter->dev_data = eth_dev->data;
|
||||
adapter->stopped = 1;
|
||||
|
||||
if (iavf_init_vf(eth_dev) != 0) {
|
||||
|
@ -431,7 +431,7 @@ iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
|
||||
}
|
||||
}
|
||||
|
||||
if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
|
||||
if (rss->queue[rss->queue_num - 1] >= ad->dev_data->nb_rx_queues) {
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ACTION, act,
|
||||
"Invalid queue region indexes.");
|
||||
@ -511,7 +511,7 @@ iavf_fdir_parse_action(struct iavf_adapter *ad,
|
||||
filter_action->act_conf.queue.index = act_q->index;
|
||||
|
||||
if (filter_action->act_conf.queue.index >=
|
||||
ad->eth_dev->data->nb_rx_queues) {
|
||||
ad->dev_data->nb_rx_queues) {
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ACTION,
|
||||
actions, "Invalid queue for FDIR.");
|
||||
|
@ -1365,7 +1365,7 @@ iavf_hash_uninit(struct iavf_adapter *ad)
|
||||
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF))
|
||||
return;
|
||||
|
||||
rss_conf = &ad->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
|
||||
rss_conf = &ad->dev_data->dev_conf.rx_adv_conf.rss_conf;
|
||||
if (iavf_rss_hash_set(ad, rss_conf->rss_hf, false))
|
||||
PMD_DRV_LOG(ERR, "fail to delete default RSS");
|
||||
|
||||
|
@ -577,8 +577,8 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
|
||||
|
||||
#define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
|
||||
int i; \
|
||||
for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
|
||||
struct iavf_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
|
||||
for (i = 0; i < (ad)->dev_data->nb_rx_queues; i++) { \
|
||||
struct iavf_rx_queue *rxq = (ad)->dev_data->rx_queues[i]; \
|
||||
if (!rxq) \
|
||||
continue; \
|
||||
rxq->fdir_enabled = on; \
|
||||
|
@ -524,7 +524,10 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
|
||||
{
|
||||
#define IAVF_DESCS_PER_LOOP_AVX 8
|
||||
|
||||
const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
|
||||
struct iavf_adapter *adapter = rxq->vsi->adapter;
|
||||
|
||||
uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
|
||||
const uint32_t *type_table = adapter->ptype_tbl;
|
||||
|
||||
const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
|
||||
0, rxq->mbuf_initializer);
|
||||
@ -903,9 +906,8 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
|
||||
* needs to load 2nd 16B of each desc for RSS hash parsing,
|
||||
* will cause performance drop to get into this context.
|
||||
*/
|
||||
if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
|
||||
DEV_RX_OFFLOAD_RSS_HASH ||
|
||||
rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
|
||||
if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
|
||||
rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
|
||||
/* load bottom half of every 32B desc */
|
||||
const __m128i raw_desc_bh7 =
|
||||
_mm_load_si128
|
||||
@ -956,8 +958,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
|
||||
(_mm256_castsi128_si256(raw_desc_bh0),
|
||||
raw_desc_bh1, 1);
|
||||
|
||||
if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
|
||||
DEV_RX_OFFLOAD_RSS_HASH) {
|
||||
if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
|
||||
/**
|
||||
* to shift the 32b RSS hash value to the
|
||||
* highest 32b of each 128b before mask
|
||||
|
@ -710,8 +710,12 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
|
||||
uint8_t *split_packet,
|
||||
bool offload)
|
||||
{
|
||||
struct iavf_adapter *adapter = rxq->vsi->adapter;
|
||||
|
||||
uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
|
||||
|
||||
#ifdef IAVF_RX_PTYPE_OFFLOAD
|
||||
const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
|
||||
const uint32_t *type_table = adapter->ptype_tbl;
|
||||
#endif
|
||||
|
||||
const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0,
|
||||
@ -1137,8 +1141,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
|
||||
* needs to load 2nd 16B of each desc for RSS hash parsing,
|
||||
* will cause performance drop to get into this context.
|
||||
*/
|
||||
if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
|
||||
DEV_RX_OFFLOAD_RSS_HASH ||
|
||||
if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
|
||||
rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
|
||||
/* load bottom half of every 32B desc */
|
||||
const __m128i raw_desc_bh7 =
|
||||
@ -1190,8 +1193,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
|
||||
(_mm256_castsi128_si256(raw_desc_bh0),
|
||||
raw_desc_bh1, 1);
|
||||
|
||||
if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
|
||||
DEV_RX_OFFLOAD_RSS_HASH) {
|
||||
if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
|
||||
/**
|
||||
* to shift the 32b RSS hash value to the
|
||||
* highest 32b of each 128b before mask
|
||||
|
@ -644,7 +644,9 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
|
||||
uint16_t nb_pkts_recd;
|
||||
int pos;
|
||||
uint64_t var;
|
||||
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
|
||||
struct iavf_adapter *adapter = rxq->vsi->adapter;
|
||||
uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
|
||||
const uint32_t *ptype_tbl = adapter->ptype_tbl;
|
||||
__m128i crc_adjust = _mm_set_epi16
|
||||
(0, 0, 0, /* ignore non-length fields */
|
||||
-rxq->crc_len, /* sub crc on data_len */
|
||||
@ -817,8 +819,7 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
|
||||
* needs to load 2nd 16B of each desc for RSS hash parsing,
|
||||
* will cause performance drop to get into this context.
|
||||
*/
|
||||
if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
|
||||
DEV_RX_OFFLOAD_RSS_HASH) {
|
||||
if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
|
||||
/* load bottom half of every 32B desc */
|
||||
const __m128i raw_desc_bh3 =
|
||||
_mm_load_si128
|
||||
|
@ -72,7 +72,6 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
|
||||
{
|
||||
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
|
||||
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
|
||||
struct rte_eth_dev *dev = adapter->eth_dev;
|
||||
struct iavf_arq_event_info event;
|
||||
enum iavf_aq_result result = IAVF_MSG_NON;
|
||||
enum virtchnl_ops opcode;
|
||||
@ -114,7 +113,7 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
|
||||
speed = vpe->event_data.link_event.link_speed;
|
||||
vf->link_speed = iavf_convert_link_speed(speed);
|
||||
}
|
||||
iavf_dev_link_update(dev, 0);
|
||||
iavf_dev_link_update(vf->eth_dev, 0);
|
||||
PMD_DRV_LOG(INFO, "Link status update:%s",
|
||||
vf->link_up ? "up" : "down");
|
||||
break;
|
||||
@ -690,8 +689,8 @@ iavf_enable_queues(struct iavf_adapter *adapter)
|
||||
memset(&queue_select, 0, sizeof(queue_select));
|
||||
queue_select.vsi_id = vf->vsi_res->vsi_id;
|
||||
|
||||
queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
|
||||
queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
|
||||
queue_select.rx_queues = BIT(adapter->dev_data->nb_rx_queues) - 1;
|
||||
queue_select.tx_queues = BIT(adapter->dev_data->nb_tx_queues) - 1;
|
||||
|
||||
args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
|
||||
args.in_args = (u8 *)&queue_select;
|
||||
@ -718,8 +717,8 @@ iavf_disable_queues(struct iavf_adapter *adapter)
|
||||
memset(&queue_select, 0, sizeof(queue_select));
|
||||
queue_select.vsi_id = vf->vsi_res->vsi_id;
|
||||
|
||||
queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
|
||||
queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
|
||||
queue_select.rx_queues = BIT(adapter->dev_data->nb_rx_queues) - 1;
|
||||
queue_select.tx_queues = BIT(adapter->dev_data->nb_tx_queues) - 1;
|
||||
|
||||
args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
|
||||
args.in_args = (u8 *)&queue_select;
|
||||
@ -789,12 +788,12 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
|
||||
queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
|
||||
queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
|
||||
queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
|
||||
adapter->eth_dev->data->nb_tx_queues;
|
||||
adapter->dev_data->nb_tx_queues;
|
||||
|
||||
queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
|
||||
queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
|
||||
queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
|
||||
adapter->eth_dev->data->nb_rx_queues;
|
||||
adapter->dev_data->nb_rx_queues;
|
||||
|
||||
args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
|
||||
args.in_args = (u8 *)queue_select;
|
||||
@ -833,12 +832,12 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
|
||||
queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
|
||||
queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
|
||||
queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
|
||||
adapter->eth_dev->data->nb_tx_queues;
|
||||
adapter->dev_data->nb_tx_queues;
|
||||
|
||||
queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
|
||||
queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
|
||||
queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
|
||||
adapter->eth_dev->data->nb_rx_queues;
|
||||
adapter->dev_data->nb_rx_queues;
|
||||
|
||||
args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
|
||||
args.in_args = (u8 *)queue_select;
|
||||
@ -969,9 +968,9 @@ iavf_configure_queues(struct iavf_adapter *adapter,
|
||||
uint16_t num_queue_pairs, uint16_t index)
|
||||
{
|
||||
struct iavf_rx_queue **rxq =
|
||||
(struct iavf_rx_queue **)adapter->eth_dev->data->rx_queues;
|
||||
(struct iavf_rx_queue **)adapter->dev_data->rx_queues;
|
||||
struct iavf_tx_queue **txq =
|
||||
(struct iavf_tx_queue **)adapter->eth_dev->data->tx_queues;
|
||||
(struct iavf_tx_queue **)adapter->dev_data->tx_queues;
|
||||
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
|
||||
struct virtchnl_vsi_queue_config_info *vc_config;
|
||||
struct virtchnl_queue_pair_info *vc_qp;
|
||||
@ -995,7 +994,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
|
||||
vc_qp->txq.queue_id = i;
|
||||
|
||||
/* Virtchnnl configure tx queues by pairs */
|
||||
if (i < adapter->eth_dev->data->nb_tx_queues) {
|
||||
if (i < adapter->dev_data->nb_tx_queues) {
|
||||
vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
|
||||
vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
|
||||
}
|
||||
@ -1004,7 +1003,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
|
||||
vc_qp->rxq.queue_id = i;
|
||||
vc_qp->rxq.max_pkt_size = vf->max_pkt_len;
|
||||
|
||||
if (i >= adapter->eth_dev->data->nb_rx_queues)
|
||||
if (i >= adapter->dev_data->nb_rx_queues)
|
||||
continue;
|
||||
|
||||
/* Virtchnnl configure rx queues by pairs */
|
||||
@ -1073,7 +1072,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
|
||||
return -ENOMEM;
|
||||
|
||||
map_info->num_vectors = vf->nb_msix;
|
||||
for (i = 0; i < adapter->eth_dev->data->nb_rx_queues; i++) {
|
||||
for (i = 0; i < adapter->dev_data->nb_rx_queues; i++) {
|
||||
vecmap =
|
||||
&map_info->vecmap[vf->qv_map[i].vector_id - vf->msix_base];
|
||||
vecmap->vsi_id = vf->vsi_res->vsi_id;
|
||||
@ -1152,7 +1151,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
|
||||
j = 0;
|
||||
len = sizeof(struct virtchnl_ether_addr_list);
|
||||
for (i = begin; i < IAVF_NUM_MACADDR_MAX; i++, next_begin++) {
|
||||
addr = &adapter->eth_dev->data->mac_addrs[i];
|
||||
addr = &adapter->dev_data->mac_addrs[i];
|
||||
if (rte_is_zero_ether_addr(addr))
|
||||
continue;
|
||||
len += sizeof(struct virtchnl_ether_addr);
|
||||
@ -1169,7 +1168,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
|
||||
}
|
||||
|
||||
for (i = begin; i < next_begin; i++) {
|
||||
addr = &adapter->eth_dev->data->mac_addrs[i];
|
||||
addr = &adapter->dev_data->mac_addrs[i];
|
||||
if (rte_is_zero_ether_addr(addr))
|
||||
continue;
|
||||
rte_memcpy(list->list[j].addr, addr->addr_bytes,
|
||||
@ -1653,9 +1652,10 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
|
||||
}
|
||||
|
||||
int
|
||||
iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)
|
||||
iavf_request_queues(struct rte_eth_dev *dev, uint16_t num)
|
||||
{
|
||||
struct rte_eth_dev *dev = adapter->eth_dev;
|
||||
struct iavf_adapter *adapter =
|
||||
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
|
||||
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
|
||||
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
|
||||
struct virtchnl_vf_res_request vfres;
|
||||
|
Loading…
Reference in New Issue
Block a user