/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _E1000_ETHDEV_H_
#define _E1000_ETHDEV_H_
#include <stdint.h>

#include <rte_flow.h>
#include <rte_time.h>
#include <rte_pci.h>

#define E1000_INTEL_VENDOR_ID 0x8086
/* need to update link, bit flag */
#define E1000_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
#define E1000_FLAG_MAILBOX (uint32_t)(1 << 1)
/*
 * Defines that were not part of e1000_hw.h as they are not used by the FreeBSD
 * driver.
 */
#define E1000_ADVTXD_POPTS_TXSM 0x00000200 /* L4 Checksum offload request */
#define E1000_ADVTXD_POPTS_IXSM 0x00000100 /* IP Checksum offload request */
#define E1000_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* L4 Packet TYPE of Reserved */
#define E1000_RXD_STAT_TMST 0x10000 /* Timestamped Packet indication */
#define E1000_RXD_ERR_CKSUM_BIT 29
#define E1000_RXD_ERR_CKSUM_MSK 3
#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Bit shift for l2_len */
#define E1000_CTRL_EXT_EXTEND_VLAN (1<<26) /* EXTENDED VLAN */
#define IGB_VFTA_SIZE 128

#define IGB_HKEY_MAX_INDEX 10
#define IGB_MAX_RX_QUEUE_NUM 8
#define IGB_MAX_RX_QUEUE_NUM_82576 16

#define E1000_I219_MAX_RX_QUEUE_NUM 2
#define E1000_I219_MAX_TX_QUEUE_NUM 2

#define E1000_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */
#define E1000_SYN_FILTER_QUEUE 0x0000000E /* syn filter queue field */
#define E1000_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field shift */
#define E1000_RFCTL_SYNQFP 0x00080000 /* SYNQFP in RFCTL register */
#define E1000_ETQF_ETHERTYPE 0x0000FFFF
#define E1000_ETQF_QUEUE 0x00070000
#define E1000_ETQF_QUEUE_SHIFT 16
#define E1000_MAX_ETQF_FILTERS 8

#define E1000_IMIR_DSTPORT 0x0000FFFF
#define E1000_IMIR_PRIORITY 0xE0000000
#define E1000_MAX_TTQF_FILTERS 8
#define E1000_2TUPLE_MAX_PRI 7

#define E1000_MAX_FLEX_FILTERS 8
#define E1000_MAX_FHFT 4
#define E1000_MAX_FHFT_EXT 4
#define E1000_FHFT_SIZE_IN_DWD 64
#define E1000_MAX_FLEX_FILTER_PRI 7
#define E1000_MAX_FLEX_FILTER_LEN 128
#define E1000_MAX_FLEX_FILTER_DWDS \
	(E1000_MAX_FLEX_FILTER_LEN / sizeof(uint32_t))
#define E1000_FLEX_FILTERS_MASK_SIZE \
	(E1000_MAX_FLEX_FILTER_DWDS / 2)
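/*
 * Illustrative arithmetic derived from the defines above (not a new
 * definition): with E1000_MAX_FLEX_FILTER_LEN = 128 and 4-byte dwords,
 * E1000_MAX_FLEX_FILTER_DWDS evaluates to 32 and
 * E1000_FLEX_FILTERS_MASK_SIZE to 16 bytes, i.e. 128 mask bits --
 * one mask bit per flex pattern byte.
 */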
#define E1000_FHFT_QUEUEING_LEN 0x0000007F
#define E1000_FHFT_QUEUEING_QUEUE 0x00000700
#define E1000_FHFT_QUEUEING_PRIO 0x00070000
#define E1000_FHFT_QUEUEING_OFFSET 0xFC
#define E1000_FHFT_QUEUEING_QUEUE_SHIFT 8
#define E1000_FHFT_QUEUEING_PRIO_SHIFT 16
#define E1000_WUFC_FLEX_HQ 0x00004000

#define E1000_SPQF_SRCPORT 0x0000FFFF

#define E1000_MAX_FTQF_FILTERS 8
#define E1000_FTQF_PROTOCOL_MASK 0x000000FF
#define E1000_FTQF_5TUPLE_MASK_SHIFT 28
#define E1000_FTQF_QUEUE_MASK 0x03ff0000
#define E1000_FTQF_QUEUE_SHIFT 16
#define E1000_FTQF_QUEUE_ENABLE 0x00000100
#define IGB_RSS_OFFLOAD_ALL ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_IPV6_EX | \
	ETH_RSS_IPV6_TCP_EX | \
	ETH_RSS_IPV6_UDP_EX)

/*
 * The overhead from MTU to max frame size.
 * A VLAN tag is included, so it needs to be counted.
 */
#define E1000_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
	VLAN_TAG_SIZE)
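/*
 * Worked example (illustrative only): with RTE_ETHER_HDR_LEN = 14,
 * RTE_ETHER_CRC_LEN = 4 and VLAN_TAG_SIZE = 4, E1000_ETH_OVERHEAD is
 * 22 bytes, so a 1500-byte MTU corresponds to a 1522-byte max frame size.
 */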
/*
 * Maximum number of Ring Descriptors.
 *
 * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
 */
#define E1000_MIN_RING_DESC 32
#define E1000_MAX_RING_DESC 4096
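/*
 * Quick check of the condition above (illustrative, assuming the usual
 * 16-byte descriptor size): 32 * 16 = 512 and 4096 * 16 = 65536, both
 * multiples of 128, so E1000_MIN_RING_DESC and E1000_MAX_RING_DESC are
 * valid ring sizes.
 */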
/*
 * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
 * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
 * This will also optimize cache line size effect.
 * H/W supports up to cache line size 128.
 */
#define E1000_ALIGN 128

#define IGB_RXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_rx_desc))
#define IGB_TXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_tx_desc))

#define EM_RXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_rx_desc))
#define EM_TXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_data_desc))
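/*
 * Illustrative evaluation, assuming 16-byte RX/TX descriptors: each of the
 * *_RXD_ALIGN / *_TXD_ALIGN macros above comes out to 128 / 16 = 8
 * descriptors, the granularity that keeps RDLEN/TDLEN a multiple of
 * 128 bytes.
 */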
#define E1000_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define E1000_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET

#define IGB_TX_MAX_SEG UINT8_MAX
#define IGB_TX_MAX_MTU_SEG UINT8_MAX
#define EM_TX_MAX_SEG UINT8_MAX
#define EM_TX_MAX_MTU_SEG UINT8_MAX

#define MAC_TYPE_FILTER_SUP(type) do {\
	if ((type) != e1000_82580 && (type) != e1000_i350 &&\
		(type) != e1000_82576 && (type) != e1000_i210 &&\
		(type) != e1000_i211)\
		return -ENOTSUP;\
} while (0)

#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
	if ((type) != e1000_82580 && (type) != e1000_i350 &&\
		(type) != e1000_i210 && (type) != e1000_i211)\
		return -ENOTSUP; \
} while (0)
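/*
 * Usage sketch (illustrative only): filter-configuration code typically
 * invokes one of these checks before touching the hardware, e.g.
 *
 *	MAC_TYPE_FILTER_SUP(hw->mac.type);
 *
 * so that the calling function returns -ENOTSUP early on MAC types that do
 * not support the filter in question.
 */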
/* structure for interrupt-related data */
struct e1000_interrupt {
	uint32_t flags;
	uint32_t mask;
};

/* local vfta copy */
struct e1000_vfta {
	uint32_t vfta[IGB_VFTA_SIZE];
};
/*
 * VF data used by the PF host only
 */
#define E1000_MAX_VF_MC_ENTRIES 30
struct e1000_vf_info {
	uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];
	uint16_t vf_mc_hashes[E1000_MAX_VF_MC_ENTRIES];
	uint16_t num_vf_mc_hashes;
	uint16_t default_vf_vlan_id;
	uint16_t vlans_enabled;
	uint16_t pf_qos;
	uint16_t vlan_count;
	uint16_t tx_rate;
};
TAILQ_HEAD(e1000_flex_filter_list, e1000_flex_filter);

struct e1000_flex_filter_info {
	uint16_t len;
	uint32_t dwords[E1000_MAX_FLEX_FILTER_DWDS]; /* flex bytes in dword. */
	/* if mask bit is 1b, do not compare corresponding byte in dwords. */
	uint8_t mask[E1000_FLEX_FILTERS_MASK_SIZE];
	uint8_t priority;
};

/* Flex filter structure */
struct e1000_flex_filter {
	TAILQ_ENTRY(e1000_flex_filter) entries;
	uint16_t index; /* index of flex filter */
	struct e1000_flex_filter_info filter_info;
	uint16_t queue; /* rx queue assigned to */
};

TAILQ_HEAD(e1000_5tuple_filter_list, e1000_5tuple_filter);
TAILQ_HEAD(e1000_2tuple_filter_list, e1000_2tuple_filter);
struct e1000_5tuple_filter_info {
	uint32_t dst_ip;
	uint32_t src_ip;
	uint16_t dst_port;
	uint16_t src_port;
	uint8_t proto; /* l4 protocol. */
	/*
	 * A packet matching the 5-tuple above with any of these TCP flag
	 * bits set will hit this filter.
	 */
	uint8_t tcp_flags;
	uint8_t priority; /* seven levels (001b-111b), 111b is highest,
			     used when more than one filter matches. */
	uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
		src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
		dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
		src_port_mask:1, /* if mask is 1b, do not compare src port. */
		proto_mask:1;    /* if mask is 1b, do not compare protocol. */
};
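/*
 * Example (illustrative only, following the mask semantics documented
 * above): to steer TCP traffic destined to port 80 regardless of addresses
 * and source port, a filter would set dst_port = 80 and proto = IPPROTO_TCP,
 * leave dst_port_mask and proto_mask at 0, and set dst_ip_mask, src_ip_mask
 * and src_port_mask to 1 so those fields are ignored.
 */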
struct e1000_2tuple_filter_info {
	uint16_t dst_port;
	uint8_t proto; /* l4 protocol. */
	/*
	 * A packet matching the 2-tuple above with any of these TCP flag
	 * bits set will hit this filter.
	 */
	uint8_t tcp_flags;
	uint8_t priority; /* seven levels (001b-111b), 111b is highest,
			     used when more than one filter matches. */
	uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
		src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
		dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
		src_port_mask:1, /* if mask is 1b, do not compare src port. */
		proto_mask:1;    /* if mask is 1b, do not compare protocol. */
};
/* 5tuple filter structure */
struct e1000_5tuple_filter {
	TAILQ_ENTRY(e1000_5tuple_filter) entries;
	uint16_t index; /* the index of 5tuple filter */
	struct e1000_5tuple_filter_info filter_info;
	uint16_t queue; /* rx queue assigned to */
};

/* 2tuple filter structure */
struct e1000_2tuple_filter {
	TAILQ_ENTRY(e1000_2tuple_filter) entries;
	uint16_t index; /* the index of 2tuple filter */
	struct e1000_2tuple_filter_info filter_info;
	uint16_t queue; /* rx queue assigned to */
};

/* ethertype filter structure */
struct igb_ethertype_filter {
	uint16_t ethertype;
	uint32_t etqf;
};
struct igb_rte_flow_rss_conf {
	struct rte_flow_action_rss conf; /**< RSS parameters. */
	uint8_t key[IGB_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
	/* Queue indices to use. */
	uint16_t queue[IGB_MAX_RX_QUEUE_NUM_82576];
};
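/*
 * Sizing note (derived from the defines above): the stored hash key is
 * IGB_HKEY_MAX_INDEX * sizeof(uint32_t) = 40 bytes, and the queue array is
 * dimensioned for the 82576 maximum of 16 RX queues.
 */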
/*
 * Structure to store filters' info.
 */
struct e1000_filter_info {
	uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
	/* store used ethertype filters */
	struct igb_ethertype_filter ethertype_filters[E1000_MAX_ETQF_FILTERS];
	uint8_t flex_mask; /* Bit mask for every used flex filter */
	struct e1000_flex_filter_list flex_list;
	/* Bit mask for every used 5tuple filter */
	uint8_t fivetuple_mask;
	struct e1000_5tuple_filter_list fivetuple_list;
	/* Bit mask for every used 2tuple filter */
	uint8_t twotuple_mask;
	struct e1000_2tuple_filter_list twotuple_list;
	/* store the SYN filter info */
	uint32_t syn_info;
	/* store the rss filter info */
	struct igb_rte_flow_rss_conf rss_info;
};
/*
 * Structure to store private data for each driver instance (for each port).
 */
struct e1000_adapter {
	struct e1000_hw hw;
	struct e1000_hw_stats stats;
	struct e1000_interrupt intr;
	struct e1000_vfta shadow_vfta;
	struct e1000_vf_info *vfdata;
	struct e1000_filter_info filter;
	bool stopped;
	struct rte_timecounter systime_tc;
	struct rte_timecounter rx_tstamp_tc;
	struct rte_timecounter tx_tstamp_tc;
};
#define E1000_DEV_PRIVATE(adapter) \
	((struct e1000_adapter *)adapter)

#define E1000_DEV_PRIVATE_TO_HW(adapter) \
	(&((struct e1000_adapter *)adapter)->hw)

#define E1000_DEV_PRIVATE_TO_STATS(adapter) \
	(&((struct e1000_adapter *)adapter)->stats)

#define E1000_DEV_PRIVATE_TO_INTR(adapter) \
	(&((struct e1000_adapter *)adapter)->intr)

#define E1000_DEV_PRIVATE_TO_VFTA(adapter) \
	(&((struct e1000_adapter *)adapter)->shadow_vfta)

#define E1000_DEV_PRIVATE_TO_P_VFDATA(adapter) \
	(&((struct e1000_adapter *)adapter)->vfdata)

#define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
	(&((struct e1000_adapter *)adapter)->filter)
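/*
 * Usage sketch (illustrative only): ethdev callbacks typically recover the
 * per-port structures from dev->data->dev_private, e.g.
 *
 *	struct e1000_hw *hw =
 *		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 *	struct e1000_filter_info *filter_info =
 *		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
 */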
struct rte_flow {
	enum rte_filter_type filter_type;
	void *rule;
};
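/*
 * Note (assumption based on how the flow lists below are used): filter_type
 * records which kind of filter this flow represents, and rule is expected to
 * point at the matching element in one of the per-type lists declared below.
 */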
/* ntuple filter list structure */
struct igb_ntuple_filter_ele {
	TAILQ_ENTRY(igb_ntuple_filter_ele) entries;
	struct rte_eth_ntuple_filter filter_info;
};

/* ethertype filter list structure */
struct igb_ethertype_filter_ele {
	TAILQ_ENTRY(igb_ethertype_filter_ele) entries;
	struct rte_eth_ethertype_filter filter_info;
};

/* syn filter list structure */
struct igb_eth_syn_filter_ele {
	TAILQ_ENTRY(igb_eth_syn_filter_ele) entries;
	struct rte_eth_syn_filter filter_info;
};

/* flex filter list structure */
struct igb_flex_filter_ele {
	TAILQ_ENTRY(igb_flex_filter_ele) entries;
	struct rte_eth_flex_filter filter_info;
};

/* rss filter list structure */
struct igb_rss_conf_ele {
	TAILQ_ENTRY(igb_rss_conf_ele) entries;
	struct igb_rte_flow_rss_conf filter_info;
};

/* igb_flow memory list structure */
struct igb_flow_mem {
	TAILQ_ENTRY(igb_flow_mem) entries;
	struct rte_flow *flow;
	struct rte_eth_dev *dev;
};
TAILQ_HEAD(igb_ntuple_filter_list, igb_ntuple_filter_ele);
extern struct igb_ntuple_filter_list igb_filter_ntuple_list;
TAILQ_HEAD(igb_ethertype_filter_list, igb_ethertype_filter_ele);
extern struct igb_ethertype_filter_list igb_filter_ethertype_list;
TAILQ_HEAD(igb_syn_filter_list, igb_eth_syn_filter_ele);
extern struct igb_syn_filter_list igb_filter_syn_list;
TAILQ_HEAD(igb_flex_filter_list, igb_flex_filter_ele);
extern struct igb_flex_filter_list igb_filter_flex_list;
TAILQ_HEAD(igb_rss_filter_list, igb_rss_conf_ele);
extern struct igb_rss_filter_list igb_filter_rss_list;
TAILQ_HEAD(igb_flow_mem_list, igb_flow_mem);
extern struct igb_flow_mem_list igb_flow_list;

extern const struct rte_flow_ops igb_flow_ops;
/*
 * RX/TX IGB function prototypes
 */
void eth_igb_tx_queue_release(void *txq);
void eth_igb_rx_queue_release(void *rxq);
void igb_dev_clear_queues(struct rte_eth_dev *dev);
void igb_dev_free_queues(struct rte_eth_dev *dev);

uint64_t igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);

int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
	uint16_t nb_rx_desc, unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mb_pool);
uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev,
	uint16_t rx_queue_id);

int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);

int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);

uint64_t igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);

int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
	uint16_t nb_tx_desc, unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf);

int eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt);

int eth_igb_rx_init(struct rte_eth_dev *dev);

void eth_igb_tx_init(struct rte_eth_dev *dev);

uint16_t eth_igb_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts);

uint16_t eth_igb_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts);

uint16_t eth_igb_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts);

uint16_t eth_igb_recv_scattered_pkts(void *rxq,
	struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

int eth_igb_rss_hash_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf);

int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf);

int eth_igbvf_rx_init(struct rte_eth_dev *dev);

void eth_igbvf_tx_init(struct rte_eth_dev *dev);
/*
 * misc function prototypes
 */
void igb_pf_host_init(struct rte_eth_dev *eth_dev);

void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);

int igb_pf_host_configure(struct rte_eth_dev *eth_dev);

void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo);

void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo);

uint32_t em_get_max_pktlen(struct rte_eth_dev *dev);
/*
 * RX/TX EM function prototypes
 */
void eth_em_tx_queue_release(void *txq);
void eth_em_rx_queue_release(void *rxq);

void em_dev_clear_queues(struct rte_eth_dev *dev);
void em_dev_free_queues(struct rte_eth_dev *dev);

uint64_t em_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);

int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
	uint16_t nb_rx_desc, unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mb_pool);

uint32_t eth_em_rx_queue_count(struct rte_eth_dev *dev,
	uint16_t rx_queue_id);

int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);

int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);

uint64_t em_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);

int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
	uint16_t nb_tx_desc, unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf);

int eth_em_rx_init(struct rte_eth_dev *dev);

void eth_em_tx_init(struct rte_eth_dev *dev);

uint16_t eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts);

uint16_t eth_em_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts);

uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts);

uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts);

void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo);

void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo);
void igb_pf_host_uninit(struct rte_eth_dev *dev);

void igb_filterlist_flush(struct rte_eth_dev *dev);
int igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
	struct e1000_5tuple_filter *filter);
int igb_delete_2tuple_filter(struct rte_eth_dev *dev,
	struct e1000_2tuple_filter *filter);
void igb_remove_flex_filter(struct rte_eth_dev *dev,
	struct e1000_flex_filter *filter);
int igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
	uint8_t idx);
int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
	struct rte_eth_ntuple_filter *ntuple_filter, bool add);
int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
	struct rte_eth_ethertype_filter *filter,
	bool add);
int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
	struct rte_eth_syn_filter *filter,
	bool add);
int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
	struct rte_eth_flex_filter *filter,
	bool add);
int igb_rss_conf_init(struct rte_eth_dev *dev,
	struct igb_rte_flow_rss_conf *out,
	const struct rte_flow_action_rss *in);
int igb_action_rss_same(const struct rte_flow_action_rss *comp,
	const struct rte_flow_action_rss *with);
int igb_config_rss_filter(struct rte_eth_dev *dev,
	struct igb_rte_flow_rss_conf *conf,
	bool add);
void em_flush_desc_rings(struct rte_eth_dev *dev);

#endif /* _E1000_ETHDEV_H_ */