net/hns3: support Rx descriptor advanced layout

Currently, the driver gets the packet type by parsing the
L3_ID/L4_ID/OL3_ID/OL4_ID fields from the Rx descriptor and then looking
up multiple tables, which is time consuming.

Kunpeng930 now supports an advanced RXD layout, which:
1. Combines OL3_ID/OL4_ID into an 8-bit PTYPE field, so the driver can
   get the packet type with a single table lookup. Note: L3_ID/L4_ID
   become reserved fields.
2. Places the 1588 timestamp in the Rx descriptor instead of requiring
   a query from firmware.
3. Guarantees that L3E/L4E/OL3E/OL4E are zero when L3L4P is zero, so
   the driver can optimize the good-checksum handling (when L3E/L4E is
   zero, mark PKT_RX_IP_CKSUM_GOOD/PKT_RX_L4_CKSUM_GOOD), as the sketch
   after this list illustrates.
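
For illustration, a minimal C sketch (not part of this patch) of how an
Rx path can exploit the two properties above; the helper name
adv_layout_parse is hypothetical, and the sketch assumes the driver's
existing hns3_get_bit()/hns3_get_field() helpers and the
HNS3_RXD_L3E_B/HNS3_RXD_L4E_B descriptor bits:

/*
 * Illustrative only: resolve the packet type with a single PTYPE lookup
 * and report good checksums based on the zero-L3E/L4E guarantee.
 */
static inline void
adv_layout_parse(struct rte_mbuf *m, struct hns3_rx_queue *rxq,
		 uint32_t l234_info, uint32_t ol_info)
{
	/* One lookup replaces the L3_ID/L4_ID/OL3_ID/OL4_ID parse. */
	uint32_t pt = hns3_get_field(ol_info, HNS3_RXD_PTYPE_M,
				     HNS3_RXD_PTYPE_S);

	m->packet_type = rxq->ptype_tbl->ptype[pt];

	/*
	 * L3E/L4E are guaranteed zero when L3L4P is zero, so a plain
	 * zero test is enough to mark the checksums good.
	 */
	if (hns3_get_bit(l234_info, HNS3_RXD_L3E_B) == 0)
		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
	if (hns3_get_bit(l234_info, HNS3_RXD_L4E_B) == 0)
		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
}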

For compatibility, the firmware reports the capability of the RXD
advanced layout; the driver identifies it and enables the layout by
default.

This patch only provides the basic functionality: identifying and
enabling the RXD advanced layout, and looking up the ptype table when
supported.
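
As a usage sketch (not in this patch), an application can observe the
resulting packet-type coverage through the standard ethdev API; the
port_id and buffer sizes below are illustrative:

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf_ptype.h>

/*
 * Print the packet types a port reports; with the RXD advanced layout
 * enabled, hns3 resolves these from the single PTYPE field of the Rx
 * descriptor.
 */
static void
show_supported_ptypes(uint16_t port_id)
{
	uint32_t ptypes[128];
	char name[64];
	int i, num;

	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
					       ptypes, RTE_DIM(ptypes));
	if (num > (int)RTE_DIM(ptypes))
		num = RTE_DIM(ptypes);
	for (i = 0; i < num; i++) {
		rte_get_ptype_name(ptypes[i], name, sizeof(name));
		printf("0x%08x: %s\n", ptypes[i], name);
	}
}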

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Commit fb5e906940 (parent 2e4859f3b3)
Chengwen Feng, 2021-03-04 15:44:49 +08:00; committed by Ferruh Yigit
8 changed files with 243 additions and 2 deletions

drivers/net/hns3/hns3_cmd.c

@@ -409,8 +409,9 @@ hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
return retval;
}
-static void hns3_parse_capability(struct hns3_hw *hw,
-				  struct hns3_query_version_cmd *cmd)
+static void
+hns3_parse_capability(struct hns3_hw *hw,
+		      struct hns3_query_version_cmd *cmd)
{
uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);
@@ -429,6 +430,9 @@ static void hns3_parse_capability(struct hns3_hw *hw,
hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
1);
}
static uint32_t

drivers/net/hns3/hns3_cmd.h

@@ -312,6 +312,11 @@ enum HNS3_CAPS_BITS {
HNS3_CAPS_TQP_TXRX_INDEP_B,
HNS3_CAPS_HW_PAD_B,
HNS3_CAPS_STASH_B,
HNS3_CAPS_UDP_TUNNEL_CSUM_B,
HNS3_CAPS_RAS_IMP_B,
HNS3_CAPS_FEC_B,
HNS3_CAPS_PAUSE_B,
HNS3_CAPS_RXD_ADV_LAYOUT_B,
};
enum HNS3_API_CAP_BITS {

drivers/net/hns3/hns3_ethdev.c

@@ -4970,6 +4970,8 @@ hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
return ret;
}
hns3_enable_rxd_adv_layout(hw);
ret = hns3_init_queues(hns, reset_queue);
if (ret) {
PMD_INIT_LOG(ERR, "failed to init queues, ret = %d.", ret);

drivers/net/hns3/hns3_ethdev.h

@@ -667,8 +667,13 @@ struct hns3_mp_param {
#define HNS3_OL2TBL_NUM 4
#define HNS3_OL3TBL_NUM 16
#define HNS3_OL4TBL_NUM 16
#define HNS3_PTYPE_NUM 256
struct hns3_ptype_table {
/*
 * The following fields are used to calculate the packet type based on
 * the L3_ID/L4_ID/OL3_ID/OL4_ID from the Rx descriptor.
 */
uint32_t l2l3table[HNS3_L2TBL_NUM][HNS3_L3TBL_NUM];
uint32_t l4table[HNS3_L4TBL_NUM];
uint32_t inner_l2table[HNS3_L2TBL_NUM];
@@ -677,6 +682,13 @@ struct hns3_ptype_table {
uint32_t ol2table[HNS3_OL2TBL_NUM];
uint32_t ol3table[HNS3_OL3TBL_NUM];
uint32_t ol4table[HNS3_OL4TBL_NUM];
/*
 * The following field is used to calculate the packet type based on
 * the PTYPE from the Rx descriptor. It is used only when the firmware
 * reports the HNS3_CAPS_RXD_ADV_LAYOUT_B capability and the driver has
 * enabled it.
 */
uint32_t ptype[HNS3_PTYPE_NUM] __rte_cache_min_aligned;
};
#define HNS3_FIXED_MAX_TQP_NUM_MODE 0
@@ -771,6 +783,7 @@ struct hns3_adapter {
#define HNS3_DEV_SUPPORT_TX_PUSH_B 0x5
#define HNS3_DEV_SUPPORT_INDEP_TXRX_B 0x6
#define HNS3_DEV_SUPPORT_STASH_B 0x7
#define HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B 0x9
#define hns3_dev_dcb_supported(hw) \
hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_DCB_B)
@@ -801,6 +814,9 @@ struct hns3_adapter {
#define hns3_dev_stash_supported(hw) \
hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_STASH_B)
#define hns3_dev_rxd_adv_layout_supported(hw) \
hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B)
#define HNS3_DEV_PRIVATE_TO_HW(adapter) \
(&((struct hns3_adapter *)adapter)->hw)
#define HNS3_DEV_PRIVATE_TO_PF(adapter) \

drivers/net/hns3/hns3_ethdev_vf.c

@@ -2125,6 +2125,8 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
if (ret)
return ret;
hns3_enable_rxd_adv_layout(hw);
ret = hns3_init_queues(hns, reset_queue);
if (ret)
hns3_err(hw, "failed to init queues, ret = %d.", ret);

drivers/net/hns3/hns3_regs.h

@@ -36,6 +36,7 @@
#define HNS3_GLOBAL_RESET_REG 0x20A00
#define HNS3_FUN_RST_ING 0x20C00
#define HNS3_GRO_EN_REG 0x28000
#define HNS3_RXD_ADV_LAYOUT_EN_REG 0x28008
/* Vector0 register bits for reset */
#define HNS3_VECTOR0_FUNCRESET_INT_B 0

drivers/net/hns3/hns3_rxtx.c

@@ -1802,6 +1802,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
HNS3_PORT_BASE_VLAN_ENABLE;
else
rxq->pvid_sw_discard_en = false;
rxq->ptype_en = hns3_dev_rxd_adv_layout_supported(hw) ? true : false;
rxq->configured = true;
rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
idx * HNS3_TQP_REG_SIZE);
@@ -1987,6 +1988,193 @@ hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
}
static void
hns3_init_adv_layout_ptype(struct hns3_ptype_table *tbl)
{
uint32_t *ptype = tbl->ptype;
/* Non-tunnel L2 */
ptype[1] = RTE_PTYPE_L2_ETHER_ARP;
ptype[3] = RTE_PTYPE_L2_ETHER_LLDP;
ptype[8] = RTE_PTYPE_L2_ETHER_TIMESYNC;
/* Non-tunnel IPv4 */
ptype[17] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_FRAG;
ptype[18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_NONFRAG;
ptype[19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
ptype[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
/* The next ptype is GRE over IPv4 */
ptype[21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
ptype[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_SCTP;
ptype[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_IGMP;
ptype[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_ICMP;
/* The next ptype is PTP over IPv4 + UDP */
ptype[25] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
/* IPv4 --> GRE/Teredo/VXLAN */
ptype[29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT;
/* IPv4 --> GRE/Teredo/VXLAN --> MAC */
ptype[30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
ptype[31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_FRAG;
ptype[32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_NONFRAG;
ptype[33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_UDP;
ptype[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP;
ptype[35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_SCTP;
/* The next ptype's inner L4 is IGMP */
ptype[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
ptype[37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_ICMP;
/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
ptype[39] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_FRAG;
ptype[40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_NONFRAG;
ptype[41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_UDP;
ptype[42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP;
ptype[43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_SCTP;
/* The next ptype's inner L4 is IGMP */
ptype[44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
ptype[45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_ICMP;
/* Non-tunnel IPv6 */
ptype[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_FRAG;
ptype[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_NONFRAG;
ptype[113] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
ptype[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
/* The next ptype is GRE over IPv6 */
ptype[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
ptype[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_SCTP;
ptype[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_IGMP;
ptype[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_ICMP;
/* Special for PTP over IPv6 + UDP */
ptype[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
/* IPv6 --> GRE/Teredo/VXLAN */
ptype[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT;
/* IPv6 --> GRE/Teredo/VXLAN --> MAC */
ptype[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER;
/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
ptype[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_FRAG;
ptype[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_NONFRAG;
ptype[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_UDP;
ptype[128] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP;
ptype[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_SCTP;
/* The next ptype's inner L4 is IGMP */
ptype[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
ptype[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_ICMP;
/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
ptype[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_FRAG;
ptype[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_NONFRAG;
ptype[135] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_UDP;
ptype[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP;
ptype[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_SCTP;
/* The next ptype's inner L4 is IGMP */
ptype[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
ptype[139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_ICMP;
}
void
hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
{
@@ -1997,6 +2185,7 @@ hns3_init_rx_ptype_tble(struct rte_eth_dev *dev)
hns3_init_non_tunnel_ptype_tbl(tbl);
hns3_init_tunnel_ptype_tbl(tbl);
hns3_init_adv_layout_ptype(tbl);
}
static inline void
@@ -4012,3 +4201,14 @@ hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
else
return fbd_num - driver_hold_bd_num;
}
void
hns3_enable_rxd_adv_layout(struct hns3_hw *hw)
{
/*
 * If the hardware supports the RXD advanced layout, the driver enables
 * it by default.
 */
if (hns3_dev_rxd_adv_layout_supported(hw))
hns3_write_dev(hw, HNS3_RXD_ADV_LAYOUT_EN_REG, 1);
}

drivers/net/hns3/hns3_rxtx.h

@@ -88,6 +88,8 @@
#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S)
#define HNS3_RXD_OL4ID_S 8
#define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S)
#define HNS3_RXD_PTYPE_S 4
#define HNS3_RXD_PTYPE_M (0xff << HNS3_RXD_PTYPE_S)
#define HNS3_RXD_FBHI_S 12
#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S)
#define HNS3_RXD_FBLI_S 14
@@ -328,6 +330,7 @@ struct hns3_rx_queue {
* point, the pvid_sw_discard_en will be false.
*/
bool pvid_sw_discard_en;
bool ptype_en; /* indicate whether the ptype field is enabled */
bool enabled; /* indicate if Rx queue has been enabled */
struct hns3_rx_basic_stats basic_stats;
@@ -609,6 +612,13 @@ hns3_rx_calc_ptype(struct hns3_rx_queue *rxq, const uint32_t l234_info,
const struct hns3_ptype_table * const ptype_tbl = rxq->ptype_tbl;
uint32_t l2id, l3id, l4id;
uint32_t ol3id, ol4id, ol2id;
uint32_t ptype;
if (rxq->ptype_en) {
ptype = hns3_get_field(ol_info, HNS3_RXD_PTYPE_M,
HNS3_RXD_PTYPE_S);
return ptype_tbl->ptype[ptype];
}
ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
@@ -707,5 +717,6 @@ int hns3_start_all_rxqs(struct rte_eth_dev *dev);
void hns3_stop_all_txqs(struct rte_eth_dev *dev);
void hns3_restore_tqp_enable_state(struct hns3_hw *hw);
int hns3_tx_done_cleanup(void *txq, uint32_t free_cnt);
void hns3_enable_rxd_adv_layout(struct hns3_hw *hw);
#endif /* _HNS3_RXTX_H_ */