/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#ifndef _HNS3_DCB_H_
#define _HNS3_DCB_H_

#define HNS3_ETHER_MAX_RATE	100000

/* MAC Pause */
#define HNS3_TX_MAC_PAUSE_EN_MSK	BIT(0)
#define HNS3_RX_MAC_PAUSE_EN_MSK	BIT(1)

#define HNS3_DEFAULT_PAUSE_TRANS_GAP	0x18
#define HNS3_DEFAULT_PAUSE_TRANS_TIME	0xFFFF

/* SP or DWRR */
#define HNS3_DCB_TX_SCHD_DWRR_MSK	BIT(0)
#define HNS3_DCB_TX_SCHD_SP_MSK		0xFE

enum hns3_shap_bucket {
	HNS3_DCB_SHAP_C_BUCKET = 0,
	HNS3_DCB_SHAP_P_BUCKET,
};

struct hns3_priority_weight_cmd {
	uint8_t pri_id;
	uint8_t dwrr;
	uint8_t rsvd[22];
};
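
/*
 * Illustrative sketch (assumes a prepared command buffer named
 * weight_cmd): setting the DWRR weight of one priority. The qset and PG
 * weight commands below are filled in the same way.
 *
 *	weight_cmd->pri_id = pri_id;
 *	weight_cmd->dwrr = dwrr;
 */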

struct hns3_qs_weight_cmd {
	uint16_t qs_id;
	uint8_t dwrr;
	uint8_t rsvd[21];
};

struct hns3_pg_weight_cmd {
	uint8_t pg_id;
	uint8_t dwrr;
	uint8_t rsvd[22];
};

struct hns3_ets_tc_weight_cmd {
	uint8_t tc_weight[HNS3_MAX_TC_NUM];
	uint8_t weight_offset;
	uint8_t rsvd[15];
};

struct hns3_qs_to_pri_link_cmd {
	uint16_t qs_id;
	uint16_t rsvd;
	uint8_t priority;
#define HNS3_DCB_QS_PRI_LINK_VLD_MSK	BIT(0)
#define HNS3_DCB_QS_ID_L_MSK		GENMASK(9, 0)
#define HNS3_DCB_QS_ID_L_S		0
#define HNS3_DCB_QS_ID_H_MSK		GENMASK(14, 10)
#define HNS3_DCB_QS_ID_H_S		10
#define HNS3_DCB_QS_ID_H_EXT_S		11
#define HNS3_DCB_QS_ID_H_EXT_MSK	GENMASK(15, 11)
	uint8_t link_vld;
	uint8_t rsvd1[18];
};

struct hns3_nq_to_qs_link_cmd {
	uint16_t nq_id;
	uint16_t rsvd;
#define HNS3_DCB_Q_QS_LINK_VLD_MSK	BIT(10)
	uint16_t qset_id;
	uint8_t rsvd1[18];
};
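
/*
 * Illustrative sketch (the hns3_get_field()/hns3_set_field() helpers and
 * a command buffer named map are assumed): on engines with more than
 * 1024 queue sets, the high bits of a qset id are moved above the VLD
 * bit (bit 10) before the qset_id field is written.
 *
 *	qs_id_h = hns3_get_field(qs_id, HNS3_DCB_QS_ID_H_MSK,
 *				 HNS3_DCB_QS_ID_H_S);
 *	hns3_set_field(qs_id, HNS3_DCB_QS_ID_H_EXT_MSK,
 *		       HNS3_DCB_QS_ID_H_EXT_S, qs_id_h);
 *	map->qset_id = rte_cpu_to_le_16(qs_id | HNS3_DCB_Q_QS_LINK_VLD_MSK);
 */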

#define HNS3_DCB_SHAP_IR_B_MSK	GENMASK(7, 0)
#define HNS3_DCB_SHAP_IR_B_LSH	0
#define HNS3_DCB_SHAP_IR_U_MSK	GENMASK(11, 8)
#define HNS3_DCB_SHAP_IR_U_LSH	8
#define HNS3_DCB_SHAP_IR_S_MSK	GENMASK(15, 12)
#define HNS3_DCB_SHAP_IR_S_LSH	12
#define HNS3_DCB_SHAP_BS_B_MSK	GENMASK(20, 16)
#define HNS3_DCB_SHAP_BS_B_LSH	16
#define HNS3_DCB_SHAP_BS_S_MSK	GENMASK(25, 21)
#define HNS3_DCB_SHAP_BS_S_LSH	21

/*
 * To allow more flexible selection of the shaping algorithm on different
 * network engines, the calculation of shaping parameters has been moved
 * into firmware. Setting bit HNS3_TM_RATE_VLD_B of the flag field in
 * hns3_pri_shapping_cmd, hns3_pg_shapping_cmd or hns3_port_shapping_cmd
 * to 1 asks firmware to recalculate the shaping parameters. Whether they
 * are actually recalculated depends on the firmware version: if firmware
 * does not support the calculation, as on network engines with revision
 * id 0x21, the values calculated by the driver are written to hardware;
 * otherwise, firmware ignores the driver's configuration and recalculates
 * the parameters itself.
 */
#define HNS3_TM_RATE_VLD_B	0

struct hns3_pri_shapping_cmd {
	uint8_t pri_id;
	uint8_t rsvd[3];
	uint32_t pri_shapping_para;
	uint8_t flag;
	uint8_t rsvd1[3];
	uint32_t pri_rate;  /* Unit Mbps */
	uint8_t rsvd2[8];
};
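
/*
 * Illustrative sketch (assumes the hns3_set_bit() helper and a prepared
 * command buffer named shap_cmd): requesting the firmware recalculation
 * described above from a rate in Mbps, while keeping the
 * driver-calculated parameter word for firmware that does not support
 * the calculation.
 *
 *	shap_cmd->pri_shapping_para = rte_cpu_to_le_32(shapping_para);
 *	hns3_set_bit(shap_cmd->flag, HNS3_TM_RATE_VLD_B, 1);
 *	shap_cmd->pri_rate = rte_cpu_to_le_32(rate);
 */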

struct hns3_pg_shapping_cmd {
	uint8_t pg_id;
	uint8_t rsvd[3];
	uint32_t pg_shapping_para;
	uint8_t flag;
	uint8_t rsvd1[3];
	uint32_t pg_rate;  /* Unit Mbps */
	uint8_t rsvd2[8];
};

struct hns3_port_shapping_cmd {
	uint32_t port_shapping_para;
	uint8_t flag;
	uint8_t rsvd[3];
	uint32_t port_rate;  /* Unit Mbps */
	uint8_t rsvd1[12];
};

#define HNS3_BP_GRP_NUM		32
#define HNS3_BP_SUB_GRP_ID_S	0
#define HNS3_BP_SUB_GRP_ID_M	GENMASK(4, 0)
#define HNS3_BP_GRP_ID_S	5
#define HNS3_BP_GRP_ID_M	GENMASK(9, 5)

struct hns3_bp_to_qs_map_cmd {
	uint8_t tc_id;
	uint8_t rsvd[2];
	uint8_t qs_group_id;
	uint32_t qs_bit_map;
	uint32_t rsvd1[4];
};
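
/*
 * Illustrative sketch (hns3_get_field() assumed): the qset space is
 * divided into HNS3_BP_GRP_NUM groups of 32 qsets, so a qset id selects
 * a group via bits 9:5 and one bit of that group's qs_bit_map via bits
 * 4:0.
 *
 *	grp_id = hns3_get_field(qs_id, HNS3_BP_GRP_ID_M, HNS3_BP_GRP_ID_S);
 *	sub_grp_id = hns3_get_field(qs_id, HNS3_BP_SUB_GRP_ID_M,
 *				    HNS3_BP_SUB_GRP_ID_S);
 *	map->qs_group_id = grp_id;
 *	map->qs_bit_map |= rte_cpu_to_le_32(BIT(sub_grp_id));
 */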

struct hns3_pfc_en_cmd {
	uint8_t tx_rx_en_bitmap;
	uint8_t pri_en_bitmap;
	uint8_t rsvd[22];
};
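
/*
 * Illustrative sketch (local tx_en/rx_en flags and a per-priority PFC
 * bitmap are assumed): composing the two bitmaps of hns3_pfc_en_cmd from
 * the MAC pause masks defined at the top of this file.
 *
 *	pfc->tx_rx_en_bitmap = (tx_en ? HNS3_TX_MAC_PAUSE_EN_MSK : 0) |
 *			       (rx_en ? HNS3_RX_MAC_PAUSE_EN_MSK : 0);
 *	pfc->pri_en_bitmap = pfc_bitmap;
 */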

struct hns3_cfg_pause_param_cmd {
	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
	uint8_t pause_trans_gap;
	uint8_t rsvd;
	uint16_t pause_trans_time;
	uint8_t rsvd1[6];
	/* extra MAC address used to double-check pause frames */
	uint8_t mac_addr_extra[RTE_ETHER_ADDR_LEN];
	uint16_t rsvd2;
};
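
/*
 * Illustrative sketch (field names from the struct above; memcpy() and a
 * command buffer named cmd assumed): filling the pause parameters with
 * the defaults defined at the top of this file.
 *
 *	memcpy(cmd->mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
 *	memcpy(cmd->mac_addr_extra, mac_addr, RTE_ETHER_ADDR_LEN);
 *	cmd->pause_trans_gap = HNS3_DEFAULT_PAUSE_TRANS_GAP;
 *	cmd->pause_trans_time =
 *		rte_cpu_to_le_16(HNS3_DEFAULT_PAUSE_TRANS_TIME);
 */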

struct hns3_pg_to_pri_link_cmd {
	uint8_t pg_id;
	uint8_t rsvd1[3];
	uint8_t pri_bit_map;
	uint8_t rsvd2[19];
};

enum hns3_shaper_level {
	HNS3_SHAPER_LVL_PRI = 0,
	HNS3_SHAPER_LVL_PG = 1,
	HNS3_SHAPER_LVL_PORT = 2,
	HNS3_SHAPER_LVL_QSET = 3,
	HNS3_SHAPER_LVL_CNT = 4,
	HNS3_SHAPER_LVL_VF = 0,
	HNS3_SHAPER_LVL_PF = 1,
};

struct hns3_shaper_parameter {
	uint32_t ir_b;  /* IR_B parameter of IR shaper */
	uint32_t ir_u;  /* IR_U parameter of IR shaper */
	uint32_t ir_s;  /* IR_S parameter of IR shaper */
};

#define hns3_dcb_set_field(dest, string, val) \
	hns3_set_field((dest), \
		       (HNS3_DCB_SHAP_##string##_MSK), \
		       (HNS3_DCB_SHAP_##string##_LSH), val)
#define hns3_dcb_get_field(src, string) \
	hns3_get_field((src), (HNS3_DCB_SHAP_##string##_MSK), \
		       (HNS3_DCB_SHAP_##string##_LSH))
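
/*
 * Illustrative sketch (ir_b/ir_u/ir_s/bs_b/bs_s values are assumed to
 * come from the driver's shaper calculation): composing the 32-bit
 * *_shapping_para word from the IR/BS fields with the accessor macros
 * above.
 *
 *	uint32_t shapping_para = 0;
 *
 *	hns3_dcb_set_field(shapping_para, IR_B, ir_b);
 *	hns3_dcb_set_field(shapping_para, IR_U, ir_u);
 *	hns3_dcb_set_field(shapping_para, IR_S, ir_s);
 *	hns3_dcb_set_field(shapping_para, BS_B, bs_b);
 *	hns3_dcb_set_field(shapping_para, BS_S, bs_s);
 */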

int hns3_pause_addr_cfg(struct hns3_hw *hw, const uint8_t *mac_addr);

int hns3_dcb_configure(struct hns3_adapter *hns);

int hns3_dcb_init(struct hns3_hw *hw);

int hns3_dcb_init_hw(struct hns3_hw *hw);

int hns3_dcb_info_init(struct hns3_hw *hw);

int hns3_fc_enable(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);

int hns3_dcb_pfc_enable(struct rte_eth_dev *dev,
			struct rte_eth_pfc_conf *pfc_conf);

int hns3_queue_to_tc_mapping(struct hns3_hw *hw, uint16_t nb_rx_q,
			     uint16_t nb_tx_q);

int hns3_dcb_cfg_update(struct hns3_adapter *hns);

#endif /* _HNS3_DCB_H_ */