net/hns3: add Rx interrupts compatibility

There are differences in the queue interrupt configurations between
different versions of the hardware network engine, such as the queue
interrupt mapping mode, the coalesce configuration, etc.

The following uses the configuration differences of the interrupt
mapping mode as an example.
1) For some versions of the hardware network engine, such as Kunpeng 920,
   because of a hardware constraint, we need to implement clearing the
   mapping relationship configurations by binding all queues to the last
   interrupt vector and reserving the last interrupt vector. This results
   in a decrease of the maximum number of queues when upper-layer
   applications call the rte_eth_dev_configure API function to enable
   Rx interrupt.
2) For other versions, such as Kunpeng 930, the hns3 PMD driver can
   map/unmap all interrupt vectors to queues when Rx interrupt is
   enabled.

This patch resolves the configuration differences regarding Rx interrupts
between Kunpeng 920 and Kunpeng 930; see the sketch below.
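
For illustration, a minimal sketch (not part of the patch) of how the
mapping mode decides how many MSI-X vectors remain usable for queue
interrupts. It mirrors the hns3_init_ring_with_vector() change in the diff
below; the helper name usable_queue_vectors() is ours, not the driver's.

#include <stdint.h>
#include <rte_common.h>     /* RTE_MIN */
#include "hns3_ethdev.h"    /* HNS3_INTR_MAPPING_* macros added by this patch */

/* Sketch only: vector 0 always serves the misc interrupt; on Kunpeng 920
 * (HNS3_INTR_MAPPING_VEC_RSV_ONE) the last vector is additionally reserved,
 * while on Kunpeng 930 (HNS3_INTR_MAPPING_VEC_ALL) every remaining vector
 * can be mapped to a queue.
 */
static uint16_t
usable_queue_vectors(uint16_t num_msi, uint16_t tqps_num, uint8_t mapping_mode)
{
	uint16_t vec = num_msi - 1;

	if (mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
		vec = vec - 1;

	return RTE_MIN(vec, tqps_num);
}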

Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Wei Hu (Xavier) 2020-08-25 19:52:57 +08:00 committed by Ferruh Yigit
parent 9c740336f0
commit 27911a6e62
7 changed files with 123 additions and 33 deletions

@@ -368,21 +368,25 @@ struct hns3_func_status_cmd {
uint8_t rsv[2];
};
#define HNS3_VEC_NUM_S 0
#define HNS3_VEC_NUM_M GENMASK(7, 0)
#define HNS3_PF_VEC_NUM_S 0
#define HNS3_PF_VEC_NUM_M GENMASK(15, 0)
#define HNS3_MIN_VECTOR_NUM 2 /* one for msi-x, another for IO */
struct hns3_pf_res_cmd {
uint16_t tqp_num;
uint16_t buf_size;
uint16_t msixcap_localid_ba_nic;
uint16_t msixcap_localid_ba_rocee;
uint16_t pf_intr_vector_number;
uint16_t nic_pf_intr_vector_number;
uint16_t roce_pf_intr_vector_number;
uint16_t pf_own_fun_number;
uint16_t tx_buf_size;
uint16_t dv_buf_size;
uint32_t rsv[2];
uint16_t tqp_num_ext;
uint16_t roh_pf_intr_vector_number;
uint32_t rsv[1];
};
#define HNS3_VF_VEC_NUM_S 0
#define HNS3_VF_VEC_NUM_M GENMASK(7, 0)
struct hns3_vf_res_cmd {
uint16_t tqp_num;
uint16_t reserved;

@@ -2208,7 +2208,7 @@ hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap,
static int
hns3_init_ring_with_vector(struct hns3_hw *hw)
{
uint8_t vec;
uint16_t vec;
int ret;
int i;
@@ -2219,27 +2219,23 @@ hns3_init_ring_with_vector(struct hns3_hw *hw)
* vector. In the initialization, clearing all the hardware mapping
* relationship configurations between queues and interrupt vectors is
* needed, so errors caused by the residual configurations, such as
* the unexpected Tx interrupt, can be avoided. Because of the hardware
* constraints in hns3 hardware engine, we have to implement clearing
* the mapping relationship configurations by binding all queues to the
* last interrupt vector and reserving the last interrupt vector. This
* method results in a decrease of the maximum queues when upper
* applications call the rte_eth_dev_configure API function to enable
* Rx interrupt.
* the unexpected Tx interrupt, can be avoided.
*/
vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
/* vec - 1: the last interrupt is reserved */
hw->intr_tqps_num = vec > hw->tqps_num ? hw->tqps_num : vec - 1;
if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
vec = vec - 1; /* the last interrupt is reserved */
hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
for (i = 0; i < hw->intr_tqps_num; i++) {
/*
* Set gap limiter and rate limiter configuration of queue's
* interrupt.
* Set gap limiter/rate limiter/quantity limiter algorithm
* configuration for interrupt coalesce of queue's interrupt.
*/
hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
HNS3_TQP_INTR_GL_DEFAULT);
hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
HNS3_TQP_INTR_GL_DEFAULT);
hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);
ret = hns3_bind_ring_with_vector(hw, vec, false,
HNS3_RING_TYPE_TX, i);
@@ -2669,8 +2665,8 @@ hns3_query_pf_resource(struct hns3_hw *hw)
pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
hw->num_msi =
hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number),
HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
return 0;
}
@@ -2885,6 +2881,9 @@ hns3_get_capability(struct hns3_hw *hw)
if (revision < PCI_REVISION_ID_HIP09_A) {
hns3_set_default_dev_specifications(hw);
hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL;
hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
return 0;
}
@@ -2896,6 +2895,10 @@ hns3_get_capability(struct hns3_hw *hw)
return ret;
}
hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL;
hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
return 0;
}

@@ -359,6 +359,59 @@ struct hns3_reset_data {
struct hns3_wait_data *wait_data;
};
#define HNS3_INTR_MAPPING_VEC_RSV_ONE 0
#define HNS3_INTR_MAPPING_VEC_ALL 1
#define HNS3_INTR_COALESCE_NON_QL 0
#define HNS3_INTR_COALESCE_QL 1
#define HNS3_INTR_COALESCE_GL_UINT_2US 0
#define HNS3_INTR_COALESCE_GL_UINT_1US 1
struct hns3_queue_intr {
/*
* interrupt mapping mode.
* value range:
* HNS3_INTR_MAPPING_VEC_RSV_ONE/HNS3_INTR_MAPPING_VEC_ALL
*
* - HNS3_INTR_MAPPING_VEC_RSV_ONE
* For some versions of hardware network engine, because of the
* hardware constraint, we need to implement clearing the mapping
* relationship configurations by binding all queues to the last
* interrupt vector and reserving the last interrupt vector. This
* method results in a decrease of the maximum queues when upper
* applications call the rte_eth_dev_configure API function to
* enable Rx interrupt.
*
* - HNS3_INTR_MAPPING_VEC_ALL
* PMD driver can map/unmap all interrupt vectors with queues when
* Rx interrupt is enabled.
*/
uint8_t mapping_mode;
/*
* interrupt coalesce mode.
* value range:
* HNS3_INTR_COALESCE_NON_QL/HNS3_INTR_COALESCE_QL
*
* - HNS3_INTR_COALESCE_NON_QL
* For some versions of hardware network engine, hardware doesn't
* support QL (quantity limiter) algorithm for interrupt coalesce
* of queue's interrupt.
*
* - HNS3_INTR_COALESCE_QL
* In this mode, hardware supports QL (quantity limiter) algorithm for
* interrupt coalesce of queue's interrupt.
*/
uint8_t coalesce_mode;
/*
* The unit of GL(gap limiter) configuration for interrupt coalesce of
* queue's interrupt.
* value range:
* HNS3_INTR_COALESCE_GL_UINT_2US/HNS3_INTR_COALESCE_GL_UINT_1US
*/
uint8_t gl_unit;
};
struct hns3_hw {
struct rte_eth_dev_data *data;
void *io_base;
@@ -411,6 +464,9 @@ struct hns3_hw {
uint32_t capability;
uint32_t max_tm_rate;
struct hns3_queue_intr intr;
uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
struct hns3_port_base_vlan_config port_base_vlan_cfg;

@@ -693,7 +693,7 @@ hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id,
static int
hns3vf_init_ring_with_vector(struct hns3_hw *hw)
{
uint8_t vec;
uint16_t vec;
int ret;
int i;
@@ -704,27 +704,23 @@ hns3vf_init_ring_with_vector(struct hns3_hw *hw)
* vector. In the initialization, clearing all the hardware mapping
* relationship configurations between queues and interrupt vectors is
* needed, so errors caused by the residual configurations, such as
* the unexpected Tx interrupt, can be avoided. Because of the hardware
* constraints in hns3 hardware engine, we have to implement clearing
* the mapping relationship configurations by binding all queues to the
* last interrupt vector and reserving the last interrupt vector. This
* method results in a decrease of the maximum queues when upper
* applications call the rte_eth_dev_configure API function to enable
* Rx interrupt.
* the unexpected Tx interrupt, can be avoided.
*/
vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
/* vec - 1: the last interrupt is reserved */
hw->intr_tqps_num = vec > hw->tqps_num ? hw->tqps_num : vec - 1;
if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
vec = vec - 1; /* the last interrupt is reserved */
hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
for (i = 0; i < hw->intr_tqps_num; i++) {
/*
* Set gap limiter and rate limiter configuration of queue's
* interrupt.
* Set gap limiter/rate limiter/quantity limiter algorithm
* configuration for interrupt coalesce of queue's interrupt.
*/
hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
HNS3_TQP_INTR_GL_DEFAULT);
hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
HNS3_TQP_INTR_GL_DEFAULT);
hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);
ret = hns3vf_bind_ring_with_vector(hw, vec, false,
HNS3_RING_TYPE_TX, i);
@@ -1134,6 +1130,9 @@ hns3vf_get_capability(struct hns3_hw *hw)
if (revision < PCI_REVISION_ID_HIP09_A) {
hns3vf_set_default_dev_specifications(hw);
hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL;
hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
return 0;
}
@@ -1145,6 +1144,10 @@ hns3vf_get_capability(struct hns3_hw *hw)
return ret;
}
hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL;
hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
return 0;
}
@@ -1616,7 +1619,7 @@ hns3_query_vf_resource(struct hns3_hw *hw)
req = (struct hns3_vf_res_cmd *)desc.data;
num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
HNS3_VF_VEC_NUM_M, HNS3_VF_VEC_NUM_S);
if (num_msi < HNS3_MIN_VECTOR_NUM) {
hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)",
num_msi, HNS3_MIN_VECTOR_NUM);

@@ -92,13 +92,17 @@
#define HNS3_TQP_INTR_GL1_REG 0x20200
#define HNS3_TQP_INTR_GL2_REG 0x20300
#define HNS3_TQP_INTR_RL_REG 0x20900
#define HNS3_TQP_INTR_TX_QL_REG 0x20e00
#define HNS3_TQP_INTR_RX_QL_REG 0x20f00
#define HNS3_TQP_INTR_REG_SIZE 4
#define HNS3_TQP_INTR_GL_MAX 0x1FE0
#define HNS3_TQP_INTR_GL_DEFAULT 20
#define HNS3_TQP_INTR_GL_UNIT_1US BIT(31)
#define HNS3_TQP_INTR_RL_MAX 0xEC
#define HNS3_TQP_INTR_RL_ENABLE_MASK 0x40
#define HNS3_TQP_INTR_RL_DEFAULT 0
#define HNS3_TQP_INTR_QL_DEFAULT 0
/* gl_usec converted to hardware count, as each written 1 represents 2us */
#define HNS3_GL_USEC_TO_REG(gl_usec) ((gl_usec) >> 1)

@@ -536,6 +536,9 @@ hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
return;
addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
else
value = HNS3_GL_USEC_TO_REG(gl_value);
hns3_write_dev(hw, addr, value);
@@ -557,6 +560,21 @@ hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
hns3_write_dev(hw, addr, value);
}
void
hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
{
uint32_t addr;
if (hw->intr.coalesce_mode == HNS3_INTR_COALESCE_NON_QL)
return;
addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
hns3_write_dev(hw, addr, ql_value);
addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
hns3_write_dev(hw, addr, ql_value);
}
static void
hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
{

@@ -397,6 +397,8 @@ void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
uint8_t gl_idx, uint16_t gl_value);
void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
uint16_t rl_value);
void hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id,
uint16_t ql_value);
int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
uint16_t nb_tx_q);
int hns3_config_gro(struct hns3_hw *hw, bool en);
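
For context, a minimal sketch of the application-side usage that exercises
this path. It uses only the generic ethdev API; enable_rx_interrupts(),
port_id, nb_rxq and nb_txq are placeholder names and the function is not
part of the patch.

#include <rte_ethdev.h>

/* Request per-queue Rx interrupts at configure time; the PMD then decides,
 * per hardware version, how many vectors it can map to queues. Queue setup,
 * rte_eth_dev_start() and rte_eth_dev_rx_intr_enable() follow as usual. */
static int
enable_rx_interrupts(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {0};

	conf.intr_conf.rxq = 1;
	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}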