/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#ifndef _HNS3_MBX_H_
#define _HNS3_MBX_H_

enum HNS3_MBX_OPCODE {
	HNS3_MBX_RESET = 0x01,          /* (VF -> PF) assert reset */
	HNS3_MBX_ASSERTING_RESET,       /* (PF -> VF) PF is asserting reset */
	HNS3_MBX_SET_UNICAST,           /* (VF -> PF) set UC addr */
	HNS3_MBX_SET_MULTICAST,         /* (VF -> PF) set MC addr */
	HNS3_MBX_SET_VLAN,              /* (VF -> PF) set VLAN */
	HNS3_MBX_MAP_RING_TO_VECTOR,    /* (VF -> PF) map ring-to-vector */
	HNS3_MBX_UNMAP_RING_TO_VECTOR,  /* (VF -> PF) unmap ring-to-vector */
	HNS3_MBX_SET_PROMISC_MODE,      /* (VF -> PF) set promiscuous mode */
	HNS3_MBX_SET_MACVLAN,           /* (VF -> PF) set unicast filter */
	HNS3_MBX_API_NEGOTIATE,         /* (VF -> PF) negotiate API version */
	HNS3_MBX_GET_QINFO,             /* (VF -> PF) get queue config */
	HNS3_MBX_GET_QDEPTH,            /* (VF -> PF) get queue depth */
	HNS3_MBX_GET_TCINFO,            /* (VF -> PF) get TC config */
	HNS3_MBX_GET_RETA,              /* (VF -> PF) get RETA */
	HNS3_MBX_GET_RSS_KEY,           /* (VF -> PF) get RSS key */
	HNS3_MBX_GET_MAC_ADDR,          /* (VF -> PF) get MAC addr */
	HNS3_MBX_PF_VF_RESP,            /* (PF -> VF) generate response to VF */
	HNS3_MBX_GET_BDNUM,             /* (VF -> PF) get BD num */
	HNS3_MBX_GET_BUFSIZE,           /* (VF -> PF) get buffer size */
	HNS3_MBX_GET_STREAMID,          /* (VF -> PF) get stream id */
	HNS3_MBX_SET_AESTART,           /* (VF -> PF) start ae */
	HNS3_MBX_SET_TSOSTATS,          /* (VF -> PF) get TSO stats */
	HNS3_MBX_LINK_STAT_CHANGE,      /* (PF -> VF) link status has changed */
	HNS3_MBX_GET_BASE_CONFIG,       /* (VF -> PF) get config */
	HNS3_MBX_BIND_FUNC_QUEUE,       /* (VF -> PF) bind function and queue */
	HNS3_MBX_GET_LINK_STATUS,       /* (VF -> PF) get link status */
	HNS3_MBX_QUEUE_RESET,           /* (VF -> PF) reset queue */
	HNS3_MBX_KEEP_ALIVE,            /* (VF -> PF) send keep-alive cmd */
	HNS3_MBX_SET_ALIVE,             /* (VF -> PF) set alive state */
	HNS3_MBX_SET_MTU,               /* (VF -> PF) set MTU */
	HNS3_MBX_GET_QID_IN_PF,         /* (VF -> PF) get queue id in PF */

	HNS3_MBX_PUSH_VLAN_INFO = 34,   /* (PF -> VF) push port-based VLAN */

	HNS3_MBX_PUSH_PROMISC_INFO = 36, /* (PF -> VF) push VF promisc info */

	HNS3_MBX_HANDLE_VF_TBL = 38,    /* (VF -> PF) store/clear hw cfg tbl */
	HNS3_MBX_GET_RING_VECTOR_MAP,   /* (VF -> PF) get ring-to-vector map */

	HNS3_MBX_PUSH_LINK_STATUS = 201, /* (IMP -> PF) push port link status */
};
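
/*
 * Note: the explicitly assigned opcode values (34, 36, 38, 201) are pinned
 * so they keep the numeric values expected by the peer side (the PF driver
 * or IMP firmware); the gaps in the sequence are therefore intentional.
 */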

/* below are per-VF mac-vlan subcodes */
enum hns3_mbx_mac_vlan_subcode {
	HNS3_MBX_MAC_VLAN_UC_MODIFY = 0,  /* modify UC mac addr */
	HNS3_MBX_MAC_VLAN_UC_ADD,         /* add a new UC mac addr */
	HNS3_MBX_MAC_VLAN_UC_REMOVE,      /* remove a UC mac addr */
	HNS3_MBX_MAC_VLAN_MC_MODIFY,      /* modify MC mac addr */
	HNS3_MBX_MAC_VLAN_MC_ADD,         /* add a new MC mac addr */
	HNS3_MBX_MAC_VLAN_MC_REMOVE,      /* remove a MC mac addr */
};

/* below are per-VF vlan cfg subcodes */
enum hns3_mbx_vlan_cfg_subcode {
	HNS3_MBX_VLAN_FILTER = 0,               /* set VLAN filter */
	HNS3_MBX_VLAN_TX_OFF_CFG,               /* set Tx side VLAN offload */
	HNS3_MBX_VLAN_RX_OFF_CFG,               /* set Rx side VLAN offload */

	HNS3_MBX_GET_PORT_BASE_VLAN_STATE = 4,  /* get port-based VLAN state */
};
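
/*
 * A minimal hedged sketch of pairing an opcode with a subcode through the
 * helper declared at the end of this header, e.g. programming a VLAN filter
 * entry (the payload layout in msg_data is illustrative, not the exact
 * on-wire format):
 *
 *	uint8_t msg_data[4];    // hypothetical payload: vlan id, on/off, ...
 *	int ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN,
 *				    HNS3_MBX_VLAN_FILTER, msg_data,
 *				    sizeof(msg_data), true, NULL, 0);
 */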

enum hns3_mbx_tbl_cfg_subcode {
	HNS3_MBX_VPORT_LIST_CLEAR = 0,
};

enum hns3_mbx_link_fail_subcode {
	HNS3_MBX_LF_NORMAL = 0,
	HNS3_MBX_LF_REF_CLOCK_LOST,
	HNS3_MBX_LF_XSFP_TX_DISABLE,
	HNS3_MBX_LF_XSFP_ABSENT,
};

#define HNS3_MBX_MAX_MSG_SIZE		16
#define HNS3_MBX_MAX_RESP_DATA_SIZE	8

enum {
	HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL = 0,
	HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID
};
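
/*
 * Two request/response matching schemes coexist. The original scheme matches
 * on the combined code/subcode of the request plus head/tail/lost counters,
 * which can mis-match a response when two identical requests are in flight.
 * The match_id scheme labels each request with a unique non-zero 16-bit id
 * that the PF echoes back in its response. The match_id scheme depends on
 * PF driver support; without it, the VF falls back to the original scheme.
 */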

struct hns3_mbx_resp_status {
	rte_spinlock_t lock; /* protects against contending sync cmd resp */

	uint8_t matching_scheme;

	/* The following fields are used in the original matching scheme */
	uint32_t req_msg_data;  /* combined code/subcode of the request */
	uint32_t head;          /* number of requests successfully sent */
	uint32_t tail;          /* number of responses successfully received */
	uint32_t lost;          /* number of requests that timed out */

	/* The following fields are used in the match_id matching scheme */
	uint16_t match_id;      /* unique non-zero id carried by the request */
	bool received_match_resp;

	int resp_status;
	uint8_t additional_info[HNS3_MBX_MAX_RESP_DATA_SIZE];
};
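
/*
 * Hedged sketch of the bookkeeping behind the original scheme: every sent
 * request bumps head, every received response bumps tail, and every timed
 * out request bumps lost, so the VF treats the mailbox as idle when
 * head == tail + lost. This illustrates the invariant and is not the exact
 * driver code:
 *
 *	static inline bool
 *	hns3_mbx_resp_pending(struct hns3_mbx_resp_status *resp)
 *	{
 *		return resp->head != resp->tail + resp->lost;
 *	}
 */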

struct errno_respcode_map {
	uint16_t resp_code;
	int err_no;
};

#define HNS3_MBX_NEED_RESP_BIT		BIT(0)

struct hns3_mbx_vf_to_pf_cmd {
	uint8_t rsv;
	uint8_t mbx_src_vfid;   /* Auto filled by IMP */
	uint8_t mbx_need_resp;  /* HNS3_MBX_NEED_RESP_BIT set when a response is expected */
	uint8_t rsv1;
	uint8_t msg_len;
	uint8_t rsv2;
	uint16_t match_id;      /* echoed back by the PF in its response */
	uint8_t msg[HNS3_MBX_MAX_MSG_SIZE];
};
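
/*
 * Layout note (an assumption based on how the send helper below is used,
 * not stated in this header): msg[0] carries the opcode, msg[1] the
 * subcode, and any request payload follows from msg[2] onward.
 */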

struct hns3_mbx_pf_to_vf_cmd {
	uint8_t dest_vfid;
	uint8_t rsv[3];
	uint8_t msg_len;
	uint8_t rsv1;
	uint16_t match_id;  /* copied from the matching VF request */
	uint16_t msg[8];    /* note: 16-bit words, unlike the byte-wide VF->PF msg */
};

struct hns3_ring_chain_param {
	uint8_t ring_type;
	uint8_t tqp_index;
	uint8_t int_gl_index;
};

#define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM	4
struct hns3_vf_bind_vector_msg {
	uint8_t vector_id;
	uint8_t ring_num;
	struct hns3_ring_chain_param param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM];
};
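
/*
 * Minimal hedged sketch of binding a single ring to an interrupt vector
 * (all values illustrative; the encoding of ring_type and int_gl_index is
 * not defined in this header):
 *
 *	struct hns3_vf_bind_vector_msg bind = {
 *		.vector_id = 1,
 *		.ring_num = 1,
 *		.param[0] = { .ring_type = 0, .tqp_index = 0, .int_gl_index = 0 },
 *	};
 */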

struct hns3_pf_rst_done_cmd {
	uint8_t pf_rst_done;  /* see HNS3_PF_RESET_DONE_BIT below */
	uint8_t rsv[23];
};

#define HNS3_PF_RESET_DONE_BIT		BIT(0)

/* Advance a mailbox CRQ ring pointer by one descriptor, wrapping around at
 * desc_num (the ring is used as a circular buffer).
 */
#define hns3_mbx_ring_ptr_move_crq(crq) \
	((crq)->next_to_use = ((crq)->next_to_use + 1) % (crq)->desc_num)

struct hns3_hw;
void hns3_dev_handle_mbx_msg(struct hns3_hw *hw);
int hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
		      const uint8_t *msg_data, uint8_t msg_len, bool need_resp,
		      uint8_t *resp_data, uint16_t resp_len);
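
/*
 * Hedged usage sketch: a synchronous query that waits for the PF response
 * and copies back up to HNS3_MBX_MAX_RESP_DATA_SIZE bytes of reply data
 * (the opcode chosen here is illustrative):
 *
 *	uint8_t resp[HNS3_MBX_MAX_RESP_DATA_SIZE];
 *	int ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QDEPTH, 0, NULL, 0,
 *				    true, resp, sizeof(resp));
 */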
#endif /* _HNS3_MBX_H_ */