net/hns3: support mailbox
This patch adds mailbox support to the hns3 PMD driver. The mailbox is used for communication between the PF and VF drivers.

Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Chunsong Feng <fengchunsong@huawei.com>
Signed-off-by: Hao Chen <chenhao164@huawei.com>
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
parent 411d23b9ea
commit 463e748964
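For context, here is a minimal sketch of how a VF-side caller might use the new synchronous mailbox API added by this patch. It is not part of the patch itself: the hns3_query_link_status() wrapper is hypothetical, while hns3_send_mbx_msg(), HNS3_MBX_GET_LINK_STATUS and HNS3_MBX_MAX_RESP_DATA_SIZE are defined in hns3_mbx.c/hns3_mbx.h below.

#include "hns3_ethdev.h"
#include "hns3_mbx.h"

/* Hypothetical caller: ask the PF for the current link status. */
static int
hns3_query_link_status(struct hns3_hw *hw)
{
	uint8_t resp_data[HNS3_MBX_MAX_RESP_DATA_SIZE];
	int ret;

	/*
	 * need_resp = true makes this a synchronous request:
	 * hns3_get_mbx_resp() polls up to 500 ms for the PF reply and
	 * returns -ETIME if it never arrives.
	 */
	ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0,
				true, resp_data, sizeof(resp_data));
	if (ret)
		hns3_err(hw, "query link status from PF failed, ret = %d", ret);

	return ret;
}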
@@ -24,6 +24,7 @@ LIBABIVER := 1
#
SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_cmd.c
SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_mbx.c
SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_rss.c
SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_fdir.c
@@ -476,6 +476,9 @@ hns3_cmd_init(struct hns3_hw *hw)
	hw->cmq.csq.next_to_use = 0;
	hw->cmq.crq.next_to_clean = 0;
	hw->cmq.crq.next_to_use = 0;
	hw->mbx_resp.head = 0;
	hw->mbx_resp.tail = 0;
	hw->mbx_resp.lost = 0;
	hns3_cmd_init_regs(hw);

	rte_spinlock_unlock(&hw->cmq.crq.lock);
@@ -9,6 +9,7 @@
#include <rte_alarm.h>

#include "hns3_cmd.h"
#include "hns3_mbx.h"
#include "hns3_rss.h"
#include "hns3_fdir.h"

@@ -332,6 +333,9 @@ struct hns3_hw {
	struct rte_eth_dev_data *data;
	void *io_base;
	struct hns3_cmq cmq;
	struct hns3_mbx_resp_status mbx_resp; /* mailbox response */
	struct hns3_mbx_arq_ring arq; /* mailbox async rx queue */
	pthread_t irq_thread_id;
	struct hns3_mac mac;
	unsigned int secondary_cnt; /* Number of secondary processes init'd. */
	uint32_t fw_version;
drivers/net/hns3/hns3_mbx.c (new file, 337 lines)
@@ -0,0 +1,337 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <unistd.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_io.h>
#include <rte_spinlock.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_logs.h"

#define HNS3_REG_MSG_DATA_OFFSET	4
#define HNS3_CMD_CODE_OFFSET		2

static const struct errno_respcode_map err_code_map[] = {
	{0, 0},
	{1, -EPERM},
	{2, -ENOENT},
	{5, -EIO},
	{11, -EAGAIN},
	{12, -ENOMEM},
	{16, -EBUSY},
	{22, -EINVAL},
	{28, -ENOSPC},
	{95, -EOPNOTSUPP},
};

static int
hns3_resp_to_errno(uint16_t resp_code)
{
	uint32_t i, num;

	num = sizeof(err_code_map) / sizeof(struct errno_respcode_map);
	for (i = 0; i < num; i++) {
		if (err_code_map[i].resp_code == resp_code)
			return err_code_map[i].err_no;
	}

	return -EIO;
}

static void
hns3_poll_all_sync_msg(void)
{
	struct rte_eth_dev *eth_dev;
	struct hns3_adapter *adapter;
	const char *name;
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV(port_id) {
		eth_dev = &rte_eth_devices[port_id];
		name = eth_dev->device->driver->name;
		if (strcmp(name, "net_hns3") && strcmp(name, "net_hns3_vf"))
			continue;
		adapter = eth_dev->data->dev_private;
		if (!adapter || adapter->hw.adapter_state == HNS3_NIC_CLOSED)
			continue;
		/* Synchronous msg, the mbx_resp.req_msg_data is non-zero */
		if (adapter->hw.mbx_resp.req_msg_data)
			hns3_dev_handle_mbx_msg(&adapter->hw);
	}
}

static int
hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code0, uint16_t code1,
		  uint8_t *resp_data, uint16_t resp_len)
{
#define HNS3_MAX_RETRY_MS	500
	struct hns3_mbx_resp_status *mbx_resp;
	bool in_irq = false;
	uint64_t now;
	uint64_t end;

	if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) {
		hns3_err(hw, "VF mbx response len(=%d) exceeds maximum(=%d)",
			 resp_len, HNS3_MBX_MAX_RESP_DATA_SIZE);
		return -EINVAL;
	}

	now = get_timeofday_ms();
	end = now + HNS3_MAX_RETRY_MS;
	while ((hw->mbx_resp.head != hw->mbx_resp.tail + hw->mbx_resp.lost) &&
	       (now < end)) {
		/*
		 * The mbox response is running on the interrupt thread.
		 * Sending mbox in the interrupt thread cannot wait for the
		 * response, so polling the mbox response on the irq thread.
		 */
		if (pthread_equal(hw->irq_thread_id, pthread_self())) {
			in_irq = true;
			hns3_poll_all_sync_msg();
		} else {
			rte_delay_ms(HNS3_POLL_RESPONE_MS);
		}
		now = get_timeofday_ms();
	}
	hw->mbx_resp.req_msg_data = 0;
	if (now >= end) {
		hw->mbx_resp.lost++;
		hns3_err(hw,
			 "VF could not get mbx(%d,%d) head(%d) tail(%d) lost(%d) from PF in_irq:%d",
			 code0, code1, hw->mbx_resp.head, hw->mbx_resp.tail,
			 hw->mbx_resp.lost, in_irq);
		return -ETIME;
	}
	rte_io_rmb();
	mbx_resp = &hw->mbx_resp;

	if (mbx_resp->resp_status)
		return mbx_resp->resp_status;

	if (resp_data)
		memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);

	return 0;
}

int
hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
		  const uint8_t *msg_data, uint8_t msg_len, bool need_resp,
		  uint8_t *resp_data, uint16_t resp_len)
{
	struct hns3_mbx_vf_to_pf_cmd *req;
	struct hns3_cmd_desc desc;
	int ret;

	req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;

	/* first two bytes are reserved for code & subcode */
	if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) {
		hns3_err(hw,
			 "VF send mbx msg fail, msg len %d exceeds max payload len %d",
			 msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET);
		return -EINVAL;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = code;
	req->msg[1] = subcode;
	if (msg_data)
		memcpy(&req->msg[HNS3_CMD_CODE_OFFSET], msg_data, msg_len);

	/* synchronous send */
	if (need_resp) {
		req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT;
		rte_spinlock_lock(&hw->mbx_resp.lock);
		hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode;
		hw->mbx_resp.head++;
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret) {
			rte_spinlock_unlock(&hw->mbx_resp.lock);
			hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
				 ret);
			return ret;
		}

		ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len);
		rte_spinlock_unlock(&hw->mbx_resp.lock);
	} else {
		/* asynchronous send */
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret) {
			hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
				 ret);
			return ret;
		}
	}

	return ret;
}

static bool
hns3_cmd_crq_empty(struct hns3_hw *hw)
{
	uint32_t tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);

	return tail == hw->cmq.crq.next_to_use;
}

static void
hns3_mbx_handler(struct hns3_hw *hw)
{
	struct hns3_mac *mac = &hw->mac;
	enum hns3_reset_level reset_level;
	uint16_t *msg_q;
	uint32_t tail;

	tail = hw->arq.tail;

	/* process all the async queue messages */
	while (tail != hw->arq.head) {
		msg_q = hw->arq.msg_q[hw->arq.head];

		switch (msg_q[0]) {
		case HNS3_MBX_LINK_STAT_CHANGE:
			memcpy(&mac->link_speed, &msg_q[2],
			       sizeof(mac->link_speed));
			mac->link_status = rte_le_to_cpu_16(msg_q[1]);
			mac->link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
			break;
		case HNS3_MBX_ASSERTING_RESET:
			/* PF has asserted reset hence VF should go in pending
			 * state and poll for the hardware reset status till it
			 * has been completely reset. After this stack should
			 * eventually be re-initialized.
			 */
			reset_level = rte_le_to_cpu_16(msg_q[1]);
			hns3_atomic_set_bit(reset_level, &hw->reset.pending);

			hns3_warn(hw, "PF inform reset level %d", reset_level);
			hw->reset.stats.request_cnt++;
			break;
		default:
			hns3_err(hw, "Fetched unsupported(%d) message from arq",
				 msg_q[0]);
			break;
		}

		hns3_mbx_head_ptr_move_arq(hw->arq);
		msg_q = hw->arq.msg_q[hw->arq.head];
	}
}

/*
 * Case1: receive response after timeout, req_msg_data
 *	  is 0, not equal resp_msg, do lost--
 * Case2: receive last response during new send_mbx_msg,
 *	  req_msg_data is different with resp_msg, let
 *	  lost--, continue to wait for response.
 */
static void
hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg)
{
	struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
	uint32_t tail = resp->tail + 1;

	if (tail > resp->head)
		tail = resp->head;
	if (resp->req_msg_data != resp_msg) {
		if (resp->lost)
			resp->lost--;
		hns3_warn(hw, "Received a mismatched response req_msg(%x) "
			  "resp_msg(%x) head(%d) tail(%d) lost(%d)",
			  resp->req_msg_data, resp_msg, resp->head, tail,
			  resp->lost);
	} else if (tail + resp->lost > resp->head) {
		resp->lost--;
		hns3_warn(hw, "Received a new response again resp_msg(%x) "
			  "head(%d) tail(%d) lost(%d)", resp_msg,
			  resp->head, tail, resp->lost);
	}
	rte_io_wmb();
	resp->tail = tail;
}

void
hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
{
	struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
	struct hns3_cmq_ring *crq = &hw->cmq.crq;
	struct hns3_mbx_pf_to_vf_cmd *req;
	struct hns3_cmd_desc *desc;
	uint32_t msg_data;
	uint16_t *msg_q;
	uint16_t flag;
	uint8_t *temp;
	int i;

	while (!hns3_cmd_crq_empty(hw)) {
		if (rte_atomic16_read(&hw->reset.disable_cmd))
			return;

		desc = &crq->desc[crq->next_to_use];
		req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;

		flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
		if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
			hns3_warn(hw,
				  "dropped invalid mailbox message, code = %d",
				  req->msg[0]);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hns3_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		switch (req->msg[0]) {
		case HNS3_MBX_PF_VF_RESP:
			resp->resp_status = hns3_resp_to_errno(req->msg[3]);

			temp = (uint8_t *)&req->msg[4];
			for (i = 0; i < HNS3_MBX_MAX_RESP_DATA_SIZE &&
			     i < HNS3_REG_MSG_DATA_OFFSET; i++) {
				resp->additional_info[i] = *temp;
				temp++;
			}
			msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2];
			hns3_update_resp_position(hw, msg_data);
			break;
		case HNS3_MBX_LINK_STAT_CHANGE:
		case HNS3_MBX_ASSERTING_RESET:
			msg_q = hw->arq.msg_q[hw->arq.tail];
			memcpy(&msg_q[0], req->msg,
			       HNS3_MBX_MAX_ARQ_MSG_SIZE * sizeof(uint16_t));
			hns3_mbx_tail_ptr_move_arq(hw->arq);

			hns3_mbx_handler(hw);
			break;
		default:
			hns3_err(hw,
				 "VF received unsupported(%d) mbx msg from PF",
				 req->msg[0]);
			break;
		}

		crq->desc[crq->next_to_use].flag = 0;
		hns3_mbx_ring_ptr_move_crq(crq);
	}

	/* Write back CMDQ_RQ header pointer, IMP need this pointer */
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use);
}
drivers/net/hns3/hns3_mbx.h (new file, 136 lines)
@@ -0,0 +1,136 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#ifndef _HNS3_MBX_H_
#define _HNS3_MBX_H_

#define HNS3_MBX_VF_MSG_DATA_NUM	16

enum HNS3_MBX_OPCODE {
	HNS3_MBX_RESET = 0x01,          /* (VF -> PF) assert reset */
	HNS3_MBX_ASSERTING_RESET,       /* (PF -> VF) PF is asserting reset */
	HNS3_MBX_SET_UNICAST,           /* (VF -> PF) set UC addr */
	HNS3_MBX_SET_MULTICAST,         /* (VF -> PF) set MC addr */
	HNS3_MBX_SET_VLAN,              /* (VF -> PF) set VLAN */
	HNS3_MBX_MAP_RING_TO_VECTOR,    /* (VF -> PF) map ring-to-vector */
	HNS3_MBX_UNMAP_RING_TO_VECTOR,  /* (VF -> PF) unmap ring-to-vector */
	HNS3_MBX_SET_PROMISC_MODE,      /* (VF -> PF) set promiscuous mode */
	HNS3_MBX_SET_MACVLAN,           /* (VF -> PF) set unicast filter */
	HNS3_MBX_API_NEGOTIATE,         /* (VF -> PF) negotiate API version */
	HNS3_MBX_GET_QINFO,             /* (VF -> PF) get queue config */
	HNS3_MBX_GET_QDEPTH,            /* (VF -> PF) get queue depth */
	HNS3_MBX_GET_TCINFO,            /* (VF -> PF) get TC config */
	HNS3_MBX_GET_RETA,              /* (VF -> PF) get RETA */
	HNS3_MBX_GET_RSS_KEY,           /* (VF -> PF) get RSS key */
	HNS3_MBX_GET_MAC_ADDR,          /* (VF -> PF) get MAC addr */
	HNS3_MBX_PF_VF_RESP,            /* (PF -> VF) generate response to VF */
	HNS3_MBX_GET_BDNUM,             /* (VF -> PF) get BD num */
	HNS3_MBX_GET_BUFSIZE,           /* (VF -> PF) get buffer size */
	HNS3_MBX_GET_STREAMID,          /* (VF -> PF) get stream id */
	HNS3_MBX_SET_AESTART,           /* (VF -> PF) start ae */
	HNS3_MBX_SET_TSOSTATS,          /* (VF -> PF) get tso stats */
	HNS3_MBX_LINK_STAT_CHANGE,      /* (PF -> VF) link status has changed */
	HNS3_MBX_GET_BASE_CONFIG,       /* (VF -> PF) get config */
	HNS3_MBX_BIND_FUNC_QUEUE,       /* (VF -> PF) bind function and queue */
	HNS3_MBX_GET_LINK_STATUS,       /* (VF -> PF) get link status */
	HNS3_MBX_QUEUE_RESET,           /* (VF -> PF) reset queue */
	HNS3_MBX_KEEP_ALIVE,            /* (VF -> PF) send keep alive cmd */
	HNS3_MBX_SET_ALIVE,             /* (VF -> PF) set alive state */
	HNS3_MBX_SET_MTU,               /* (VF -> PF) set mtu */
	HNS3_MBX_GET_QID_IN_PF,         /* (VF -> PF) get queue id in pf */
};

/* below are per-VF mac-vlan subcodes */
enum hns3_mbx_mac_vlan_subcode {
	HNS3_MBX_MAC_VLAN_UC_MODIFY = 0,  /* modify UC mac addr */
	HNS3_MBX_MAC_VLAN_UC_ADD,         /* add a new UC mac addr */
	HNS3_MBX_MAC_VLAN_UC_REMOVE,      /* remove a UC mac addr */
	HNS3_MBX_MAC_VLAN_MC_MODIFY,      /* modify MC mac addr */
	HNS3_MBX_MAC_VLAN_MC_ADD,         /* add new MC mac addr */
	HNS3_MBX_MAC_VLAN_MC_REMOVE,      /* remove MC mac addr */
};

/* below are per-VF vlan cfg subcodes */
enum hns3_mbx_vlan_cfg_subcode {
	HNS3_MBX_VLAN_FILTER = 0,  /* set vlan filter */
	HNS3_MBX_VLAN_TX_OFF_CFG,  /* set tx side vlan offload */
	HNS3_MBX_VLAN_RX_OFF_CFG,  /* set rx side vlan offload */
};

#define HNS3_MBX_MAX_MSG_SIZE		16
#define HNS3_MBX_MAX_RESP_DATA_SIZE	8
#define HNS3_MBX_RING_MAP_BASIC_MSG_NUM	3
#define HNS3_MBX_RING_NODE_VARIABLE_NUM	3

struct hns3_mbx_resp_status {
	rte_spinlock_t lock; /* protects against contending sync cmd resp */
	uint32_t req_msg_data;
	uint32_t head;
	uint32_t tail;
	uint32_t lost;
	int resp_status;
	uint8_t additional_info[HNS3_MBX_MAX_RESP_DATA_SIZE];
};

struct errno_respcode_map {
	uint16_t resp_code;
	int err_no;
};

#define HNS3_MBX_NEED_RESP_BIT	BIT(0)

struct hns3_mbx_vf_to_pf_cmd {
	uint8_t rsv;
	uint8_t mbx_src_vfid; /* Auto filled by IMP */
	uint8_t mbx_need_resp;
	uint8_t rsv1;
	uint8_t msg_len;
	uint8_t rsv2[3];
	uint8_t msg[HNS3_MBX_MAX_MSG_SIZE];
};

struct hns3_mbx_pf_to_vf_cmd {
	uint8_t dest_vfid;
	uint8_t rsv[3];
	uint8_t msg_len;
	uint8_t rsv1[3];
	uint16_t msg[8];
};

struct hns3_vf_rst_cmd {
	uint8_t dest_vfid;
	uint8_t vf_rst;
	uint8_t rsv[22];
};

struct hns3_pf_rst_done_cmd {
	uint8_t pf_rst_done;
	uint8_t rsv[23];
};

#define HNS3_PF_RESET_DONE_BIT	BIT(0)

/* used by VF to store the received Async responses from PF */
struct hns3_mbx_arq_ring {
#define HNS3_MBX_MAX_ARQ_MSG_SIZE	8
#define HNS3_MBX_MAX_ARQ_MSG_NUM	1024
	uint32_t head;
	uint32_t tail;
	uint32_t count;
	uint16_t msg_q[HNS3_MBX_MAX_ARQ_MSG_NUM][HNS3_MBX_MAX_ARQ_MSG_SIZE];
};

#define hns3_mbx_ring_ptr_move_crq(crq) \
	((crq)->next_to_use = ((crq)->next_to_use + 1) % (crq)->desc_num)
#define hns3_mbx_tail_ptr_move_arq(arq) \
	((arq).tail = ((arq).tail + 1) % HNS3_MBX_MAX_ARQ_MSG_SIZE)
#define hns3_mbx_head_ptr_move_arq(arq) \
	((arq).head = ((arq).head + 1) % HNS3_MBX_MAX_ARQ_MSG_SIZE)

struct hns3_hw;
void hns3_dev_handle_mbx_msg(struct hns3_hw *hw);
int hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
		      const uint8_t *msg_data, uint8_t msg_len, bool need_resp,
		      uint8_t *resp_data, uint16_t resp_len);
#endif /* _HNS3_MBX_H_ */
@@ -18,6 +18,7 @@ sources = files('hns3_cmd.c',
	'hns3_ethdev.c',
	'hns3_fdir.c',
	'hns3_flow.c',
	'hns3_mbx.c',
	'hns3_rss.c',
	)
deps += ['hash']