2018-04-02 22:34:32 +00:00
|
|
|
/* SPDX-License-Identifier: BSD-3-Clause
|
|
|
|
* Copyright(c) 2014-2018 Broadcom
|
|
|
|
* All rights reserved.
|
2016-06-15 21:23:08 +00:00
|
|
|
*/
|
|
|
|
|
2016-06-15 21:23:15 +00:00
|
|
|
#include <rte_malloc.h>
|
2019-10-02 01:23:23 +00:00
|
|
|
#include <rte_alarm.h>
|
2019-10-02 01:23:31 +00:00
|
|
|
#include <rte_cycles.h>
|
2016-06-15 21:23:15 +00:00
|
|
|
|
2016-06-15 21:23:08 +00:00
|
|
|
#include "bnxt.h"
|
|
|
|
#include "bnxt_hwrm.h"
|
|
|
|
#include "bnxt_ring.h"
|
2016-06-15 21:23:12 +00:00
|
|
|
#include "hsi_struct_def_dpdk.h"
|
2016-06-15 21:23:08 +00:00
|
|
|
|
2019-10-02 01:23:31 +00:00
|
|
|
void bnxt_wait_for_device_shutdown(struct bnxt *bp)
|
|
|
|
{
|
|
|
|
uint32_t val, timeout;
|
|
|
|
|
|
|
|
/* if HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD is set
|
|
|
|
* in HWRM_FUNC_QCAPS command, wait for FW_STATUS to set
|
|
|
|
* the SHUTDOWN bit in health register
|
|
|
|
*/
|
|
|
|
if (!(bp->recovery_info &&
|
2019-12-10 15:05:29 +00:00
|
|
|
(bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD)))
|
2019-10-02 01:23:31 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
/* Driver has to wait for fw_reset_max_msecs or shutdown bit which comes
|
|
|
|
* first for FW to collect crash dump.
|
|
|
|
*/
|
|
|
|
timeout = bp->fw_reset_max_msecs;
|
|
|
|
|
|
|
|
/* Driver has to poll for shutdown bit in fw_status register
|
|
|
|
*
|
|
|
|
* 1. in case of hot fw upgrade, this bit will be set after all
|
|
|
|
* function drivers unregistered with fw.
|
|
|
|
* 2. in case of fw initiated error recovery, this bit will be
|
|
|
|
* set after fw has collected the core dump
|
|
|
|
*/
|
|
|
|
do {
|
|
|
|
val = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
|
|
|
|
if (val & BNXT_FW_STATUS_SHUTDOWN)
|
|
|
|
return;
|
|
|
|
|
|
|
|
rte_delay_ms(100);
|
|
|
|
timeout -= 100;
|
|
|
|
} while (timeout);
|
|
|
|
}
|
|
|
|
|
2020-07-29 14:04:59 +00:00
|
|
|
static void
|
|
|
|
bnxt_process_default_vnic_change(struct bnxt *bp,
|
|
|
|
struct hwrm_async_event_cmpl *async_cmp)
|
|
|
|
{
|
2020-10-09 11:11:24 +00:00
|
|
|
uint16_t vnic_state, vf_fid, vf_id;
|
net/bnxt: support representors on remote host domain
In the Stingray use case, representors are conventionally run
inside the SoC domain representing functions that are on the
X86 domain. In order to support this mechanism of building
representors for endpoints that are not in the same host domain,
additional dev args have been in the PMD like so:
rep-based-pf=<physical index> rep-is-pf=<VF=0 or PF=1>
where `rep-based-pf` specifies the physical index of the base PF
that the representor is derived off of.
Since representor(s) can be created for endpoint PFs as well,
rename struct bnxt_vf_representor to bnxt_representor and other such
dev_ops and function names.
devargs have also been extended to specify the exact CoS queue along
with flow control enablement to be used for the conduit between the
representor and the endpoint function.
This is how a sample devargs would look with all the extended devargs
-w 0000:06:02.0,host-based-truflow=1,representor=[1],rep-based-pf=8,
rep-is-pf=1,rep-q-r2f=1,rep-fc-r2f=0,rep-q-f2r=1,rep-fc-f2r=1
Call CFA_PAIR_ALLOC only in case of Stingray instead of CFA_VFR_ALLOC.
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
2020-09-22 07:06:30 +00:00
|
|
|
struct bnxt_representor *vf_rep_bp;
|
2020-07-29 14:04:59 +00:00
|
|
|
struct rte_eth_dev *eth_dev;
|
|
|
|
bool vfr_found = false;
|
|
|
|
uint32_t event_data;
|
|
|
|
|
|
|
|
if (!BNXT_TRUFLOW_EN(bp))
|
|
|
|
return;
|
|
|
|
|
|
|
|
PMD_DRV_LOG(INFO, "Default vnic change async event received\n");
|
|
|
|
event_data = rte_le_to_cpu_32(async_cmp->event_data1);
|
|
|
|
|
|
|
|
vnic_state = (event_data & BNXT_DEFAULT_VNIC_STATE_MASK) >>
|
|
|
|
BNXT_DEFAULT_VNIC_STATE_SFT;
|
|
|
|
if (vnic_state != BNXT_DEFAULT_VNIC_ALLOC)
|
|
|
|
return;
|
|
|
|
|
2020-10-09 11:11:24 +00:00
|
|
|
if (!bp->rep_info)
|
2020-07-29 14:04:59 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
vf_fid = (event_data & BNXT_DEFAULT_VNIC_CHANGE_VF_ID_MASK) >>
|
|
|
|
BNXT_DEFAULT_VNIC_CHANGE_VF_ID_SFT;
|
|
|
|
PMD_DRV_LOG(INFO, "async event received vf_id 0x%x\n", vf_fid);
|
|
|
|
|
|
|
|
for (vf_id = 0; vf_id < BNXT_MAX_VF_REPS; vf_id++) {
|
|
|
|
eth_dev = bp->rep_info[vf_id].vfr_eth_dev;
|
|
|
|
if (!eth_dev)
|
|
|
|
continue;
|
|
|
|
vf_rep_bp = eth_dev->data->dev_private;
|
|
|
|
if (vf_rep_bp &&
|
|
|
|
vf_rep_bp->fw_fid == vf_fid) {
|
|
|
|
vfr_found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!vfr_found)
|
|
|
|
return;
|
|
|
|
|
net/bnxt: support representors on remote host domain
In the Stingray use case, representors are conventionally run
inside the SoC domain representing functions that are on the
X86 domain. In order to support this mechanism of building
representors for endpoints that are not in the same host domain,
additional dev args have been in the PMD like so:
rep-based-pf=<physical index> rep-is-pf=<VF=0 or PF=1>
where `rep-based-pf` specifies the physical index of the base PF
that the representor is derived off of.
Since representor(s) can be created for endpoint PFs as well,
rename struct bnxt_vf_representor to bnxt_representor and other such
dev_ops and function names.
devargs have also been extended to specify the exact CoS queue along
with flow control enablement to be used for the conduit between the
representor and the endpoint function.
This is how a sample devargs would look with all the extended devargs
-w 0000:06:02.0,host-based-truflow=1,representor=[1],rep-based-pf=8,
rep-is-pf=1,rep-q-r2f=1,rep-fc-r2f=0,rep-q-f2r=1,rep-fc-f2r=1
Call CFA_PAIR_ALLOC only in case of Stingray instead of CFA_VFR_ALLOC.
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
2020-09-22 07:06:30 +00:00
|
|
|
bnxt_rep_dev_start_op(eth_dev);
|
2020-07-29 14:04:59 +00:00
|
|
|
}
|
|
|
|
|
2016-06-15 21:23:08 +00:00
|
|
|
/*
 * Async event handling
 *
 * Decode a firmware asynchronous event completion and react to it:
 * link changes, driver load/unload notifications, firmware reset and
 * error-recovery announcements, debug notifications, and default-VNIC
 * changes.  May return early from within a case; callers must not
 * assume it falls through to the end of the switch.
 */
void bnxt_handle_async_event(struct bnxt *bp,
			     struct cmpl_base *cmp)
{
	struct hwrm_async_event_cmpl *async_cmp =
				(struct hwrm_async_event_cmpl *)cmp;
	uint16_t event_id = rte_le_to_cpu_16(async_cmp->event_id);
	struct bnxt_error_recovery_info *info;
	uint32_t event_data;

	switch (event_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
		/* FALLTHROUGH */
		bnxt_link_update_op(bp->eth_dev, 0);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n");
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		PMD_DRV_LOG(INFO, "Async event: VF config changed\n");
		/* Re-read our own function configuration from firmware */
		bnxt_hwrm_func_qcfg(bp, NULL);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
		PMD_DRV_LOG(INFO, "Port conn async event\n");
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
		/*
		 * Avoid any rx/tx packet processing during firmware reset
		 * operation.
		 */
		bnxt_stop_rxtx(bp);

		/* Ignore reset notify async events when stopping the port */
		if (!bp->eth_dev->data->dev_started) {
			bp->flags |= BNXT_FLAG_FATAL_ERROR;
			return;
		}

		/* Serialize flag/timeout updates against the recovery path */
		pthread_mutex_lock(&bp->err_recovery_lock);
		event_data = rte_le_to_cpu_32(async_cmp->event_data1);
		/* timestamp_lo/hi values are in units of 100ms */
		bp->fw_reset_max_msecs = async_cmp->timestamp_hi ?
			rte_le_to_cpu_16(async_cmp->timestamp_hi) * 100 :
			BNXT_MAX_FW_RESET_TIMEOUT;
		bp->fw_reset_min_msecs = async_cmp->timestamp_lo ?
			async_cmp->timestamp_lo * 100 :
			BNXT_MIN_FW_READY_TIMEOUT;
		if ((event_data & EVENT_DATA1_REASON_CODE_MASK) ==
		    EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL) {
			PMD_DRV_LOG(INFO,
				    "Firmware fatal reset event received\n");
			bp->flags |= BNXT_FLAG_FATAL_ERROR;
		} else {
			PMD_DRV_LOG(INFO,
				    "Firmware non-fatal reset event received\n");
		}

		bp->flags |= BNXT_FLAG_FW_RESET;
		pthread_mutex_unlock(&bp->err_recovery_lock);
		/* Kick off reset-and-resume from the EAL alarm context, not
		 * from this completion-processing context.
		 */
		rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
				  (void *)bp);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY:
		info = bp->recovery_info;

		/* Recovery not negotiated with firmware: nothing to record */
		if (!info)
			return;

		PMD_DRV_LOG(INFO, "Error recovery async event received\n");

		event_data = rte_le_to_cpu_32(async_cmp->event_data1) &
				EVENT_DATA1_FLAGS_MASK;

		/* Track whether this function is the recovery "master" */
		if (event_data & EVENT_DATA1_FLAGS_MASTER_FUNC)
			info->flags |= BNXT_FLAG_MASTER_FUNC;
		else
			info->flags &= ~BNXT_FLAG_MASTER_FUNC;

		if (event_data & EVENT_DATA1_FLAGS_RECOVERY_ENABLED)
			info->flags |= BNXT_FLAG_RECOVERY_ENABLED;
		else
			info->flags &= ~BNXT_FLAG_RECOVERY_ENABLED;

		PMD_DRV_LOG(INFO, "recovery enabled(%d), master function(%d)\n",
			    bnxt_is_recovery_enabled(bp),
			    bnxt_is_master_func(bp));

		/* Health-check poller already scheduled: nothing more to do */
		if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
			return;

		/* Seed the baselines the health-check poller compares against */
		info->last_heart_beat =
			bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
		info->last_reset_counter =
			bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG);

		bnxt_schedule_fw_health_check(bp);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
		PMD_DRV_LOG(INFO, "DNC event: evt_data1 %#x evt_data2 %#x\n",
			    rte_le_to_cpu_32(async_cmp->event_data1),
			    rte_le_to_cpu_32(async_cmp->event_data2));
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE:
		bnxt_process_default_vnic_change(bp, async_cmp);
		break;
	default:
		PMD_DRV_LOG(DEBUG, "handle_async_event id = 0x%x\n", event_id);
		break;
	}
}
|
|
|
|
|
|
|
|
void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
|
|
|
|
{
|
2017-06-01 17:07:00 +00:00
|
|
|
struct hwrm_exec_fwd_resp_input *fwreq;
|
2016-06-15 21:23:08 +00:00
|
|
|
struct hwrm_fwd_req_cmpl *fwd_cmpl = (struct hwrm_fwd_req_cmpl *)cmpl;
|
|
|
|
struct input *fwd_cmd;
|
2017-06-01 17:07:00 +00:00
|
|
|
uint16_t fw_vf_id;
|
|
|
|
uint16_t vf_id;
|
|
|
|
uint16_t req_len;
|
|
|
|
int rc;
|
2016-06-15 21:23:08 +00:00
|
|
|
|
2020-05-15 11:10:41 +00:00
|
|
|
if (bp->pf->active_vfs <= 0) {
|
2018-01-26 17:31:55 +00:00
|
|
|
PMD_DRV_LOG(ERR, "Forwarded VF with no active VFs\n");
|
2017-06-01 17:07:00 +00:00
|
|
|
return;
|
2016-06-15 21:23:08 +00:00
|
|
|
}
|
|
|
|
|
2017-06-01 17:07:00 +00:00
|
|
|
/* Qualify the fwd request */
|
|
|
|
fw_vf_id = rte_le_to_cpu_16(fwd_cmpl->source_id);
|
2020-05-15 11:10:41 +00:00
|
|
|
vf_id = fw_vf_id - bp->pf->first_vf_id;
|
2017-06-01 17:07:00 +00:00
|
|
|
|
|
|
|
req_len = (rte_le_to_cpu_16(fwd_cmpl->req_len_type) &
|
|
|
|
HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >>
|
|
|
|
HWRM_FWD_REQ_CMPL_REQ_LEN_SFT;
|
|
|
|
if (req_len > sizeof(fwreq->encap_request))
|
|
|
|
req_len = sizeof(fwreq->encap_request);
|
|
|
|
|
2016-06-15 21:23:08 +00:00
|
|
|
/* Locate VF's forwarded command */
|
2020-05-15 11:10:41 +00:00
|
|
|
fwd_cmd = (struct input *)bp->pf->vf_info[vf_id].req_buf;
|
2017-06-01 17:07:00 +00:00
|
|
|
|
2020-05-15 11:10:41 +00:00
|
|
|
if (fw_vf_id < bp->pf->first_vf_id ||
|
|
|
|
fw_vf_id >= bp->pf->first_vf_id + bp->pf->active_vfs) {
|
2018-01-26 17:31:55 +00:00
|
|
|
PMD_DRV_LOG(ERR,
|
2017-06-01 17:07:00 +00:00
|
|
|
"FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
|
2020-05-15 11:10:41 +00:00
|
|
|
fw_vf_id, bp->pf->first_vf_id,
|
|
|
|
(bp->pf->first_vf_id) + bp->pf->active_vfs - 1,
|
|
|
|
bp->pf->first_vf_id, bp->pf->active_vfs);
|
2016-06-15 21:23:08 +00:00
|
|
|
goto reject;
|
|
|
|
}
|
|
|
|
|
2020-10-09 11:11:21 +00:00
|
|
|
if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd)) {
|
2017-06-01 17:07:00 +00:00
|
|
|
/*
|
|
|
|
* In older firmware versions, the MAC had to be all zeros for
|
|
|
|
* the VF to set it's MAC via hwrm_func_vf_cfg. Set to all
|
|
|
|
* zeros if it's being configured and has been ok'd by caller.
|
|
|
|
*/
|
|
|
|
if (fwd_cmd->req_type == HWRM_FUNC_VF_CFG) {
|
|
|
|
struct hwrm_func_vf_cfg_input *vfc = (void *)fwd_cmd;
|
|
|
|
|
|
|
|
if (vfc->enables &
|
|
|
|
HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR) {
|
|
|
|
bnxt_hwrm_func_vf_mac(bp, vf_id,
|
|
|
|
(const uint8_t *)"\x00\x00\x00\x00\x00");
|
|
|
|
}
|
|
|
|
}
|
2020-10-09 11:11:21 +00:00
|
|
|
|
2017-06-01 17:07:00 +00:00
|
|
|
if (fwd_cmd->req_type == HWRM_CFA_L2_SET_RX_MASK) {
|
|
|
|
struct hwrm_cfa_l2_set_rx_mask_input *srm =
|
|
|
|
(void *)fwd_cmd;
|
|
|
|
|
|
|
|
srm->vlan_tag_tbl_addr = rte_cpu_to_le_64(0);
|
|
|
|
srm->num_vlan_tags = rte_cpu_to_le_32(0);
|
|
|
|
srm->mask &= ~rte_cpu_to_le_32(
|
|
|
|
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY |
|
|
|
|
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN |
|
|
|
|
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN);
|
|
|
|
}
|
2020-10-09 11:11:21 +00:00
|
|
|
|
2017-06-01 17:07:00 +00:00
|
|
|
/* Forward */
|
|
|
|
rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
|
|
|
|
if (rc) {
|
2018-01-26 17:31:55 +00:00
|
|
|
PMD_DRV_LOG(ERR,
|
2017-06-01 17:07:00 +00:00
|
|
|
"Failed to send FWD req VF 0x%x, type 0x%x.\n",
|
2020-05-15 11:10:41 +00:00
|
|
|
fw_vf_id - bp->pf->first_vf_id,
|
2017-06-01 17:07:00 +00:00
|
|
|
rte_le_to_cpu_16(fwd_cmd->req_type));
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
2016-06-15 21:23:08 +00:00
|
|
|
|
|
|
|
reject:
|
2017-06-01 17:07:00 +00:00
|
|
|
rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
|
|
|
|
if (rc) {
|
2018-01-26 17:31:55 +00:00
|
|
|
PMD_DRV_LOG(ERR,
|
2017-06-01 17:07:00 +00:00
|
|
|
"Failed to send REJECT req VF 0x%x, type 0x%x.\n",
|
2020-05-15 11:10:41 +00:00
|
|
|
fw_vf_id - bp->pf->first_vf_id,
|
2017-06-01 17:07:00 +00:00
|
|
|
rte_le_to_cpu_16(fwd_cmd->req_type));
|
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
2016-06-15 21:23:08 +00:00
|
|
|
}
|
|
|
|
|
2018-05-22 18:13:44 +00:00
|
|
|
int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp)
|
2017-06-01 17:07:00 +00:00
|
|
|
{
|
2018-05-22 18:13:44 +00:00
|
|
|
bool evt = 0;
|
2017-06-01 17:07:00 +00:00
|
|
|
|
2018-05-22 18:13:44 +00:00
|
|
|
if (bp == NULL || cmp == NULL) {
|
|
|
|
PMD_DRV_LOG(ERR, "invalid NULL argument\n");
|
|
|
|
return evt;
|
|
|
|
}
|
2016-09-26 16:18:52 +00:00
|
|
|
|
2019-10-02 01:23:22 +00:00
|
|
|
if (unlikely(is_bnxt_in_error(bp)))
|
|
|
|
return 0;
|
|
|
|
|
2018-05-22 18:13:44 +00:00
|
|
|
switch (CMP_TYPE(cmp)) {
|
|
|
|
case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
|
|
|
|
/* Handle any async event */
|
|
|
|
bnxt_handle_async_event(bp, cmp);
|
|
|
|
evt = 1;
|
|
|
|
break;
|
2020-10-09 11:11:21 +00:00
|
|
|
case CMPL_BASE_TYPE_HWRM_FWD_REQ:
|
2018-05-22 18:13:44 +00:00
|
|
|
/* Handle HWRM forwarded responses */
|
|
|
|
bnxt_handle_fwd_req(bp, cmp);
|
|
|
|
evt = 1;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* Ignore any other events */
|
2019-10-02 01:23:32 +00:00
|
|
|
PMD_DRV_LOG(DEBUG, "Ignoring %02x completion\n", CMP_TYPE(cmp));
|
2018-05-22 18:13:44 +00:00
|
|
|
break;
|
|
|
|
}
|
2016-06-15 21:23:08 +00:00
|
|
|
|
2018-05-22 18:13:44 +00:00
|
|
|
return evt;
|
2016-06-15 21:23:08 +00:00
|
|
|
}
|
2019-10-02 01:23:28 +00:00
|
|
|
|
|
|
|
bool bnxt_is_master_func(struct bnxt *bp)
|
|
|
|
{
|
|
|
|
if (bp->recovery_info->flags & BNXT_FLAG_MASTER_FUNC)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool bnxt_is_recovery_enabled(struct bnxt *bp)
|
|
|
|
{
|
|
|
|
struct bnxt_error_recovery_info *info;
|
|
|
|
|
|
|
|
info = bp->recovery_info;
|
|
|
|
if (info && (info->flags & BNXT_FLAG_RECOVERY_ENABLED))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
2020-10-26 03:56:16 +00:00
|
|
|
|
|
|
|
void bnxt_stop_rxtx(struct bnxt *bp)
|
|
|
|
{
|
|
|
|
bp->eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
|
|
|
|
bp->eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
|
|
|
|
}
|