/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */
|
|
|
|
|
|
|
|
#ifndef _BNXT_CPR_H_
|
|
|
|
#define _BNXT_CPR_H_
|
2017-06-01 17:07:00 +00:00
|
|
|
#include <stdbool.h>
|
2016-06-15 21:23:08 +00:00
|
|
|
|
2017-01-18 01:21:29 +00:00
|
|
|
#include <rte_io.h>
|
2021-07-09 16:38:48 +00:00
|
|
|
#include "hsi_struct_def_dpdk.h"
|
2017-01-18 01:21:29 +00:00
|
|
|
|
2019-06-02 17:42:42 +00:00
|
|
|
struct bnxt_db_info;
|
|
|
|
|
2016-06-15 21:23:08 +00:00
|
|
|
/* Extract the completion type field from a completion ring entry. */
#define CMP_TYPE(cmp) \
	(((struct cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK)

/* Get completion length from completion type, in 16-byte units. */
#define CMP_LEN(cmp_type) (((cmp_type) & 1) + 1)

/* Advance a raw (unmasked) completion index by n entries. */
#define ADV_RAW_CMP(idx, n) ((idx) + (n))
#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
/* Mask a raw index down to a valid offset within the completion ring. */
#define RING_CMP(ring, idx) ((idx) & (ring)->ring_mask)
/* Same as RING_CMP() but takes the mask directly instead of the ring. */
#define RING_CMPL(ring_mask, idx) ((idx) & (ring_mask))
/*
 * NOTE(review): RING_CMP() takes (ring, idx) but is invoked here with a
 * single argument, so this macro cannot expand successfully if used.
 * Presumably dead code — confirm there are no callers before relying on it.
 */
#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
|
|
|
|
|
|
|
|
/* Doorbell flag sets: REARM leaves the IRQ enabled, plain CP disables it. */
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)

/* Write the consumer index to the doorbell and re-arm the CP interrupt. */
#define B_CP_DB_REARM(cpr, raw_cons) \
	rte_write32((DB_CP_REARM_FLAGS | \
		    DB_RING_IDX(&((cpr)->cp_db), raw_cons)), \
		    ((cpr)->cp_db.doorbell))

/* Arm the completion ring interrupt without updating the consumer index. */
#define B_CP_DB_ARM(cpr) rte_write32((DB_KEY_CP), \
				     ((cpr)->cp_db.doorbell))

/* Disarm the completion ring interrupt (plain store, no ordered I/O write). */
#define B_CP_DB_DISARM(cpr) (*(uint32_t *)((cpr)->cp_db.doorbell) = \
			     DB_KEY_CP | DB_IRQ_DIS)

/*
 * Index-arm variant using a plain store. NOTE(review): unlike
 * B_CP_DB_REARM(), the raw index is not passed through DB_RING_IDX() here —
 * presumably a legacy path; confirm callers supply an already-masked index.
 */
#define B_CP_DB_IDX_ARM(cpr, cons) \
	(*(uint32_t *)((cpr)->cp_db.doorbell) = (DB_CP_REARM_FLAGS | \
						 (cons)))

/* Index-disarm variant; the wmb orders prior stores before the doorbell. */
#define B_CP_DB_IDX_DISARM(cpr, cons) do { \
		rte_smp_wmb(); \
		(*(uint32_t *)((cpr)->cp_db.doorbell) = (DB_CP_FLAGS | \
							 (cons)); \
	} while (0)

/* Update the consumer index with the IRQ kept disabled (relaxed store). */
#define B_CP_DIS_DB(cpr, raw_cons) \
	rte_write32_relaxed((DB_CP_FLAGS | \
			    DB_RING_IDX(&((cpr)->cp_db), raw_cons)), \
			    ((cpr)->cp_db.doorbell))

/* As B_CP_DIS_DB() but with an explicit ring mask and an ordered write. */
#define B_CP_DB(cpr, raw_cons, ring_mask) \
	rte_write32((DB_CP_FLAGS | \
		    RING_CMPL((ring_mask), raw_cons)), \
		    ((cpr)->cp_db.doorbell))
|
|
|
|
|
|
|
|
/* Doorbell descriptor for a ring: mapped address, key and index encoding. */
struct bnxt_db_info {
	void *doorbell;			/* mapped doorbell register address */
	union {
		uint64_t db_key64;	/* doorbell key, 64-bit doorbell format */
		uint32_t db_key32;	/* doorbell key, 32-bit doorbell format */
	};
	bool db_64;			/* true when 64-bit doorbells are in use */
	uint32_t db_ring_mask;		/* masks a raw index to the ring size */
	uint32_t db_epoch_mask;		/* selects the epoch bit of a raw index */
	uint32_t db_epoch_shift;	/* shift placing the epoch bit in the
					 * doorbell value (see DB_EPOCH below)
					 */
};
|
2016-06-15 21:23:08 +00:00
|
|
|
|
2020-12-20 05:24:25 +00:00
|
|
|
/* Extract the epoch bit from a raw index and shift it into doorbell position. */
#define DB_EPOCH(db, idx) (((idx) & (db)->db_epoch_mask) << \
			   ((db)->db_epoch_shift))
/* Doorbell ring index: raw index masked to the ring size, plus epoch bit. */
#define DB_RING_IDX(db, idx) (((idx) & (db)->db_ring_mask) | \
			      DB_EPOCH(db, idx))
|
2020-12-07 17:54:30 +00:00
|
|
|
|
2016-06-15 21:23:08 +00:00
|
|
|
struct bnxt_ring;

/* Per-queue completion ring state. */
struct bnxt_cp_ring_info {
	uint32_t cp_raw_cons;		/* raw (unmasked) consumer index */

	struct cmpl_base *cp_desc_ring;	/* completion descriptor array */
	struct bnxt_db_info cp_db;	/* doorbell for this completion ring */
	rte_iova_t cp_desc_mapping;	/* IOVA of cp_desc_ring */

	struct ctx_hw_stats *hw_stats;	/* HW statistics block */
	rte_iova_t hw_stats_map;	/* IOVA of hw_stats */
	uint32_t hw_stats_ctx_id;	/* HW statistics context id */

	struct bnxt_ring *cp_ring_struct; /* generic ring bookkeeping */
};
|
|
|
|
|
|
|
|
/* RX completion error bits (buffer errors or bad CRC). */
#define RX_CMP_L2_ERRORS \
	(RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_PKT_CMPL_ERRORS_CRC_ERROR)

struct bnxt;
/* Handle a firmware async-event completion. */
void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp);
/* Handle a forwarded-request completion. */
void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmp);
/* Handle an HWRM-response completion; returns an int status (see caller). */
int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp);
/* Reset the device and resume operation; arg is presumably the bnxt port —
 * TODO confirm against the alarm/callback registration site.
 */
void bnxt_dev_reset_and_resume(void *arg);
void bnxt_wait_for_device_shutdown(struct bnxt *bp);

/* Short aliases for the long HWRM async-event field names. */
#define EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL \
	HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL

#define EVENT_DATA1_REASON_CODE_MASK \
	HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK

#define EVENT_DATA1_FLAGS_MASK \
	HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK

#define EVENT_DATA1_FLAGS_MASTER_FUNC \
	HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC

#define EVENT_DATA1_FLAGS_RECOVERY_ENABLED \
	HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED

bool bnxt_is_recovery_enabled(struct bnxt *bp);
bool bnxt_is_primary_func(struct bnxt *bp);

void bnxt_stop_rxtx(struct bnxt *bp);
|
2021-07-09 16:38:48 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Check validity of a completion ring entry. If the entry is valid, include a
|
|
|
|
* C11 __ATOMIC_ACQUIRE fence to ensure that subsequent loads of fields in the
|
|
|
|
* completion are not hoisted by the compiler or by the CPU to come before the
|
|
|
|
* loading of the "valid" field.
|
|
|
|
*
|
|
|
|
* Note: the caller must not access any fields in the specified completion
|
|
|
|
* entry prior to calling this function.
|
|
|
|
*
|
|
|
|
* @param cmpl
|
|
|
|
* Pointer to an entry in the completion ring.
|
|
|
|
* @param raw_cons
|
|
|
|
* Raw consumer index of entry in completion ring.
|
|
|
|
* @param ring_size
|
|
|
|
* Size of completion ring.
|
|
|
|
*/
|
|
|
|
static __rte_always_inline bool
|
|
|
|
bnxt_cpr_cmp_valid(const void *cmpl, uint32_t raw_cons, uint32_t ring_size)
|
|
|
|
{
|
|
|
|
const struct cmpl_base *c = cmpl;
|
|
|
|
bool expected, valid;
|
|
|
|
|
|
|
|
expected = !(raw_cons & ring_size);
|
|
|
|
valid = !!(rte_le_to_cpu_32(c->info3_v) & CMPL_BASE_V);
|
|
|
|
if (valid == expected) {
|
|
|
|
rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
2016-06-15 21:23:08 +00:00
|
|
|
#endif
|