Upgrade the firmware carried in the driver and loaded during hardware initialization (a.k.a. STORM firmware) to version 7.13.1 (the latest version).
David C Somayajulu 2016-02-25 22:44:00 +00:00
parent 6daa2c8751
commit 4ef8ebfd62
20 changed files with 105090 additions and 91486 deletions

File diff suppressed because it is too large


@ -1,5 +1,5 @@
/*-
* Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
* Copyright (c) 2007-2017 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -11,7 +11,7 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
@ -28,7 +28,7 @@
__FBSDID("$FreeBSD$");
static const struct iro e1_iro_arr[379] = {
static const struct iro e1_iro_arr[385] = {
{ 0x40, 0x0, 0x0, 0x0, 0x0}, // COMMON_SB_SIZE
{ 0x40, 0x0, 0x0, 0x0, 0x0}, // COMMON_SB_DATA_SIZE
{ 0x28, 0x0, 0x0, 0x0, 0x0}, // COMMON_SP_SB_SIZE
@ -132,6 +132,10 @@ static const struct iro e1_iro_arr[379] = {
{ 0x19c8, 0x0, 0x0, 0x0, 0x8}, // TSTORM_COMMON_RTC_PARAMS_OFFSET
{ 0x2008, 0x10, 0x0, 0x0, 0x10}, // TSTORM_ASSERT_LIST_OFFSET(assertListEntry)
{ 0x2000, 0x0, 0x0, 0x0, 0x8}, // TSTORM_ASSERT_LIST_INDEX_OFFSET
{UNDEF_IRO, 0x0, 0x0, 0x0, 0x0}, // TSTORM_MEASURE_PCI_LATENCY_CTRL_OFFSET
{UNDEF_IRO, 0x0, 0x0, 0x0, 0x0}, // TSTORM_MEASURE_PCI_LATENCY_DATA_OFFSET
{ 0x0, 0x0, 0x0, 0x0, 0x0}, // TSTORM_AGG_MEASURE_PCI_LATENCY_INDEX
{ 0x0, 0x0, 0x0, 0x0, 0x0}, // TSTORM_AGG_MEASURE_PCI_LATENCY_COMP_TYPE
{ 0x4870, 0x8, 0x0, 0x0, 0x1}, // TSTORM_FUNC_EN_OFFSET(funcId)
{ 0x4871, 0x8, 0x0, 0x0, 0x1}, // TSTORM_VF_TO_PF_OFFSET(funcId)
{ 0x4872, 0x8, 0x0, 0x0, 0x1}, // TSTORM_RECORD_SLOW_PATH_OFFSET(funcId)
@ -234,6 +238,8 @@ static const struct iro e1_iro_arr[379] = {
{ 0x2508, 0x40, 0x0, 0x0, 0x20}, // TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId)
{UNDEF_IRO, 0x0, 0x0, 0x0, 0x0}, // TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId)
{UNDEF_IRO, 0x0, 0x0, 0x0, 0x0}, // TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET
{UNDEF_IRO, 0x0, 0x0, 0x0, 0x0}, // TSTORM_ACCEPT_CLASSIFY_FAIL_E2_ENABLE_OFFSET(portId)
{UNDEF_IRO, 0x0, 0x0, 0x0, 0x0}, // TSTORM_ACCEPT_CLASSIFY_FAIL_E2_VNIC_OFFSET(portId)
{UNDEF_IRO, 0x0, 0x0, 0x0, 0x0}, // USTORM_CQE_PAGE_NEXT_OFFSET(portId,clientId)
{ 0x3000, 0x0, 0x0, 0x0, 0x1000}, // USTORM_AGG_DATA_OFFSET
{ 0x50a1, 0x0, 0x0, 0x0, 0x1}, // USTORM_TPA_BTR_OFFSET
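Each entry in these IRO ("Immediate RAM Offset") arrays describes one firmware memory region as { base, m1, m2, m3, size }, and the ECORE offset macros further down in this commit resolve a region address by scaling indices against those multipliers. The sketch below shows how a single-index lookup works; the struct layout follows the ecore convention and is illustrative, not the driver's exact definition.

#include <stdint.h>

#define UNDEF_IRO 0xffffffff /* assumed sentinel for "absent from this chip's firmware" */

/* Illustrative IRO entry: a base offset into STORM RAM plus up to
 * three per-index multipliers and an element size. */
struct iro_sketch {
	uint32_t base;
	uint16_t m1;
	uint16_t m2;
	uint16_t m3;
	uint16_t size;
};

/* Mirrors single-index macros such as
 * TSTORM_ASSERT_LIST_OFFSET(e) = IRO[n].base + (e) * IRO[n].m1. */
static inline uint32_t
iro_offset1(const struct iro_sketch *iro, uint32_t idx)
{
	return (iro->base + (idx * iro->m1));
}

Because every such macro indexes the array by position, growing it from 379 to 385 entries forces the wholesale renumbering seen in the ecore_fw_defs.h hunks below.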

File diff suppressed because it is too large


@ -1,5 +1,5 @@
/*-
* Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
* Copyright (c) 2007-2017 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -11,7 +11,7 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
@ -28,7 +28,7 @@
__FBSDID("$FreeBSD$");
static const struct iro e1h_iro_arr[379] = {
static const struct iro e1h_iro_arr[385] = {
{ 0x40, 0x0, 0x0, 0x0, 0x0}, // COMMON_SB_SIZE
{ 0x40, 0x0, 0x0, 0x0, 0x0}, // COMMON_SB_DATA_SIZE
{ 0x28, 0x0, 0x0, 0x0, 0x0}, // COMMON_SP_SB_SIZE
@ -132,6 +132,10 @@ static const struct iro e1h_iro_arr[379] = {
{ 0x1ec8, 0x0, 0x0, 0x0, 0x8}, // TSTORM_COMMON_RTC_PARAMS_OFFSET
{ 0x2008, 0x10, 0x0, 0x0, 0x10}, // TSTORM_ASSERT_LIST_OFFSET(assertListEntry)
{ 0x2000, 0x0, 0x0, 0x0, 0x8}, // TSTORM_ASSERT_LIST_INDEX_OFFSET
{UNDEF_IRO, 0x0, 0x0, 0x0, 0x0}, // TSTORM_MEASURE_PCI_LATENCY_CTRL_OFFSET
{UNDEF_IRO, 0x0, 0x0, 0x0, 0x0}, // TSTORM_MEASURE_PCI_LATENCY_DATA_OFFSET
{ 0x0, 0x0, 0x0, 0x0, 0x0}, // TSTORM_AGG_MEASURE_PCI_LATENCY_INDEX
{ 0x0, 0x0, 0x0, 0x0, 0x0}, // TSTORM_AGG_MEASURE_PCI_LATENCY_COMP_TYPE
{ 0xa080, 0x8, 0x0, 0x0, 0x1}, // TSTORM_FUNC_EN_OFFSET(funcId)
{ 0xa081, 0x8, 0x0, 0x0, 0x1}, // TSTORM_VF_TO_PF_OFFSET(funcId)
{ 0xa082, 0x8, 0x0, 0x0, 0x1}, // TSTORM_RECORD_SLOW_PATH_OFFSET(funcId)
@ -234,6 +238,8 @@ static const struct iro e1h_iro_arr[379] = {
{ 0x3008, 0x40, 0x0, 0x0, 0x20}, // TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId)
{ 0x3200, 0x20, 0x0, 0x0, 0x20}, // TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId)
{ 0x37a8, 0x0, 0x0, 0x0, 0x8}, // TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET
{UNDEF_IRO, 0x0, 0x0, 0x0, 0x0}, // TSTORM_ACCEPT_CLASSIFY_FAIL_E2_ENABLE_OFFSET(portId)
{UNDEF_IRO, 0x0, 0x0, 0x0, 0x0}, // TSTORM_ACCEPT_CLASSIFY_FAIL_E2_VNIC_OFFSET(portId)
{UNDEF_IRO, 0x0, 0x0, 0x0, 0x0}, // USTORM_CQE_PAGE_NEXT_OFFSET(portId,clientId)
{ 0xa000, 0x0, 0x0, 0x0, 0x2000}, // USTORM_AGG_DATA_OFFSET
{ 0x3ec1, 0x0, 0x0, 0x0, 0x1}, // USTORM_TPA_BTR_OFFSET

File diff suppressed because it is too large


@ -1,5 +1,5 @@
/*-
* Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
* Copyright (c) 2007-2017 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -11,7 +11,7 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
@ -28,7 +28,7 @@
__FBSDID("$FreeBSD$");
static const struct iro e2_iro_arr[379] = {
static const struct iro e2_iro_arr[385] = {
{ 0x40, 0x0, 0x0, 0x0, 0x0}, // COMMON_SB_SIZE
{ 0x40, 0x0, 0x0, 0x0, 0x0}, // COMMON_SB_DATA_SIZE
{ 0x28, 0x0, 0x0, 0x0, 0x0}, // COMMON_SP_SB_SIZE
@ -132,6 +132,10 @@ static const struct iro e2_iro_arr[379] = {
{ 0x16c8, 0x0, 0x0, 0x0, 0x8}, // TSTORM_COMMON_RTC_PARAMS_OFFSET
{ 0x2008, 0x10, 0x0, 0x0, 0x10}, // TSTORM_ASSERT_LIST_OFFSET(assertListEntry)
{ 0x2000, 0x0, 0x0, 0x0, 0x8}, // TSTORM_ASSERT_LIST_INDEX_OFFSET
{ 0x1aa8, 0x0, 0x0, 0x0, 0x10}, // TSTORM_MEASURE_PCI_LATENCY_CTRL_OFFSET
{ 0x1ab8, 0x0, 0x0, 0x0, 0x10}, // TSTORM_MEASURE_PCI_LATENCY_DATA_OFFSET
{ 0x1, 0x0, 0x0, 0x0, 0x0}, // TSTORM_AGG_MEASURE_PCI_LATENCY_INDEX
{ 0x2, 0x0, 0x0, 0x0, 0x0}, // TSTORM_AGG_MEASURE_PCI_LATENCY_COMP_TYPE
{ 0x17e0, 0x8, 0x0, 0x0, 0x1}, // TSTORM_FUNC_EN_OFFSET(funcId)
{ 0x17e1, 0x8, 0x0, 0x0, 0x1}, // TSTORM_VF_TO_PF_OFFSET(funcId)
{ 0x17e2, 0x8, 0x0, 0x0, 0x1}, // TSTORM_RECORD_SLOW_PATH_OFFSET(funcId)
@ -233,7 +237,9 @@ static const struct iro e2_iro_arr[379] = {
{UNDEF_IRO, 0x0, 0x0, 0x0, 0x0}, // TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId)
{UNDEF_IRO, 0x0, 0x0, 0x0, 0x0}, // TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId)
{UNDEF_IRO, 0x0, 0x0, 0x0, 0x0}, // TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId)
{ 0x3128, 0x0, 0x0, 0x0, 0x8}, // TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET
{UNDEF_IRO, 0x0, 0x0, 0x0, 0x0}, // TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET
{ 0x3128, 0x8, 0x0, 0x0, 0x1}, // TSTORM_ACCEPT_CLASSIFY_FAIL_E2_ENABLE_OFFSET(portId)
{ 0x3129, 0x8, 0x0, 0x0, 0x1}, // TSTORM_ACCEPT_CLASSIFY_FAIL_E2_VNIC_OFFSET(portId)
{ 0x62a20, 0x2600, 0x40, 0x0, 0x8}, // USTORM_CQE_PAGE_NEXT_OFFSET(portId,clientId)
{ 0xa000, 0x0, 0x0, 0x0, 0x2000}, // USTORM_AGG_DATA_OFFSET
{ 0x40c1, 0x0, 0x0, 0x0, 0x1}, // USTORM_TPA_BTR_OFFSET
@ -396,14 +402,14 @@ static const struct iro e2_iro_arr[379] = {
{ 0xbcb0, 0x0, 0x0, 0x0, 0x4}, // XSTORM_DEBUG_ABTS_BLOCK_SQ_CNT_OFFSET
{ 0xbcb4, 0x0, 0x0, 0x0, 0x4}, // XSTORM_DEBUG_CLEANUP_BLOCK_SQ_CNT_OFFSET
{ 0xbcb0, 0x0, 0x0, 0x0, 0x48}, // XSTORM_DEBUG_OFFSET
{ 0xd858, 0x0, 0x0, 0x0, 0x4}, // TSTORM_STAT_FCOE_VER_CNT_OFFSET
{ 0xd850, 0x0, 0x0, 0x0, 0x4}, // TSTORM_STAT_FCOE_RX_PKT_CNT_OFFSET
{ 0xd854, 0x0, 0x0, 0x0, 0x4}, // TSTORM_STAT_FCOE_RX_BYTE_CNT_OFFSET
{ 0xd85c, 0x0, 0x0, 0x0, 0x4}, // TSTORM_STAT_FCOE_RX_DROP_PKT_CNT_OFFSET
{ 0xd850, 0x0, 0x0, 0x0, 0x10}, // TSTORM_STAT_OFFSET
{ 0xd840, 0x0, 0x0, 0x0, 0x4}, // TSTORM_PORT_DEBUG_WAIT_FOR_YOUR_TURN_SP_CNT_OFFSET
{ 0xd844, 0x0, 0x0, 0x0, 0x4}, // TSTORM_PORT_DEBUG_AFEX_ERROR_PACKETS_OFFSET
{ 0xd840, 0x0, 0x0, 0x0, 0x8}, // TSTORM_PORT_DEBUG_OFFSET
{ 0xd868, 0x0, 0x0, 0x0, 0x4}, // TSTORM_STAT_FCOE_VER_CNT_OFFSET
{ 0xd860, 0x0, 0x0, 0x0, 0x4}, // TSTORM_STAT_FCOE_RX_PKT_CNT_OFFSET
{ 0xd864, 0x0, 0x0, 0x0, 0x4}, // TSTORM_STAT_FCOE_RX_BYTE_CNT_OFFSET
{ 0xd86c, 0x0, 0x0, 0x0, 0x4}, // TSTORM_STAT_FCOE_RX_DROP_PKT_CNT_OFFSET
{ 0xd860, 0x0, 0x0, 0x0, 0x10}, // TSTORM_STAT_OFFSET
{ 0xd850, 0x0, 0x0, 0x0, 0x4}, // TSTORM_PORT_DEBUG_WAIT_FOR_YOUR_TURN_SP_CNT_OFFSET
{ 0xd854, 0x0, 0x0, 0x0, 0x4}, // TSTORM_PORT_DEBUG_AFEX_ERROR_PACKETS_OFFSET
{ 0xd850, 0x0, 0x0, 0x0, 0x8}, // TSTORM_PORT_DEBUG_OFFSET
{ 0xd4c8, 0x0, 0x0, 0x0, 0x8}, // TSTORM_REORDER_DATA_OFFSET
{ 0xd4d8, 0x0, 0x0, 0x0, 0x80}, // TSTORM_REORDER_WAITING_TABLE_OFFSET
{ 0x10, 0x0, 0x0, 0x0, 0x0}, // TSTORM_WAITING_LIST_SIZE


@ -27,7 +27,7 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#define BXE_DRIVER_VERSION "1.78.79"
#define BXE_DRIVER_VERSION "1.78.81"
#include "bxe.h"
#include "ecore_sp.h"
@ -1458,14 +1458,14 @@ bxe_nvram_write(struct bxe_softc *sc,
/* copy command into DMAE command memory and set DMAE command Go */
void
bxe_post_dmae(struct bxe_softc *sc,
struct dmae_command *dmae,
struct dmae_cmd *dmae,
int idx)
{
uint32_t cmd_offset;
int i;
cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx));
for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) {
cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
}
@ -1476,14 +1476,14 @@ uint32_t
bxe_dmae_opcode_add_comp(uint32_t opcode,
uint8_t comp_type)
{
return (opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
DMAE_COMMAND_C_TYPE_ENABLE));
return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
DMAE_CMD_C_TYPE_ENABLE));
}
uint32_t
bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
{
return (opcode & ~DMAE_COMMAND_SRC_RESET);
return (opcode & ~DMAE_CMD_SRC_RESET);
}
uint32_t
@ -1495,17 +1495,17 @@ bxe_dmae_opcode(struct bxe_softc *sc,
{
uint32_t opcode = 0;
opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
(dst_type << DMAE_COMMAND_DST_SHIFT));
opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
(dst_type << DMAE_CMD_DST_SHIFT));
opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET);
opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) |
(SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT));
opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
(SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));
opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);
#ifdef __BIG_ENDIAN
opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
@ -1522,11 +1522,11 @@ bxe_dmae_opcode(struct bxe_softc *sc,
static void
bxe_prep_dmae_with_comp(struct bxe_softc *sc,
struct dmae_command *dmae,
struct dmae_cmd *dmae,
uint8_t src_type,
uint8_t dst_type)
{
memset(dmae, 0, sizeof(struct dmae_command));
memset(dmae, 0, sizeof(struct dmae_cmd));
/* set the opcode */
dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
@ -1541,7 +1541,7 @@ bxe_prep_dmae_with_comp(struct bxe_softc *sc,
/* issue a DMAE command over the init channel and wait for completion */
static int
bxe_issue_dmae_with_comp(struct bxe_softc *sc,
struct dmae_command *dmae)
struct dmae_cmd *dmae)
{
uint32_t *wb_comp = BXE_SP(sc, wb_comp);
int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
@ -1587,7 +1587,7 @@ bxe_read_dmae(struct bxe_softc *sc,
uint32_t src_addr,
uint32_t len32)
{
struct dmae_command dmae;
struct dmae_cmd dmae;
uint32_t *data;
int i, rc;
@ -1627,7 +1627,7 @@ bxe_write_dmae(struct bxe_softc *sc,
uint32_t dst_addr,
uint32_t len32)
{
struct dmae_command dmae;
struct dmae_cmd dmae;
int rc;
if (!sc->dmae_ready) {
@ -2382,13 +2382,13 @@ bxe_sp_post(struct bxe_softc *sc,
/* CID needs the port number to be encoded in it */
spe->hdr.conn_and_cmd_data =
htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid));
htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
/* TBD: Check if it works for VFs */
type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) &
SPE_HDR_FUNCTION_ID);
type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
SPE_HDR_T_FUNCTION_ID);
spe->hdr.type = htole16(type);
@ -3050,7 +3050,7 @@ bxe_tpa_stop(struct bxe_softc *sc,
fp->eth_q_stats.rx_soft_errors++;
m_freem(m);
} else {
if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) {
if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
m->m_flags |= M_VLANTAG;
}
@ -3342,7 +3342,7 @@ bxe_rxeof(struct bxe_softc *sc,
}
/* if there is a VLAN tag then flag that info */
if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_VLAN) {
if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
m->m_flags |= M_VLANTAG;
}
@ -9775,7 +9775,7 @@ bxe_init_rx_rings(struct bxe_softc *sc)
static void
bxe_init_tx_ring_one(struct bxe_fastpath *fp)
{
SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
fp->tx_db.data.zero_fill1 = 0;
fp->tx_db.data.prod = 0;
@ -10600,8 +10600,8 @@ bxe_func_start(struct bxe_softc *sc)
start_params->network_cos_mode = FW_WRR;
}
start_params->gre_tunnel_mode = 0;
start_params->gre_tunnel_rss = 0;
//start_params->gre_tunnel_mode = 0;
//start_params->gre_tunnel_rss = 0;
return (ecore_func_state_change(sc, &func_params));
}
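The dmae_command → dmae_cmd rename above runs through every DMAE call site in this file. For orientation, the post-and-wait pattern those sites share looks roughly like the condensed sketch below, modeled on bxe_read_dmae(); the wb_data slowpath buffer name is assumed, and the real function also validates dmae_ready and copies the result out.

/* Sketch of a DMAE GRC->PCI read with the renamed structure: build
 * the opcode, fill source/destination/length, then post the command
 * and poll its completion word via bxe_issue_dmae_with_comp(). */
static int
bxe_dmae_read_sketch(struct bxe_softc *sc, uint32_t src_addr, uint32_t len32)
{
	struct dmae_cmd dmae;

	bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	dmae.src_addr_lo = (src_addr >> 2); /* GRC addresses are in dwords */
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
	dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
	dmae.len         = len32;

	return (bxe_issue_dmae_with_comp(sc, &dmae));
}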


@ -111,6 +111,9 @@ __FBSDID("$FreeBSD$");
#include "bxe_elink.h"
#define VF_MAC_CREDIT_CNT 0
#define VF_VLAN_CREDIT_CNT (0)
#if __FreeBSD_version < 800054
#if defined(__i386__) || defined(__amd64__)
#define mb() __asm volatile("mfence;" : : : "memory")
@ -933,7 +936,7 @@ struct bxe_fw_stats_data {
struct bxe_slowpath {
/* used by the DMAE command executer */
struct dmae_command dmae[MAX_DMAE_C];
struct dmae_cmd dmae[MAX_DMAE_C];
/* statistics completion */
uint32_t stats_comp;
@ -1745,7 +1748,7 @@ struct bxe_softc {
struct bxe_net_stats_old net_stats_old;
struct bxe_fw_port_stats_old fw_stats_old;
struct dmae_command stats_dmae; /* used by dmae command loader */
struct dmae_cmd stats_dmae; /* used by dmae command loader */
int executer_idx;
int mtu;
@ -1984,21 +1987,21 @@ void bxe_reg_write32(struct bxe_softc *sc, bus_size_t offset, uint32_t val);
#define DMAE_COMP_REGULAR 0
#define DMAE_COM_SET_ERR 1
#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << DMAE_COMMAND_SRC_SHIFT)
#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << DMAE_COMMAND_SRC_SHIFT)
#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << DMAE_COMMAND_DST_SHIFT)
#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << DMAE_COMMAND_DST_SHIFT)
#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << DMAE_CMD_SRC_SHIFT)
#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << DMAE_CMD_SRC_SHIFT)
#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << DMAE_CMD_DST_SHIFT)
#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << DMAE_CMD_DST_SHIFT)
#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << DMAE_COMMAND_C_DST_SHIFT)
#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << DMAE_COMMAND_C_DST_SHIFT)
#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << DMAE_CMD_C_DST_SHIFT)
#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << DMAE_CMD_C_DST_SHIFT)
#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT)
#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT)
#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT)
#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT)
#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_CMD_ENDIANITY_SHIFT)
#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_CMD_ENDIANITY_SHIFT)
#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_CMD_ENDIANITY_SHIFT)
#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_CMD_ENDIANITY_SHIFT)
#define DMAE_CMD_PORT_0 0
#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT
#define DMAE_CMD_PORT_1 DMAE_CMD_PORT
#define DMAE_SRC_PF 0
#define DMAE_SRC_VF 1
@ -2127,7 +2130,7 @@ uint32_t bxe_dmae_opcode_clr_src_reset(uint32_t opcode);
uint32_t bxe_dmae_opcode(struct bxe_softc *sc, uint8_t src_type,
uint8_t dst_type, uint8_t with_comp,
uint8_t comp_type);
void bxe_post_dmae(struct bxe_softc *sc, struct dmae_command *dmae, int idx);
void bxe_post_dmae(struct bxe_softc *sc, struct dmae_cmd *dmae, int idx);
void bxe_read_dmae(struct bxe_softc *sc, uint32_t src_addr, uint32_t len32);
void bxe_write_dmae(struct bxe_softc *sc, bus_addr_t dma_addr,
uint32_t dst_addr, uint32_t len32);

File diff suppressed because it is too large


@ -1,5 +1,5 @@
/*-
* Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
* Copyright (c) 2007-2017 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -11,7 +11,7 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
@ -174,6 +174,9 @@ extern void elink_cb_dbg3(struct bxe_softc *sc, char *fmt, uint32_t arg1, uint3
#define ELINK_SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2)
#define ELINK_SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
#define ELINK_SFP_EEPROM_SFF_8472_COMP_SIZE 1
#define ELINK_SFP_EEPROM_VENDOR_SPECIFIC_ADDR 0x60
#define ELINK_SFP_EEPROM_VENDOR_SPECIFIC_SIZE 16
#define ELINK_SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e
#define ELINK_SFP_EEPROM_A2_CC_DMI_ADDR 0x5f
@ -304,6 +307,8 @@ struct elink_phy {
#define ELINK_SUPPORTED_Autoneg (1<<9)
#define ELINK_SUPPORTED_Pause (1<<10)
#define ELINK_SUPPORTED_Asym_Pause (1<<11)
#define ELINK_SUPPORTED_1000baseKX_Full (1<<17)
#define ELINK_SUPPORTED_10000baseKR_Full (1<<19)
#define ELINK_SUPPORTED_20000baseMLD2_Full (1<<21)
#define ELINK_SUPPORTED_20000baseKR2_Full (1<<22)
@ -413,6 +418,7 @@ struct elink_params {
#define ELINK_FEATURE_CONFIG_IEEE_PHY_TEST (1<<12)
#define ELINK_FEATURE_CONFIG_MT_SUPPORT (1<<13)
#define ELINK_FEATURE_CONFIG_BOOT_FROM_SAN (1<<14)
#define ELINK_FEATURE_CONFIG_DISABLE_PD (1<<15)
/* Will be populated during common init */
struct elink_phy phy[ELINK_MAX_PHYS];
@ -456,6 +462,9 @@ struct elink_params {
#define ELINK_LINK_FLAGS_INT_DISABLED (1<<0)
#define ELINK_PHY_INITIALIZED (1<<1)
uint32_t lfa_base;
/* The same definitions as the shmem2 parameter */
uint32_t link_attr_sync;
};
/* Output parameters */
@ -497,8 +506,7 @@ struct elink_vars {
uint8_t rx_tx_asic_rst;
uint8_t turn_to_run_wc_rt;
uint16_t rsrv2;
/* The same definitions as the shmem2 parameter */
uint32_t link_attr_sync;
};
/***********************************************************/
@ -674,14 +682,13 @@ void elink_pfc_statistic(struct elink_params *params, struct elink_vars *vars,
void elink_init_mod_abs_int(struct bxe_softc *sc, struct elink_vars *vars,
uint32_t chip_id, uint32_t shmem_base, uint32_t shmem2_base,
uint8_t port);
elink_status_t elink_sfp_module_detection(struct elink_phy *phy,
struct elink_params *params);
//elink_status_t elink_sfp_module_detection(struct elink_phy *phy,
// struct elink_params *params);
void elink_period_func(struct elink_params *params, struct elink_vars *vars);
elink_status_t elink_check_half_open_conn(struct elink_params *params,
struct elink_vars *vars, uint8_t notify);
//elink_status_t elink_check_half_open_conn(struct elink_params *params,
// struct elink_vars *vars, uint8_t notify);
void elink_enable_pmd_tx(struct elink_params *params);


@ -178,7 +178,7 @@ bxe_storm_stats_post(struct bxe_softc *sc)
static void
bxe_hw_stats_post(struct bxe_softc *sc)
{
struct dmae_command *dmae = &sc->stats_dmae;
struct dmae_cmd *dmae = &sc->stats_dmae;
uint32_t *stats_comp = BXE_SP(sc, stats_comp);
int loader_idx;
uint32_t opcode;
@ -201,15 +201,15 @@ bxe_hw_stats_post(struct bxe_softc *sc)
TRUE, DMAE_COMP_GRC);
opcode = bxe_dmae_opcode_clr_src_reset(opcode);
memset(dmae, 0, sizeof(struct dmae_command));
memset(dmae, 0, sizeof(struct dmae_cmd));
dmae->opcode = opcode;
dmae->src_addr_lo = U64_LO(BXE_SP_MAPPING(sc, dmae[0]));
dmae->src_addr_hi = U64_HI(BXE_SP_MAPPING(sc, dmae[0]));
dmae->dst_addr_lo = ((DMAE_REG_CMD_MEM +
sizeof(struct dmae_command) *
sizeof(struct dmae_cmd) *
(loader_idx + 1)) >> 2);
dmae->dst_addr_hi = 0;
dmae->len = sizeof(struct dmae_command) >> 2;
dmae->len = sizeof(struct dmae_cmd) >> 2;
if (CHIP_IS_E1(sc)) {
dmae->len--;
}
@ -251,7 +251,7 @@ bxe_stats_comp(struct bxe_softc *sc)
static void
bxe_stats_pmf_update(struct bxe_softc *sc)
{
struct dmae_command *dmae;
struct dmae_cmd *dmae;
uint32_t opcode;
int loader_idx = PMF_DMAE_C(sc);
uint32_t *stats_comp = BXE_SP(sc, stats_comp);
@ -310,7 +310,7 @@ bxe_stats_pmf_update(struct bxe_softc *sc)
static void
bxe_port_stats_init(struct bxe_softc *sc)
{
struct dmae_command *dmae;
struct dmae_cmd *dmae;
int port = SC_PORT(sc);
uint32_t opcode;
int loader_idx = PMF_DMAE_C(sc);
@ -538,7 +538,7 @@ bxe_port_stats_init(struct bxe_softc *sc)
static void
bxe_func_stats_init(struct bxe_softc *sc)
{
struct dmae_command *dmae = &sc->stats_dmae;
struct dmae_cmd *dmae = &sc->stats_dmae;
uint32_t *stats_comp = BXE_SP(sc, stats_comp);
/* sanity */
@ -548,7 +548,7 @@ bxe_func_stats_init(struct bxe_softc *sc)
}
sc->executer_idx = 0;
memset(dmae, 0, sizeof(struct dmae_command));
memset(dmae, 0, sizeof(struct dmae_cmd));
dmae->opcode = bxe_dmae_opcode(sc, DMAE_SRC_PCI, DMAE_DST_GRC,
TRUE, DMAE_COMP_PCI);
@ -1339,7 +1339,7 @@ bxe_stats_update(struct bxe_softc *sc)
static void
bxe_port_stats_stop(struct bxe_softc *sc)
{
struct dmae_command *dmae;
struct dmae_cmd *dmae;
uint32_t opcode;
int loader_idx = PMF_DMAE_C(sc);
uint32_t *stats_comp = BXE_SP(sc, stats_comp);
@ -1466,7 +1466,7 @@ void bxe_stats_handle(struct bxe_softc *sc,
static void
bxe_port_stats_base_init(struct bxe_softc *sc)
{
struct dmae_command *dmae;
struct dmae_cmd *dmae;
uint32_t *stats_comp = BXE_SP(sc, stats_comp);
/* sanity */


@ -1,5 +1,5 @@
/*-
* Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
* Copyright (c) 2007-2017 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -11,7 +11,7 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
@ -31,161 +31,161 @@ __FBSDID("$FreeBSD$");
#define ECORE_FW_DEFS_H
#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base)
#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[152].base)
#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[147].base + ((assertListEntry) * IRO[147].m1))
(IRO[151].base + ((assertListEntry) * IRO[151].m1))
#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
(IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \
IRO[153].m2))
(IRO[157].base + (((pfId)>>1) * IRO[157].m1) + (((pfId)&1) * \
IRO[157].m2))
#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
(IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \
IRO[154].m2))
(IRO[158].base + (((pfId)>>1) * IRO[158].m1) + (((pfId)&1) * \
IRO[158].m2))
#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
(IRO[159].base + ((funcId) * IRO[159].m1))
(IRO[163].base + ((funcId) * IRO[163].m1))
#define CSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[149].base + ((funcId) * IRO[149].m1))
(IRO[153].base + ((funcId) * IRO[153].m1))
#define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \
(IRO[139].base + ((hcIndex) * IRO[139].m1) + ((sbId) * IRO[139].m2))
(IRO[143].base + ((hcIndex) * IRO[143].m1) + ((sbId) * IRO[143].m2))
#define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \
(IRO[138].base + (((hcIndex)>>2) * IRO[138].m1) + (((hcIndex)&3) \
* IRO[138].m2) + ((sbId) * IRO[138].m3))
#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
(IRO[142].base + (((hcIndex)>>2) * IRO[142].m1) + (((hcIndex)&3) \
* IRO[142].m2) + ((sbId) * IRO[142].m3))
#define CSTORM_IGU_MODE_OFFSET (IRO[161].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
(IRO[317].base + ((pfId) * IRO[317].m1))
(IRO[323].base + ((pfId) * IRO[323].m1))
#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[318].base + ((pfId) * IRO[318].m1))
(IRO[324].base + ((pfId) * IRO[324].m1))
#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
(IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2))
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
(IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
(IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
(IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
(IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
(IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2))
#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
(IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
(IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
(IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
(IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2))
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
(IRO[316].base + ((pfId) * IRO[316].m1))
(IRO[322].base + ((pfId) * IRO[322].m1))
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[308].base + ((pfId) * IRO[308].m1))
(IRO[314].base + ((pfId) * IRO[314].m1))
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[307].base + ((pfId) * IRO[307].m1))
(IRO[313].base + ((pfId) * IRO[313].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[306].base + ((pfId) * IRO[306].m1))
(IRO[312].base + ((pfId) * IRO[312].m1))
#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[151].base + ((funcId) * IRO[151].m1))
(IRO[155].base + ((funcId) * IRO[155].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
(IRO[142].base + ((pfId) * IRO[142].m1))
(IRO[146].base + ((pfId) * IRO[146].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
(IRO[143].base + ((pfId) * IRO[143].m1))
(IRO[147].base + ((pfId) * IRO[147].m1))
#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
(IRO[141].base + ((pfId) * IRO[141].m1))
#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size)
(IRO[145].base + ((pfId) * IRO[145].m1))
#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[145].size)
#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
(IRO[144].base + ((pfId) * IRO[144].m1))
#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size)
(IRO[148].base + ((pfId) * IRO[148].m1))
#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[148].size)
#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
(IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2))
(IRO[140].base + ((sbId) * IRO[140].m1) + ((hcIndex) * IRO[140].m2))
#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
(IRO[133].base + ((sbId) * IRO[133].m1))
#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
(IRO[134].base + ((sbId) * IRO[134].m1))
#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
(IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2))
#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
(IRO[132].base + ((sbId) * IRO[132].m1))
#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size)
#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
(IRO[137].base + ((sbId) * IRO[137].m1))
#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size)
#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
(IRO[138].base + ((sbId) * IRO[138].m1))
#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
(IRO[139].base + ((sbId) * IRO[139].m1) + ((hcIndex) * IRO[139].m2))
#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
(IRO[136].base + ((sbId) * IRO[136].m1))
#define CSTORM_STATUS_BLOCK_SIZE (IRO[136].size)
#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
(IRO[141].base + ((sbId) * IRO[141].m1))
#define CSTORM_SYNC_BLOCK_SIZE (IRO[141].size)
#define CSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[150].base + ((funcId) * IRO[150].m1))
#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
(IRO[154].base + ((funcId) * IRO[154].m1))
#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[208].base)
#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
(IRO[203].base + ((pfId) * IRO[203].m1))
(IRO[207].base + ((pfId) * IRO[207].m1))
#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[101].base + ((assertListEntry) * IRO[101].m1))
#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
(IRO[201].base + ((pfId) * IRO[201].m1))
(IRO[205].base + ((pfId) * IRO[205].m1))
#define TSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[103].base + ((funcId) * IRO[103].m1))
(IRO[107].base + ((funcId) * IRO[107].m1))
#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
(IRO[272].base + ((pfId) * IRO[272].m1))
#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[271].base + ((pfId) * IRO[271].m1))
#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[270].base + ((pfId) * IRO[270].m1))
#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[269].base + ((pfId) * IRO[269].m1))
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[268].base + ((pfId) * IRO[268].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
(IRO[278].base + ((pfId) * IRO[278].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[264].base + ((pfId) * IRO[264].m1))
#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[265].base + ((pfId) * IRO[265].m1))
#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[266].base + ((pfId) * IRO[266].m1))
#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[267].base + ((pfId) * IRO[267].m1))
#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
(IRO[202].base + ((pfId) * IRO[202].m1))
#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[105].base + ((funcId) * IRO[105].m1))
#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
(IRO[217].base + ((pfId) * IRO[217].m1))
#define TSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[104].base + ((funcId) * IRO[104].m1))
#define USTORM_AGG_DATA_OFFSET (IRO[206].base)
#define USTORM_AGG_DATA_SIZE (IRO[206].size)
#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[176].base + ((assertListEntry) * IRO[176].m1))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IRO[183].base + ((portId) * IRO[183].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
(IRO[319].base + ((pfId) * IRO[319].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
(IRO[178].base + ((funcId) * IRO[178].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
(IRO[283].base + ((pfId) * IRO[283].m1))
#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[277].base + ((pfId) * IRO[277].m1))
#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[276].base + ((pfId) * IRO[276].m1))
#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[275].base + ((pfId) * IRO[275].m1))
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[274].base + ((pfId) * IRO[274].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
(IRO[284].base + ((pfId) * IRO[284].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[270].base + ((pfId) * IRO[270].m1))
#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[271].base + ((pfId) * IRO[271].m1))
#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[272].base + ((pfId) * IRO[272].m1))
#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[273].base + ((pfId) * IRO[273].m1))
#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
(IRO[206].base + ((pfId) * IRO[206].m1))
#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[109].base + ((funcId) * IRO[109].m1))
#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
(IRO[223].base + ((pfId) * IRO[223].m1))
#define TSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[108].base + ((funcId) * IRO[108].m1))
#define USTORM_AGG_DATA_OFFSET (IRO[212].base)
#define USTORM_AGG_DATA_SIZE (IRO[212].size)
#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[181].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[180].base + ((assertListEntry) * IRO[180].m1))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IRO[187].base + ((portId) * IRO[187].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
(IRO[325].base + ((pfId) * IRO[325].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
(IRO[182].base + ((funcId) * IRO[182].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
(IRO[289].base + ((pfId) * IRO[289].m1))
#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[290].base + ((pfId) * IRO[290].m1))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
(IRO[288].base + ((pfId) * IRO[288].m1))
(IRO[294].base + ((pfId) * IRO[294].m1))
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
(IRO[285].base + ((pfId) * IRO[285].m1))
(IRO[291].base + ((pfId) * IRO[291].m1))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[281].base + ((pfId) * IRO[281].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[280].base + ((pfId) * IRO[280].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[279].base + ((pfId) * IRO[279].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
(IRO[282].base + ((pfId) * IRO[282].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
(IRO[286].base + ((pfId) * IRO[286].m1))
#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[287].base + ((pfId) * IRO[287].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[286].base + ((pfId) * IRO[286].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[285].base + ((pfId) * IRO[285].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
(IRO[288].base + ((pfId) * IRO[288].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
(IRO[292].base + ((pfId) * IRO[292].m1))
#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[293].base + ((pfId) * IRO[293].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
(IRO[182].base + ((pfId) * IRO[182].m1))
(IRO[186].base + ((pfId) * IRO[186].m1))
#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[180].base + ((funcId) * IRO[180].m1))
(IRO[184].base + ((funcId) * IRO[184].m1))
#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
(IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \
IRO[209].m2))
(IRO[215].base + ((portId) * IRO[215].m1) + ((clientId) * \
IRO[215].m2))
#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
(IRO[210].base + ((qzoneId) * IRO[210].m1))
#define USTORM_TPA_BTR_OFFSET (IRO[207].base)
#define USTORM_TPA_BTR_SIZE (IRO[207].size)
(IRO[216].base + ((qzoneId) * IRO[216].m1))
#define USTORM_TPA_BTR_OFFSET (IRO[213].base)
#define USTORM_TPA_BTR_SIZE (IRO[213].size)
#define USTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[179].base + ((funcId) * IRO[179].m1))
(IRO[183].base + ((funcId) * IRO[183].m1))
#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base)
@ -198,39 +198,39 @@ __FBSDID("$FreeBSD$");
#define XSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[47].base + ((funcId) * IRO[47].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
(IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
(IRO[299].base + ((pfId) * IRO[299].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[300].base + ((pfId) * IRO[300].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IRO[301].base + ((pfId) * IRO[301].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
(IRO[302].base + ((pfId) * IRO[302].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
(IRO[303].base + ((pfId) * IRO[303].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IRO[304].base + ((pfId) * IRO[304].m1))
#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
(IRO[305].base + ((pfId) * IRO[305].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[306].base + ((pfId) * IRO[306].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IRO[307].base + ((pfId) * IRO[307].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
(IRO[308].base + ((pfId) * IRO[308].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
(IRO[309].base + ((pfId) * IRO[309].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IRO[310].base + ((pfId) * IRO[310].m1))
#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
(IRO[311].base + ((pfId) * IRO[311].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[295].base + ((pfId) * IRO[295].m1))
(IRO[301].base + ((pfId) * IRO[301].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[294].base + ((pfId) * IRO[294].m1))
(IRO[300].base + ((pfId) * IRO[300].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[293].base + ((pfId) * IRO[293].m1))
(IRO[299].base + ((pfId) * IRO[299].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
(IRO[298].base + ((pfId) * IRO[298].m1))
(IRO[304].base + ((pfId) * IRO[304].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
(IRO[297].base + ((pfId) * IRO[297].m1))
(IRO[303].base + ((pfId) * IRO[303].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
(IRO[292].base + ((pfId) * IRO[292].m1))
(IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[291].base + ((pfId) * IRO[291].m1))
(IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
(IRO[290].base + ((pfId) * IRO[290].m1))
(IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
(IRO[289].base + ((pfId) * IRO[289].m1))
(IRO[295].base + ((pfId) * IRO[295].m1))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
(IRO[44].base + ((pfId) * IRO[44].m1))
#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@ -243,17 +243,21 @@ __FBSDID("$FreeBSD$");
#define XSTORM_SPQ_PROD_OFFSET(funcId) \
(IRO[31].base + ((funcId) * IRO[31].m1))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
(IRO[211].base + ((portId) * IRO[211].m1))
(IRO[217].base + ((portId) * IRO[217].m1))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
(IRO[212].base + ((portId) * IRO[212].m1))
(IRO[218].base + ((portId) * IRO[218].m1))
#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
(IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \
IRO[214].m2))
(IRO[220].base + (((pfId)>>1) * IRO[220].m1) + (((pfId)&1) * \
IRO[220].m2))
#define XSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[48].base + ((funcId) * IRO[48].m1))
#define COMMON_ASM_INVALID_ASSERT_OPCODE (IRO[7].base)
/* eth hsi version */
#define ETH_FP_HSI_VERSION (ETH_FP_HSI_VER_2)
/* Ethernet Ring parameters */
#define X_ETH_LOCAL_RING_SIZE 13
#define FIRST_BD_IN_PKT 0
@ -381,6 +385,7 @@ __FBSDID("$FreeBSD$");
/* used for Host Coalescing */
#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
#define TSDM_TIMER_TICK_RESUL_CHIP (1 * (1e-6))
/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
@ -394,7 +399,7 @@ __FBSDID("$FreeBSD$");
#define MAX_COS_NUMBER 4
#define MAX_TRAFFIC_TYPES 8
#define MAX_PFC_PRIORITIES 8
#define MAX_VLAN_PRIORITIES 8
/* used by array traffic_type_to_priority[] to mark traffic type \
that is not mapped to priority*/
#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF

File diff suppressed because it is too large


@ -1,5 +1,5 @@
/*-
* Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
* Copyright (c) 2007-2017 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -11,7 +11,7 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
@ -750,9 +750,9 @@ static inline void ecore_set_mcp_parity(struct bxe_softc *sc, uint8_t enable)
reg_val = REG_RD(sc, mcp_attn_ctl_regs[i].addr);
if (enable)
reg_val |= mcp_attn_ctl_regs[i].bits;
reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; /* Linux is using mcp_attn_ctl_regs[i].bits */
else
reg_val &= ~mcp_attn_ctl_regs[i].bits;
reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; /* Linux is using mcp_attn_ctl_regs[i].bits */
REG_WR(sc, mcp_attn_ctl_regs[i].addr, reg_val);
}


@ -1,5 +1,5 @@
/*-
* Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
* Copyright (c) 2007-2017 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -11,7 +11,7 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS


@ -1,5 +1,5 @@
/*-
* Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
* Copyright (c) 2007-2017 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -11,7 +11,7 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS


@ -1,5 +1,5 @@
/*-
* Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
* Copyright (c) 2007-2017 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -11,7 +11,7 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
@ -141,8 +141,6 @@ __FBSDID("$FreeBSD$");
0xc0a8UL
#define DBG_REG_DBG_PRTY_STS_CLR \
0xc0a0UL
#define DMAE_REG_BACKWARD_COMP_EN \
0x10207cUL
#define DMAE_REG_CMD_MEM \
0x102400UL
#define DMAE_REG_DMAE_INT_MASK \
@ -193,32 +191,10 @@ __FBSDID("$FreeBSD$");
0x170188UL
#define DORQ_REG_DPM_CID_OFST \
0x170030UL
#define DORQ_REG_MAX_RVFID_SIZE \
0x1701ecUL
#define DORQ_REG_NORM_CID_OFST \
0x17002cUL
#define DORQ_REG_PF_USAGE_CNT \
0x1701d0UL
#define DORQ_REG_VF_NORM_CID_BASE \
0x1701a0UL
#define DORQ_REG_VF_NORM_CID_OFST \
0x1701f4UL
#define DORQ_REG_VF_NORM_CID_WND_SIZE \
0x1701a4UL
#define DORQ_REG_VF_NORM_MAX_CID_COUNT \
0x1701e4UL
#define DORQ_REG_VF_NORM_VF_BASE \
0x1701a8UL
#define DORQ_REG_VF_TYPE_MASK_0 \
0x170218UL
#define DORQ_REG_VF_TYPE_MAX_MCID_0 \
0x1702d8UL
#define DORQ_REG_VF_TYPE_MIN_MCID_0 \
0x170298UL
#define DORQ_REG_VF_TYPE_VALUE_0 \
0x170258UL
#define DORQ_REG_VF_USAGE_CT_LIMIT \
0x170340UL
#define HC_CONFIG_0_REG_ATTN_BIT_EN_0 \
(0x1<<4)
#define HC_CONFIG_0_REG_BLOCK_DISABLE_0 \
@ -1040,7 +1016,7 @@ __FBSDID("$FreeBSD$");
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
0x9430UL
#define PGLUE_B_REG_LATCHED_ERRORS_CLR \
0x943CUL
0x943cUL
#define PGLUE_B_REG_PGLUE_B_INT_STS \
0x9298UL
#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR \
@ -1521,8 +1497,6 @@ __FBSDID("$FreeBSD$");
0x4047cUL
#define SRC_REG_LASTFREE0 \
0x40530UL
#define SRC_REG_NUMBER_HASH_BITS0 \
0x40400UL
#define SRC_REG_SOFT_RST \
0x4049cUL
#define SRC_REG_SRC_PRTY_MASK \
@ -1965,8 +1939,8 @@ __FBSDID("$FreeBSD$");
#define MISC_SPIO_SPIO5 0x20
#define HW_LOCK_MAX_RESOURCE_VALUE 31
#define HW_LOCK_RESOURCE_DRV_FLAGS 10
#define HW_LOCK_RESOURCE_MDIO 0
#define HW_LOCK_RESOURCE_GPIO 1
#define HW_LOCK_RESOURCE_MDIO 0
#define HW_LOCK_RESOURCE_NVRAM 12
#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
@ -3261,6 +3235,9 @@ The other bits are reserved and should be zero*/
#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40
#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
#define MDIO_AN_REG_848xx_ID_MSB 0xffe2
#define BCM84858_PHY_ID 0x600d
#define MDIO_AN_REG_848xx_ID_LSB 0xffe3
#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
@ -3304,32 +3281,32 @@ The other bits are reserved and should be zero*/
#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81
#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
#define MDIO_84833_SUPER_ISOLATE 0x8000
/* These are mailbox register set used by 84833. */
#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005
#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006
#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007
#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008
#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009
#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037
#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038
#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039
#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a
#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b
#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c
#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0
#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26
#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27
#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28
#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29
#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30
#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31
/* These are mailbox register set used by 84833/84858. */
#define MDIO_848xx_TOP_CFG_SCRATCH_REG0 0x4005
#define MDIO_848xx_TOP_CFG_SCRATCH_REG1 0x4006
#define MDIO_848xx_TOP_CFG_SCRATCH_REG2 0x4007
#define MDIO_848xx_TOP_CFG_SCRATCH_REG3 0x4008
#define MDIO_848xx_TOP_CFG_SCRATCH_REG4 0x4009
#define MDIO_848xx_TOP_CFG_SCRATCH_REG26 0x4037
#define MDIO_848xx_TOP_CFG_SCRATCH_REG27 0x4038
#define MDIO_848xx_TOP_CFG_SCRATCH_REG28 0x4039
#define MDIO_848xx_TOP_CFG_SCRATCH_REG29 0x403a
#define MDIO_848xx_TOP_CFG_SCRATCH_REG30 0x403b
#define MDIO_848xx_TOP_CFG_SCRATCH_REG31 0x403c
#define MDIO_848xx_CMD_HDLR_COMMAND (MDIO_848xx_TOP_CFG_SCRATCH_REG0)
#define MDIO_848xx_CMD_HDLR_STATUS (MDIO_848xx_TOP_CFG_SCRATCH_REG26)
#define MDIO_848xx_CMD_HDLR_DATA1 (MDIO_848xx_TOP_CFG_SCRATCH_REG27)
#define MDIO_848xx_CMD_HDLR_DATA2 (MDIO_848xx_TOP_CFG_SCRATCH_REG28)
#define MDIO_848xx_CMD_HDLR_DATA3 (MDIO_848xx_TOP_CFG_SCRATCH_REG29)
#define MDIO_848xx_CMD_HDLR_DATA4 (MDIO_848xx_TOP_CFG_SCRATCH_REG30)
#define MDIO_848xx_CMD_HDLR_DATA5 (MDIO_848xx_TOP_CFG_SCRATCH_REG31)
/* Mailbox command set used by 84833. */
#define PHY84833_CMD_SET_PAIR_SWAP 0x8001
#define PHY84833_CMD_GET_EEE_MODE 0x8008
#define PHY84833_CMD_SET_EEE_MODE 0x8009
#define PHY84833_CMD_GET_CURRENT_TEMP 0x8031
/* Mailbox status set used by 84833. */
/* Mailbox command set used by 84833/84858 */
#define PHY848xx_CMD_SET_PAIR_SWAP 0x8001
#define PHY848xx_CMD_GET_EEE_MODE 0x8008
#define PHY848xx_CMD_SET_EEE_MODE 0x8009
#define PHY848xx_CMD_GET_CURRENT_TEMP 0x8031
/* Mailbox status set used by 84833 only */
#define PHY84833_STATUS_CMD_RECEIVED 0x0001
#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002
#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004
@ -3339,6 +3316,18 @@ The other bits are reserved and should be zero*/
#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040
#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
/* Mailbox Process */
#define PHY84833_MB_PROCESS1 1
#define PHY84833_MB_PROCESS2 2
#define PHY84833_MB_PROCESS3 3
/* Mailbox status set used by 84858 only */
#define PHY84858_STATUS_CMD_RECEIVED 0x0001
#define PHY84858_STATUS_CMD_IN_PROGRESS 0x0002
#define PHY84858_STATUS_CMD_COMPLETE_PASS 0x0004
#define PHY84858_STATUS_CMD_COMPLETE_ERROR 0x0008
#define PHY84858_STATUS_CMD_SYSTEM_BUSY 0xbbbb
/* Warpcore clause 45 addressing */
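Taken together, the scratch registers above form a simple command mailbox: firmware arguments go into the DATA slots, the command code is written to MDIO_848xx_CMD_HDLR_COMMAND, and the driver polls MDIO_848xx_CMD_HDLR_STATUS for one of the completion codes. A sketch of that handshake follows; mdio45_wr()/mdio45_rd() are hypothetical stand-ins for elink's clause-45 accessors, which also take a device-address argument in the real driver.

struct elink_phy; /* opaque here; defined in the elink headers */

/* Hypothetical CL45 accessors standing in for elink's MDIO helpers. */
extern void     mdio45_wr(struct elink_phy *phy, uint16_t reg, uint16_t val);
extern uint16_t mdio45_rd(struct elink_phy *phy, uint16_t reg);

static int
phy848xx_mbox_cmd_sketch(struct elink_phy *phy, uint16_t cmd, uint16_t arg)
{
	uint16_t status;
	int spin;

	/* Load the argument, then fire the command. */
	mdio45_wr(phy, MDIO_848xx_CMD_HDLR_DATA1, arg);
	mdio45_wr(phy, MDIO_848xx_CMD_HDLR_COMMAND, cmd);

	/* Poll until the firmware reports pass or fail (84833-style codes). */
	for (spin = 0; spin < 100; spin++) {
		status = mdio45_rd(phy, MDIO_848xx_CMD_HDLR_STATUS);
		if (status == PHY84833_STATUS_CMD_COMPLETE_PASS)
			return (0);
		if (status == PHY84833_STATUS_CMD_COMPLETE_ERROR)
			return (-1);
		/* the real code paces polls with a short delay */
	}
	return (-1); /* timed out */
}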
@ -3365,6 +3354,8 @@ The other bits are reserved and should be zero*/
#define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081
#define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091
#define MDIO_WC_REG_TX0_TX_DRIVER 0x8067
#define MDIO_WC_REG_TX0_TX_DRIVER_IFIR_OFFSET 0x01
#define MDIO_WC_REG_TX0_TX_DRIVER_IFIR_MASK 0x000e
#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04
#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0
#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08
@ -3382,6 +3373,7 @@ The other bits are reserved and should be zero*/
#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa
#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
#define MDIO_WC_REG_XGXSBLK2_LANE_RESET 0x810a
#define MDIO_WC_REG_XGXS_STATUS3 0x8129
#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131
@ -3584,7 +3576,7 @@ The other bits are reserved and should be zero*/
#define IGU_SEG_IDX_ATTN 2
#define IGU_SEG_IDX_DEFAULT 1
/* Fields of IGU PF CONFIGRATION REGISTER */
/* Fields of IGU PF CONFIGURATION REGISTER */
#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */
#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */
@ -3592,7 +3584,7 @@ The other bits are reserved and should be zero*/
#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */
/* Fields of IGU VF CONFIGRATION REGISTER */
/* Fields of IGU VF CONFIGURATION REGISTER */
#define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */
#define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
#define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */


@ -1,5 +1,5 @@
/*-
* Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
* Copyright (c) 2007-2017 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -11,7 +11,7 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
@ -30,6 +30,9 @@ __FBSDID("$FreeBSD$");
#include "bxe.h"
#include "ecore_init.h"
/**** Exe Queue interfaces ****/
/**
@ -736,6 +739,26 @@ static int ecore_check_vlan_mac_add(struct bxe_softc *sc,
return ECORE_SUCCESS;
}
static int ecore_check_vxlan_fltr_add(struct bxe_softc *sc,
struct ecore_vlan_mac_obj *o,
union ecore_classification_ramrod_data *data)
{
struct ecore_vlan_mac_registry_elem *pos;
ECORE_MSG(sc, "Checking VXLAN_FLTR (Inner:%pM, %d) for ADD command\n",
data->vxlan_fltr.innermac, data->vxlan_fltr.vni);
ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
struct ecore_vlan_mac_registry_elem)
if ((!ECORE_MEMCMP(data->vxlan_fltr.innermac,
pos->u.vxlan_fltr.innermac,
ETH_ALEN)) &&
(data->vxlan_fltr.vni == pos->u.vxlan_fltr.vni))
return ECORE_EXISTS;
return ECORE_SUCCESS;
}
/* check_del() callbacks */
static struct ecore_vlan_mac_registry_elem *
ecore_check_mac_del(struct bxe_softc *sc,
@ -794,6 +817,28 @@ static struct ecore_vlan_mac_registry_elem *
return NULL;
}
static struct ecore_vlan_mac_registry_elem *
ecore_check_vxlan_fltr_del
(struct bxe_softc *sc,
struct ecore_vlan_mac_obj *o,
union ecore_classification_ramrod_data *data)
{
struct ecore_vlan_mac_registry_elem *pos;
ECORE_MSG(sc, "Checking VXLAN_FLTR (Inner:%pM, %d) for DEL command\n",
data->vxlan_fltr.innermac, data->vxlan_fltr.vni);
ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
struct ecore_vlan_mac_registry_elem)
if ((!ECORE_MEMCMP(data->vxlan_fltr.innermac,
pos->u.vxlan_fltr.innermac,
ETH_ALEN)) &&
(data->vxlan_fltr.vni == pos->u.vxlan_fltr.vni))
return pos;
return NULL;
}
/* check_move() callback */
static bool ecore_check_move(struct bxe_softc *sc,
struct ecore_vlan_mac_obj *src_o,
@ -982,7 +1027,7 @@ static void ecore_set_one_mac_e2(struct bxe_softc *sc,
&rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac);
rule_entry->mac.inner_mac =
elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
ECORE_CPU_TO_LE16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
/* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == ECORE_VLAN_MAC_MOVE) {
@ -1000,7 +1045,8 @@ static void ecore_set_one_mac_e2(struct bxe_softc *sc,
&rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac);
rule_entry->mac.inner_mac =
elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
ECORE_CPU_TO_LE16(elem->cmd_data.vlan_mac.
u.mac.is_inner_mac);
}
/* Set the ramrod data header */
@ -1219,6 +1265,62 @@ static void ecore_set_one_vlan_mac_e2(struct bxe_softc *sc,
rule_cnt);
}
static void ecore_set_one_vxlan_fltr_e2(struct bxe_softc *sc,
struct ecore_vlan_mac_obj *o,
struct ecore_exeq_elem *elem,
int rule_idx, int cam_offset)
{
struct ecore_raw_obj *raw = &o->raw;
struct eth_classify_rules_ramrod_data *data =
(struct eth_classify_rules_ramrod_data *)(raw->rdata);
int rule_cnt = rule_idx + 1;
union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
uint32_t vni = elem->cmd_data.vlan_mac.u.vxlan_fltr.vni;
uint8_t *mac = elem->cmd_data.vlan_mac.u.vxlan_fltr.innermac;
/* Reset the ramrod data buffer for the first rule */
if (rule_idx == 0)
ECORE_MEMSET(data, 0, sizeof(*data));
/* Set a rule header */
ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add,
CLASSIFY_RULE_OPCODE_IMAC_VNI,
&rule_entry->imac_vni.header);
/* Set the VNI and inner MAC themselves */
rule_entry->imac_vni.vni = vni;
ecore_set_fw_mac_addr(&rule_entry->imac_vni.imac_msb,
&rule_entry->imac_vni.imac_mid,
&rule_entry->imac_vni.imac_lsb, mac);
/* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == ECORE_VLAN_MAC_MOVE) {
rule_entry++;
rule_cnt++;
/* Setup ramrod data */
ecore_vlan_mac_set_cmd_hdr_e2(sc,
elem->cmd_data.vlan_mac.target_obj,
TRUE, CLASSIFY_RULE_OPCODE_IMAC_VNI,
&rule_entry->imac_vni.header);
/* Set the VNI and inner MAC themselves */
rule_entry->imac_vni.vni = vni;
ecore_set_fw_mac_addr(&rule_entry->imac_vni.imac_msb,
&rule_entry->imac_vni.imac_mid,
&rule_entry->imac_vni.imac_lsb, mac);
}
/* Set the ramrod data header */
/* TODO: take this to the higher level in order to prevent multiple
* writing
*/
ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state,
&data->header, rule_cnt);
}
/**
* ecore_set_one_vlan_mac_e1h -
*
@ -1376,6 +1478,26 @@ static struct ecore_exeq_elem *ecore_exeq_get_vlan_mac(
return NULL;
}
static struct ecore_exeq_elem *ecore_exeq_get_vxlan_fltr
(struct ecore_exe_queue_obj *o,
struct ecore_exeq_elem *elem)
{
struct ecore_exeq_elem *pos;
struct ecore_vxlan_fltr_ramrod_data *data =
&elem->cmd_data.vlan_mac.u.vxlan_fltr;
/* Check pending for execution commands */
ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
struct ecore_exeq_elem)
if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vxlan_fltr, data,
sizeof(*data)) &&
(pos->cmd_data.vlan_mac.cmd ==
elem->cmd_data.vlan_mac.cmd))
return pos;
return NULL;
}
/**
* ecore_validate_vlan_mac_add - check if an ADD command can be executed
*
@ -1890,14 +2012,12 @@ static int ecore_execute_vlan_mac(struct bxe_softc *sc,
idx++;
}
/*
* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read and we will have to put a full memory barrier there
* (inside ecore_sp_post()).
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
r->rdata_mapping,
ETH_CONNECTION_TYPE);
@ -2084,6 +2204,7 @@ static int ecore_vlan_mac_del_all(struct bxe_softc *sc,
struct ecore_vlan_mac_ramrod_params p;
struct ecore_exe_queue_obj *exeq = &o->exe_queue;
struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
unsigned long flags;
int read_lock;
int rc = 0;
@ -2094,8 +2215,9 @@ static int ecore_vlan_mac_del_all(struct bxe_softc *sc,
ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
&exeq->exe_queue, link,
struct ecore_exeq_elem) {
if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
*vlan_mac_flags) {
flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
if (ECORE_VLAN_MAC_CMP_FLAGS(flags) ==
ECORE_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
rc = exeq->remove(sc, exeq->owner, exeq_pos);
if (rc) {
ECORE_ERR("Failed to remove command\n");
@ -2130,7 +2252,9 @@ static int ecore_vlan_mac_del_all(struct bxe_softc *sc,
ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
struct ecore_vlan_mac_registry_elem) {
if (pos->vlan_mac_flags == *vlan_mac_flags) {
flags = pos->vlan_mac_flags;
if (ECORE_VLAN_MAC_CMP_FLAGS(flags) ==
ECORE_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
rc = ecore_config_vlan_mac(sc, &p);
@ -2350,6 +2474,54 @@ void ecore_init_vlan_mac_obj(struct bxe_softc *sc,
}
}
void ecore_init_vxlan_fltr_obj(struct bxe_softc *sc,
struct ecore_vlan_mac_obj *vlan_mac_obj,
uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
ecore_dma_addr_t rdata_mapping, int state,
unsigned long *pstate, ecore_obj_type type,
struct ecore_credit_pool_obj *macs_pool,
struct ecore_credit_pool_obj *vlans_pool)
{
union ecore_qable_obj *qable_obj =
(union ecore_qable_obj *)vlan_mac_obj;
ecore_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id,
rdata, rdata_mapping, state, pstate,
type, macs_pool, vlans_pool);
/* CAM pool handling */
vlan_mac_obj->get_credit = ecore_get_credit_vlan_mac;
vlan_mac_obj->put_credit = ecore_put_credit_vlan_mac;
/* CAM offset is relevant for 57710 and 57711 chips only which have a
* single CAM for both MACs and VLAN-MAC pairs. So the offset
* will be taken from MACs' pool object only.
*/
vlan_mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
vlan_mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
if (CHIP_IS_E1x(sc)) {
ECORE_ERR("Do not support chips other than E2/E3\n");
ECORE_BUG();
} else {
vlan_mac_obj->set_one_rule = ecore_set_one_vxlan_fltr_e2;
vlan_mac_obj->check_del = ecore_check_vxlan_fltr_del;
vlan_mac_obj->check_add = ecore_check_vxlan_fltr_add;
vlan_mac_obj->check_move = ecore_check_move;
vlan_mac_obj->ramrod_cmd =
RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
/* Exe Queue */
ecore_exe_queue_init(sc,
&vlan_mac_obj->exe_queue,
CLASSIFY_RULES_COUNT,
qable_obj, ecore_validate_vlan_mac,
ecore_remove_vlan_mac,
ecore_optimize_vlan_mac,
ecore_execute_vlan_mac,
ecore_exeq_get_vxlan_fltr);
}
}
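A hedged usage sketch of the new object: the vlan_mac_obj and cmd field names on ecore_vlan_mac_ramrod_params are assumed from the surrounding API, and the helper itself is hypothetical:

/* Hypothetical usage: queue an ADD of one VXLAN filter through the
 * generic vlan_mac state machine initialized above. */
static int
example_add_vxlan_fltr(struct bxe_softc *sc, struct ecore_vlan_mac_obj *obj,
                       const uint8_t *inner_mac, uint32_t vni)
{
    struct ecore_vlan_mac_ramrod_params p;

    ECORE_MEMSET(&p, 0, sizeof(p));
    p.vlan_mac_obj = obj;                 /* field name assumed */
    p.user_req.cmd = ECORE_VLAN_MAC_ADD;
    ECORE_MEMCPY(p.user_req.u.vxlan_fltr.innermac, inner_mac, ETH_ALEN);
    p.user_req.u.vxlan_fltr.vni = vni;

    return ecore_config_vlan_mac(sc, &p);
}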
/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
static inline void __storm_memset_mac_filters(struct bxe_softc *sc,
struct tstorm_eth_mac_filter_config *mac_filters,
@ -2583,11 +2755,11 @@ static int ecore_set_rx_mode_e2(struct bxe_softc *sc,
data->header.rule_cnt, p->rx_accept_flags,
p->tx_accept_flags);
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read and we will have to put a full memory barrier there
* (inside ecore_sp_post()).
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
/* Send a ramrod */
@ -3294,11 +3466,11 @@ static int ecore_mcast_setup_e2(struct bxe_softc *sc,
raw->clear_pending(raw);
return ECORE_SUCCESS;
} else {
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read and we will have to put a full memory barrier there
* (inside ecore_sp_post()).
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
/* Send a ramrod */
@ -3783,11 +3955,11 @@ static int ecore_mcast_setup_e1(struct bxe_softc *sc,
raw->clear_pending(raw);
return ECORE_SUCCESS;
} else {
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read and we will have to put a full memory barrier there
* (inside ecore_sp_post()).
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
/* Send a ramrod */
@ -4172,7 +4344,7 @@ static bool ecore_credit_pool_get_entry_always_TRUE(
* If credit is negative pool operations will always succeed (unlimited pool).
*
*/
static inline void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
int base, int credit)
{
/* Zero the object first */
@ -4246,29 +4418,14 @@ void ecore_init_mac_credit_pool(struct bxe_softc *sc,
/* this should never happen! Block MAC operations. */
ecore_init_credit_pool(p, 0, 0);
}
} else {
/*
* CAM credit is equaly divided between all active functions
* on the PATH.
*/
if ((func_num > 1)) {
if (func_num > 0) {
if (!CHIP_REV_IS_SLOW(sc))
cam_sz = (MAX_MAC_CREDIT_E2
- GET_NUM_VFS_PER_PATH(sc))
/ func_num
+ GET_NUM_VFS_PER_PF(sc);
else
cam_sz = ECORE_CAM_SIZE_EMUL;
/* No need for CAM entries handling for 57712 and
* newer.
*/
ecore_init_credit_pool(p, -1, cam_sz);
} else if (func_num == 1) {
if (!CHIP_REV_IS_SLOW(sc))
cam_sz = MAX_MAC_CREDIT_E2;
cam_sz = PF_MAC_CREDIT_E2(sc, func_num);
else
cam_sz = ECORE_CAM_SIZE_EMUL;
@ -4298,8 +4455,9 @@ void ecore_init_vlan_credit_pool(struct bxe_softc *sc,
* on the PATH.
*/
if (func_num > 0) {
int credit = MAX_VLAN_CREDIT_E2 / func_num;
ecore_init_credit_pool(p, func_id * credit, credit);
int credit = PF_VLAN_CREDIT_E2(sc, func_num);
ecore_init_credit_pool(p, -1/*unused for E2*/, credit);
} else
/* this should never happen! Block VLAN operations. */
ecore_init_credit_pool(p, 0, 0);
@ -4323,6 +4481,7 @@ static int ecore_setup_rss(struct bxe_softc *sc,
struct ecore_raw_obj *r = &o->raw;
struct eth_rss_update_ramrod_data *data =
(struct eth_rss_update_ramrod_data *)(r->rdata);
uint16_t caps = 0;
uint8_t rss_mode = 0;
int rc;
@ -4339,10 +4498,6 @@ static int ecore_setup_rss(struct bxe_softc *sc,
rss_mode = ETH_RSS_MODE_DISABLED;
else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
rss_mode = ETH_RSS_MODE_REGULAR;
#if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 55000) /* ! BNX2X_UPSTREAM */
else if (ECORE_TEST_BIT(ECORE_RSS_MODE_ESX51, &p->rss_flags))
rss_mode = ETH_RSS_MODE_ESX51;
#endif
data->rss_mode = rss_mode;
@ -4350,35 +4505,41 @@ static int ecore_setup_rss(struct bxe_softc *sc,
/* RSS capabilities */
if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) {
data->udp_4tuple_dst_port_mask = ECORE_CPU_TO_LE16(p->tunnel_mask);
data->udp_4tuple_dst_port_value =
ECORE_CPU_TO_LE16(p->tunnel_value);
if (ECORE_TEST_BIT(ECORE_RSS_IPV4_VXLAN, &p->rss_flags))
caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY;
if (ECORE_TEST_BIT(ECORE_RSS_IPV6_VXLAN, &p->rss_flags))
caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY;
if (ECORE_TEST_BIT(ECORE_RSS_TUNN_INNER_HDRS, &p->rss_flags))
caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY;
/* RSS keys */
if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
sizeof(data->rss_key));
caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
}
data->capabilities = ECORE_CPU_TO_LE16(caps);
/* Hashing mask */
data->rss_result_mask = p->rss_result_mask;
@ -4395,18 +4556,11 @@ static int ecore_setup_rss(struct bxe_softc *sc,
ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
/* RSS keys */
if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
sizeof(data->rss_key));
data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
}
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read and we will have to put a full memory barrier there
* (inside ecore_sp_post()).
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
/* Send a ramrod */
@ -4470,15 +4624,6 @@ void ecore_init_rss_config_obj(struct bxe_softc *sc,
rss_obj->config_rss = ecore_setup_rss;
}
int validate_vlan_mac(struct bxe_softc *sc,
struct ecore_vlan_mac_obj *vlan_mac)
{
if (!vlan_mac->get_n_elements) {
ECORE_ERR("vlan mac object was not intialized\n");
return ECORE_INVAL;
}
return 0;
}
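With the ESX51-only RSS mode dropped and the VXLAN capability bits added above, inner-header hashing for tunneled traffic becomes a matter of setting the new flags. A hedged sketch, assuming ECORE_SET_BIT is the set-bit counterpart of the ECORE_TEST_BIT helper used throughout:

/* Hypothetical sketch: enable RSS over the inner headers of VXLAN
 * traffic using the new flag bits. */
static int
example_config_vxlan_rss(struct bxe_softc *sc,
                         struct ecore_config_rss_params *p)
{
    ECORE_SET_BIT(ECORE_RSS_IPV4_VXLAN, &p->rss_flags);
    ECORE_SET_BIT(ECORE_RSS_IPV6_VXLAN, &p->rss_flags);
    ECORE_SET_BIT(ECORE_RSS_TUNN_INNER_HDRS, &p->rss_flags);

    return ecore_config_rss(sc, p);
}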
/********************** Queue state object ***********************************/
@ -4661,6 +4806,8 @@ static void ecore_q_fill_init_general_data(struct bxe_softc *sc,
ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
gen_data->fp_hsi_ver = params->fp_hsi;
ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d\n",
gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
}
@ -4906,13 +5053,12 @@ static inline int ecore_q_send_setup_e1x(struct bxe_softc *sc,
/* Fill the ramrod data */
ecore_q_fill_setup_data_cmn(sc, params, rdata);
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read and we will have to put a full memory barrier there
* (inside ecore_sp_post()).
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
return ecore_sp_post(sc,
ramrod,
o->cids[ECORE_PRIMARY_CID_INDEX],
@ -4936,13 +5082,12 @@ static inline int ecore_q_send_setup_e2(struct bxe_softc *sc,
ecore_q_fill_setup_data_cmn(sc, params, rdata);
ecore_q_fill_setup_data_e2(sc, params, rdata);
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read and we will have to put a full memory barrier there
* (inside ecore_sp_post()).
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
return ecore_sp_post(sc,
ramrod,
o->cids[ECORE_PRIMARY_CID_INDEX],
@ -4986,13 +5131,12 @@ static inline int ecore_q_send_setup_tx_only(struct bxe_softc *sc,
o->cids[cid_index], rdata->general.client_id,
rdata->general.sp_client_id, rdata->general.cos);
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read and we will have to put a full memory barrier there
* (inside ecore_sp_post()).
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
return ecore_sp_post(sc, ramrod, o->cids[cid_index],
data_mapping, ETH_CONNECTION_TYPE);
}
@ -5069,6 +5213,14 @@ static void ecore_q_fill_update_data(struct bxe_softc *sc,
data->tx_switching_change_flg =
ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
&params->update_flags);
/* PTP */
data->handle_ptp_pkts_flg =
ECORE_TEST_BIT(ECORE_Q_UPDATE_PTP_PKTS,
&params->update_flags);
data->handle_ptp_pkts_change_flg =
ECORE_TEST_BIT(ECORE_Q_UPDATE_PTP_PKTS_CHNG,
&params->update_flags);
}
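A hedged sketch of toggling the new PTP handling on a live queue; ECORE_Q_CMD_UPDATE, the cmd and q_obj fields, and ECORE_SET_BIT are assumed from the queue state API:

/* Hypothetical sketch: ask the FW to start handling PTP packets on a
 * queue via the new update flags. */
static int
example_enable_ptp_pkts(struct bxe_softc *sc, struct ecore_queue_sp_obj *q)
{
    struct ecore_queue_state_params qp;

    ECORE_MEMSET(&qp, 0, sizeof(qp));
    qp.q_obj = q;
    qp.cmd = ECORE_Q_CMD_UPDATE;            /* command name assumed */
    ECORE_SET_BIT(ECORE_Q_UPDATE_PTP_PKTS_CHNG,
                  &qp.params.update.update_flags);
    ECORE_SET_BIT(ECORE_Q_UPDATE_PTP_PKTS,
                  &qp.params.update.update_flags);

    return ecore_queue_state_change(sc, &qp);
}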
static inline int ecore_q_send_update(struct bxe_softc *sc,
@ -5094,13 +5246,12 @@ static inline int ecore_q_send_update(struct bxe_softc *sc,
/* Fill the ramrod data */
ecore_q_fill_update_data(sc, o, update_params, rdata);
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read and we will have to put a full memory barrier there
* (inside ecore_sp_post()).
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
o->cids[cid_index], data_mapping,
ETH_CONNECTION_TYPE);
@ -5147,11 +5298,61 @@ static inline int ecore_q_send_activate(struct bxe_softc *sc,
return ecore_q_send_update(sc, params);
}
static void ecore_q_fill_update_tpa_data(struct bxe_softc *sc,
struct ecore_queue_sp_obj *obj,
struct ecore_queue_update_tpa_params *params,
struct tpa_update_ramrod_data *data)
{
data->client_id = obj->cl_id;
data->complete_on_both_clients = params->complete_on_both_clients;
data->dont_verify_rings_pause_thr_flg =
params->dont_verify_thr;
data->max_agg_size = ECORE_CPU_TO_LE16(params->max_agg_sz);
data->max_sges_for_packet = params->max_sges_pkt;
data->max_tpa_queues = params->max_tpa_queues;
data->sge_buff_size = ECORE_CPU_TO_LE16(params->sge_buff_sz);
data->sge_page_base_hi = ECORE_CPU_TO_LE32(U64_HI(params->sge_map));
data->sge_page_base_lo = ECORE_CPU_TO_LE32(U64_LO(params->sge_map));
data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_pause_thr_high);
data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_pause_thr_low);
data->tpa_mode = params->tpa_mode;
data->update_ipv4 = params->update_ipv4;
data->update_ipv6 = params->update_ipv6;
}
static inline int ecore_q_send_update_tpa(struct bxe_softc *sc,
struct ecore_queue_state_params *params)
{
/* TODO: Not implemented yet. */
return -1;
struct ecore_queue_sp_obj *o = params->q_obj;
struct tpa_update_ramrod_data *rdata =
(struct tpa_update_ramrod_data *)o->rdata;
ecore_dma_addr_t data_mapping = o->rdata_mapping;
struct ecore_queue_update_tpa_params *update_tpa_params =
&params->params.update_tpa;
uint16_t type;
/* Clear the ramrod data */
ECORE_MEMSET(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data */
ecore_q_fill_update_tpa_data(sc, o, update_tpa_params, rdata);
/* Add the function id inside the type, so that sp post function
* doesn't automatically add the PF func-id, this is required
* for operations done by PFs on behalf of their VFs
*/
type = ETH_CONNECTION_TYPE |
((o->func_id) << SPE_HDR_T_FUNCTION_ID_SHIFT);
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TPA_UPDATE,
o->cids[ECORE_PRIMARY_CID_INDEX],
data_mapping, type);
}
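A hedged sketch of driving the now-implemented TPA update; ECORE_Q_CMD_UPDATE_TPA is assumed from the queue command enum, and every literal below is a placeholder rather than a recommended value:

/* Hypothetical sketch: request a TPA reconfiguration on the primary
 * connection of a queue object. */
static int
example_update_tpa(struct bxe_softc *sc, struct ecore_queue_sp_obj *q,
                   ecore_dma_addr_t sge_map)
{
    struct ecore_queue_state_params qp;
    struct ecore_queue_update_tpa_params *tpa;

    ECORE_MEMSET(&qp, 0, sizeof(qp));
    qp.q_obj = q;
    qp.cmd = ECORE_Q_CMD_UPDATE_TPA;        /* command name assumed */

    tpa = &qp.params.update_tpa;
    tpa->sge_map = sge_map;
    tpa->sge_buff_sz = 2048;                /* placeholder */
    tpa->max_agg_sz = 32768;                /* placeholder */
    tpa->max_tpa_queues = 2;                /* placeholder */
    tpa->update_ipv4 = 1;
    tpa->update_ipv6 = 1;

    return ecore_queue_state_change(sc, &qp);
}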
static inline int ecore_q_send_halt(struct bxe_softc *sc,
@ -5163,6 +5364,12 @@ static inline int ecore_q_send_halt(struct bxe_softc *sc,
ecore_dma_addr_t data_mapping = 0;
data_mapping = (ecore_dma_addr_t)o->cl_id;
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
return ecore_sp_post(sc,
RAMROD_CMD_ID_ETH_HALT,
o->cids[ECORE_PRIMARY_CID_INDEX],
@ -5744,6 +5951,10 @@ static int ecore_func_chk_transition(struct bxe_softc *sc,
(!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
next_state = ECORE_F_STATE_STARTED;
else if ((cmd == ECORE_F_CMD_SET_TIMESYNC) &&
(!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
next_state = ECORE_F_STATE_STARTED;
else if (cmd == ECORE_F_CMD_TX_STOP)
next_state = ECORE_F_STATE_TX_STOPPED;
@ -5753,6 +5964,10 @@ static int ecore_func_chk_transition(struct bxe_softc *sc,
(!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
next_state = ECORE_F_STATE_TX_STOPPED;
else if ((cmd == ECORE_F_CMD_SET_TIMESYNC) &&
(!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
next_state = ECORE_F_STATE_TX_STOPPED;
else if (cmd == ECORE_F_CMD_TX_START)
next_state = ECORE_F_STATE_STARTED;
@ -6020,17 +6235,51 @@ static inline int ecore_func_send_start(struct bxe_softc *sc,
rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
rdata->path_id = ECORE_PATH_ID(sc);
rdata->network_cos_mode = start_params->network_cos_mode;
rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
/*
* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read and we will have to put a full memory barrier there
* (inside ecore_sp_post()).
*/
rdata->vxlan_dst_port = start_params->vxlan_dst_port;
rdata->geneve_dst_port = start_params->geneve_dst_port;
rdata->inner_clss_l2gre = start_params->inner_clss_l2gre;
rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve;
rdata->inner_clss_vxlan = start_params->inner_clss_vxlan;
rdata->inner_rss = start_params->inner_rss;
rdata->sd_accept_mf_clss_fail = start_params->class_fail;
if (start_params->class_fail_ethtype) {
rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
rdata->sd_accept_mf_clss_fail_ethtype =
ECORE_CPU_TO_LE16(start_params->class_fail_ethtype);
}
rdata->sd_vlan_force_pri_flg = start_params->sd_vlan_force_pri;
rdata->sd_vlan_force_pri_val = start_params->sd_vlan_force_pri_val;
/** @@@TMP - until FW 7.10.7 (which will introduce an HSI change)
* `sd_vlan_eth_type' will replace ethertype in SD mode even if
* it's set to 0; This will probably break SD, so we're setting it
* to ethertype 0x8100 for now.
*/
if (start_params->sd_vlan_eth_type)
rdata->sd_vlan_eth_type =
ECORE_CPU_TO_LE16(start_params->sd_vlan_eth_type);
else
rdata->sd_vlan_eth_type =
ECORE_CPU_TO_LE16((uint16_t) 0x8100);
rdata->no_added_tags = start_params->no_added_tags;
rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid;
if (rdata->c2s_pri_tt_valid) {
memcpy(rdata->c2s_pri_trans_table.val,
start_params->c2s_pri,
MAX_VLAN_PRIORITIES);
rdata->c2s_pri_default = start_params->c2s_pri_default;
}
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
data_mapping, NONE_CONNECTION_TYPE);
}
@ -6048,10 +6297,68 @@ static inline int ecore_func_send_switch_update(struct bxe_softc *sc,
ECORE_MEMSET(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
if (ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
&switch_update_params->changes)) {
rdata->tx_switch_suspend_change_flg = 1;
rdata->tx_switch_suspend = switch_update_params->suspend;
rdata->tx_switch_suspend =
ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND,
&switch_update_params->changes);
}
if (ECORE_TEST_BIT(ECORE_F_UPDATE_SD_VLAN_TAG_CHNG,
&switch_update_params->changes)) {
rdata->sd_vlan_tag_change_flg = 1;
rdata->sd_vlan_tag =
ECORE_CPU_TO_LE16(switch_update_params->vlan);
}
if (ECORE_TEST_BIT(ECORE_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
&switch_update_params->changes)) {
rdata->sd_vlan_eth_type_change_flg = 1;
rdata->sd_vlan_eth_type =
ECORE_CPU_TO_LE16(switch_update_params->vlan_eth_type);
}
if (ECORE_TEST_BIT(ECORE_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
&switch_update_params->changes)) {
rdata->sd_vlan_force_pri_change_flg = 1;
if (ECORE_TEST_BIT(ECORE_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
&switch_update_params->changes))
rdata->sd_vlan_force_pri_flg = 1;
rdata->sd_vlan_force_pri_flg =
switch_update_params->vlan_force_prio;
}
if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_CFG_CHNG,
&switch_update_params->changes)) {
rdata->update_tunn_cfg_flg = 1;
if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
&switch_update_params->changes))
rdata->inner_clss_l2gre = 1;
if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
&switch_update_params->changes))
rdata->inner_clss_vxlan = 1;
if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
&switch_update_params->changes))
rdata->inner_clss_l2geneve = 1;
if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_RSS,
&switch_update_params->changes))
rdata->inner_rss = 1;
rdata->vxlan_dst_port =
ECORE_CPU_TO_LE16(switch_update_params->vxlan_dst_port);
rdata->geneve_dst_port =
ECORE_CPU_TO_LE16(switch_update_params->geneve_dst_port);
}
rdata->echo = SWITCH_UPDATE;
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
data_mapping, NONE_CONNECTION_TYPE);
}
@ -6078,11 +6385,11 @@ static inline int ecore_func_send_afex_update(struct bxe_softc *sc,
rdata->allowed_priorities = afex_update_params->allowed_priorities;
rdata->echo = AFEX_UPDATE;
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read and we will have to put a full memory barrier there
* (inside ecore_sp_post()).
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
ECORE_MSG(sc,
"afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
@ -6115,17 +6422,17 @@ inline int ecore_func_send_afex_viflists(struct bxe_softc *sc,
/* send in echo type of sub command */
rdata->echo = afex_vif_params->afex_vif_list_command;
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read and we will have to put a full memory barrier there
* (inside ecore_sp_post()).
*/
ECORE_MSG(sc, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
rdata->afex_vif_list_command, rdata->vif_list_index,
rdata->func_bit_map, rdata->func_to_clear);
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
/* this ramrod sends data directly and not through DMA mapping */
return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
*p_rdata, NONE_CONNECTION_TYPE);
@ -6165,10 +6472,53 @@ static inline int ecore_func_send_tx_start(struct bxe_softc *sc,
rdata->traffic_type_to_priority_cos[i] =
tx_start_params->traffic_type_to_priority_cos[i];
for (i = 0; i < MAX_TRAFFIC_TYPES; i++)
rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i];
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
data_mapping, NONE_CONNECTION_TYPE);
}
static inline int ecore_func_send_set_timesync(struct bxe_softc *sc,
struct ecore_func_state_params *params)
{
struct ecore_func_sp_obj *o = params->f_obj;
struct set_timesync_ramrod_data *rdata =
(struct set_timesync_ramrod_data *)o->rdata;
ecore_dma_addr_t data_mapping = o->rdata_mapping;
struct ecore_func_set_timesync_params *set_timesync_params =
&params->params.set_timesync;
ECORE_MEMSET(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd;
rdata->offset_cmd = set_timesync_params->offset_cmd;
rdata->add_sub_drift_adjust_value =
set_timesync_params->add_sub_drift_adjust_value;
rdata->drift_adjust_value = set_timesync_params->drift_adjust_value;
rdata->drift_adjust_period = set_timesync_params->drift_adjust_period;
rdata->offset_delta.lo =
ECORE_CPU_TO_LE32(U64_LO(set_timesync_params->offset_delta));
rdata->offset_delta.hi =
ECORE_CPU_TO_LE32(U64_HI(set_timesync_params->offset_delta));
ECORE_MSG(sc, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n",
rdata->drift_adjust_cmd, rdata->offset_cmd,
rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value,
rdata->drift_adjust_period, rdata->offset_delta.lo,
rdata->offset_delta.hi);
return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0,
data_mapping, NONE_CONNECTION_TYPE);
}
static int ecore_func_send_cmd(struct bxe_softc *sc,
struct ecore_func_state_params *params)
{
@ -6191,6 +6541,8 @@ static int ecore_func_send_cmd(struct bxe_softc *sc,
return ecore_func_send_tx_start(sc, params);
case ECORE_F_CMD_SWITCH_UPDATE:
return ecore_func_send_switch_update(sc, params);
case ECORE_F_CMD_SET_TIMESYNC:
return ecore_func_send_set_timesync(sc, params);
default:
ECORE_ERR("Unknown command: %d\n", params->cmd);
return ECORE_INVAL;



@ -1,5 +1,5 @@
/*-
* Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
* Copyright (c) 2007-2017 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -11,7 +11,7 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
@ -246,8 +246,8 @@ ECORE_CRC32_LE(uint32_t seed, uint8_t *mac, uint32_t len)
#else
extern unsigned long bxe_debug;
#define BXE_DEBUG_ECORE_DBG_BREAK_IF 0x01
#define BXE_DEBUG_ECORE_BUG 0x02
#define BXE_DEBUG_ECORE_BUG_ON 0x04
@ -524,7 +524,8 @@ enum {
ECORE_FILTER_MCAST_SCHED,
ECORE_FILTER_RSS_CONF_PENDING,
ECORE_AFEX_FCOE_Q_UPDATE_PENDING,
ECORE_AFEX_PENDING_VIFSET_MCP_ACK
ECORE_AFEX_PENDING_VIFSET_MCP_ACK,
ECORE_FILTER_VXLAN_PENDING
};
struct ecore_raw_obj {
@ -568,10 +569,16 @@ struct ecore_vlan_mac_ramrod_data {
uint16_t vlan;
};
struct ecore_vxlan_fltr_ramrod_data {
uint8_t innermac[ETH_ALEN];
uint32_t vni;
};
union ecore_classification_ramrod_data {
struct ecore_mac_ramrod_data mac;
struct ecore_vlan_ramrod_data vlan;
struct ecore_vlan_mac_ramrod_data vlan_mac;
struct ecore_vxlan_fltr_ramrod_data vxlan_fltr;
};
/* VLAN_MAC commands */
@ -722,6 +729,13 @@ enum {
ECORE_DONT_CONSUME_CAM_CREDIT,
ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
};
/* When looking for matching filters, some flags are not interesting */
#define ECORE_VLAN_MAC_CMP_MASK (1 << ECORE_UC_LIST_MAC | \
1 << ECORE_ETH_MAC | \
1 << ECORE_ISCSI_ETH_MAC | \
1 << ECORE_NETQ_ETH_MAC)
#define ECORE_VLAN_MAC_CMP_FLAGS(flags) \
((flags) & ECORE_VLAN_MAC_CMP_MASK)
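For illustration, two filter entries that differ only in a credit-accounting bit compare equal once masked:

/* Illustration: bits outside ECORE_VLAN_MAC_CMP_MASK are ignored. */
unsigned long a = 1 << ECORE_ETH_MAC;
unsigned long b = (1 << ECORE_ETH_MAC) |
                  (1 << ECORE_DONT_CONSUME_CAM_CREDIT);
/* ECORE_VLAN_MAC_CMP_FLAGS(a) == ECORE_VLAN_MAC_CMP_FLAGS(b) holds. */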
struct ecore_vlan_mac_ramrod_params {
/* Object to run the command from */
@ -1165,10 +1179,9 @@ enum {
ECORE_RSS_IPV6_TCP,
ECORE_RSS_IPV6_UDP,
ECORE_RSS_TUNNELING,
#if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 55000) /* ! BNX2X_UPSTREAM */
ECORE_RSS_MODE_ESX51,
#endif
ECORE_RSS_IPV4_VXLAN,
ECORE_RSS_IPV6_VXLAN,
ECORE_RSS_TUNN_INNER_HDRS,
};
struct ecore_config_rss_params {
@ -1191,10 +1204,6 @@ struct ecore_config_rss_params {
/* valid only iff ECORE_RSS_UPDATE_TOE is set */
uint16_t toe_rss_bitmap;
/* valid iff ECORE_RSS_TUNNELING is set */
uint16_t tunnel_value;
uint16_t tunnel_mask;
};
struct ecore_rss_config_obj {
@ -1232,6 +1241,8 @@ enum {
ECORE_Q_UPDATE_SILENT_VLAN_REM,
ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
ECORE_Q_UPDATE_TX_SWITCHING,
ECORE_Q_UPDATE_PTP_PKTS_CHNG,
ECORE_Q_UPDATE_PTP_PKTS,
};
/* Allowed Queue states */
@ -1315,6 +1326,10 @@ enum ecore_q_type {
#define ECORE_MULTI_TX_COS_E3B0 3
#define ECORE_MULTI_TX_COS 3 /* Maximum possible */
#define MAC_PAD (ECORE_ALIGN(ETH_ALEN, sizeof(uint32_t)) - ETH_ALEN)
/* DMAE channel to be used by FW for timesync workaround. A driver that sends
* timesync-related ramrods must not use this DMAE command ID.
*/
#define FW_DMAE_CMD_ID 6
struct ecore_queue_init_params {
struct {
@ -1357,6 +1372,24 @@ struct ecore_queue_update_params {
uint8_t cid_index;
};
struct ecore_queue_update_tpa_params {
ecore_dma_addr_t sge_map;
uint8_t update_ipv4;
uint8_t update_ipv6;
uint8_t max_tpa_queues;
uint8_t max_sges_pkt;
uint8_t complete_on_both_clients;
uint8_t dont_verify_thr;
uint8_t tpa_mode;
uint8_t _pad;
uint16_t sge_buff_sz;
uint16_t max_agg_sz;
uint16_t sge_pause_thr_low;
uint16_t sge_pause_thr_high;
};
struct rxq_pause_params {
uint16_t bd_th_lo;
uint16_t bd_th_hi;
@ -1375,6 +1408,8 @@ struct ecore_general_setup_params {
uint8_t spcl_id;
uint16_t mtu;
uint8_t cos;
uint8_t fp_hsi;
};
struct ecore_rxq_setup_params {
@ -1451,6 +1486,7 @@ struct ecore_queue_state_params {
/* Params according to the current command */
union {
struct ecore_queue_update_params update;
struct ecore_queue_update_tpa_params update_tpa;
struct ecore_queue_setup_params setup;
struct ecore_queue_init_params init;
struct ecore_queue_setup_tx_only_params tx_only;
@ -1530,6 +1566,22 @@ struct ecore_queue_sp_obj {
};
/********************** Function state update *********************************/
/* UPDATE command options */
enum {
ECORE_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
ECORE_F_UPDATE_TX_SWITCH_SUSPEND,
ECORE_F_UPDATE_SD_VLAN_TAG_CHNG,
ECORE_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
ECORE_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
ECORE_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
ECORE_F_UPDATE_TUNNEL_CFG_CHNG,
ECORE_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
ECORE_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
ECORE_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
ECORE_F_UPDATE_TUNNEL_INNER_RSS,
};
/* Allowed Function states */
enum ecore_func_state {
ECORE_F_STATE_RESET,
@ -1550,6 +1602,7 @@ enum ecore_func_cmd {
ECORE_F_CMD_TX_STOP,
ECORE_F_CMD_TX_START,
ECORE_F_CMD_SWITCH_UPDATE,
ECORE_F_CMD_SET_TIMESYNC,
ECORE_F_CMD_MAX,
};
@ -1591,19 +1644,53 @@ struct ecore_func_start_params {
/* Function cos mode */
uint8_t network_cos_mode;
/* NVGRE classification enablement */
uint8_t nvgre_clss_en;
/* UDP dest port for VXLAN */
uint16_t vxlan_dst_port;
/* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
uint8_t gre_tunnel_mode;
/* UDP dest port for Geneve */
uint16_t geneve_dst_port;
/* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */
uint8_t gre_tunnel_rss;
/* Enable inner Rx classifications for L2GRE packets */
uint8_t inner_clss_l2gre;
/* Enable inner Rx classifications for L2-Geneve packets */
uint8_t inner_clss_l2geneve;
/* Enable inner Rx classification for vxlan packets */
uint8_t inner_clss_vxlan;
/* Enable RSS according to inner header */
uint8_t inner_rss;
/** Allows accepting of packets failing MF classification, possibly
* only matching a given ethertype
*/
uint8_t class_fail;
uint16_t class_fail_ethtype;
/* Override priority of output packets */
uint8_t sd_vlan_force_pri;
uint8_t sd_vlan_force_pri_val;
/* Replace vlan's ethertype */
uint16_t sd_vlan_eth_type;
/* Prevent inner vlans from being added by FW */
uint8_t no_added_tags;
/* Inner-to-Outer vlan priority mapping */
uint8_t c2s_pri[MAX_VLAN_PRIORITIES];
uint8_t c2s_pri_default;
uint8_t c2s_pri_valid;
};
struct ecore_func_switch_update_params {
uint8_t suspend;
unsigned long changes; /* ECORE_F_UPDATE_XX bits */
uint16_t vlan;
uint16_t vlan_eth_type;
uint8_t vlan_force_prio;
uint16_t vxlan_dst_port;
uint16_t geneve_dst_port;
};
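A hedged sketch of a runtime tunnel-port update through this structure; the f_obj field and the switch_update union member are assumed from the surrounding API:

/* Hypothetical sketch: change the VXLAN destination UDP port via the
 * changes bitmask consumed by ecore_func_send_switch_update(). */
static int
example_set_vxlan_dst_port(struct bxe_softc *sc,
                           struct ecore_func_sp_obj *f_obj, uint16_t port)
{
    struct ecore_func_state_params fp;

    ECORE_MEMSET(&fp, 0, sizeof(fp));
    fp.f_obj = f_obj;                       /* field name assumed */
    fp.cmd = ECORE_F_CMD_SWITCH_UPDATE;
    ECORE_SET_BIT(ECORE_F_UPDATE_TUNNEL_CFG_CHNG,
                  &fp.params.switch_update.changes);
    fp.params.switch_update.vxlan_dst_port = port;

    return ecore_func_state_change(sc, &fp);
}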
struct ecore_func_afex_update_params {
@ -1618,11 +1705,28 @@ struct ecore_func_afex_viflists_params {
uint8_t afex_vif_list_command;
uint8_t func_to_clear;
};
struct ecore_func_tx_start_params {
struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
uint8_t dcb_enabled;
uint8_t dcb_version;
uint8_t dont_add_pri_0;
uint8_t dcb_outer_pri[MAX_TRAFFIC_TYPES];
};
struct ecore_func_set_timesync_params {
/* Reset, set or keep the current drift value */
uint8_t drift_adjust_cmd;
/* Dec, inc or keep the current offset */
uint8_t offset_cmd;
/* Drift value direction */
uint8_t add_sub_drift_adjust_value;
/* Drift, period and offset values to be used according to the commands
* above.
*/
uint8_t drift_adjust_value;
uint32_t drift_adjust_period;
uint64_t offset_delta;
};
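A hedged sketch of filling these parameters to apply a clock-offset delta while leaving drift untouched; TS_DRIFT_ADJUST_KEEP and TS_OFFSET_INC are assumed to be the FW HSI encodings from ecore_hsi.h:

/* Hypothetical sketch: issue a SET_TIMESYNC that only bumps the offset. */
static int
example_adjust_timesync(struct bxe_softc *sc,
                        struct ecore_func_sp_obj *f_obj, uint64_t delta)
{
    struct ecore_func_state_params fp;
    struct ecore_func_set_timesync_params *ts;

    ECORE_MEMSET(&fp, 0, sizeof(fp));
    fp.f_obj = f_obj;                       /* field name assumed */
    fp.cmd = ECORE_F_CMD_SET_TIMESYNC;

    ts = &fp.params.set_timesync;
    ts->drift_adjust_cmd = TS_DRIFT_ADJUST_KEEP;  /* assumed HSI name */
    ts->offset_cmd = TS_OFFSET_INC;               /* assumed HSI name */
    ts->offset_delta = delta;

    return ecore_func_state_change(sc, &fp);
}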
struct ecore_func_state_params {
@ -1643,6 +1747,7 @@ struct ecore_func_state_params {
struct ecore_func_afex_update_params afex_update;
struct ecore_func_afex_viflists_params afex_viflists;
struct ecore_func_tx_start_params tx_start;
struct ecore_func_set_timesync_params set_timesync;
} params;
};
@ -1779,6 +1884,14 @@ void ecore_init_vlan_mac_obj(struct bxe_softc *sc,
struct ecore_credit_pool_obj *macs_pool,
struct ecore_credit_pool_obj *vlans_pool);
void ecore_init_vxlan_fltr_obj(struct bxe_softc *sc,
struct ecore_vlan_mac_obj *vlan_mac_obj,
uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
ecore_dma_addr_t rdata_mapping, int state,
unsigned long *pstate, ecore_obj_type type,
struct ecore_credit_pool_obj *macs_pool,
struct ecore_credit_pool_obj *vlans_pool);
int ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
struct ecore_vlan_mac_obj *o);
void ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
@ -1851,6 +1964,8 @@ void ecore_init_mac_credit_pool(struct bxe_softc *sc,
void ecore_init_vlan_credit_pool(struct bxe_softc *sc,
struct ecore_credit_pool_obj *p, uint8_t func_id,
uint8_t func_num);
void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
int base, int credit);
/****************** RSS CONFIGURATION ****************/
void ecore_init_rss_config_obj(struct bxe_softc *sc,
@ -1878,9 +1993,14 @@ int ecore_config_rss(struct bxe_softc *sc,
void ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj,
uint8_t *ind_table);
/* set as inline so printout will show the offending function */
int validate_vlan_mac(struct bxe_softc *sc,
struct ecore_vlan_mac_obj *vlan_mac);
#define PF_MAC_CREDIT_E2(sc, func_num) \
((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(sc) * VF_MAC_CREDIT_CNT) / \
func_num + GET_NUM_VFS_PER_PF(sc) * VF_MAC_CREDIT_CNT)
#define PF_VLAN_CREDIT_E2(sc, func_num) \
((MAX_VLAN_CREDIT_E2 - GET_NUM_VFS_PER_PATH(sc) * VF_VLAN_CREDIT_CNT) / \
func_num + GET_NUM_VFS_PER_PF(sc) * VF_VLAN_CREDIT_CNT)
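A worked example of the credit split, with purely illustrative numbers:

/* Illustrative numbers only: MAX_MAC_CREDIT_E2 = 272, 16 VFs on the
 * path, VF_MAC_CREDIT_CNT = 2, func_num = 2 PFs, 8 VFs per PF:
 *
 *   PF_MAC_CREDIT_E2 = (272 - 16 * 2) / 2 + 8 * 2
 *                    = 240 / 2 + 16 = 136 CAM entries per PF
 */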
#endif /* ECORE_SP_H */


@ -17,5 +17,6 @@ SRCS += bxe.c \
57712_init_values.c
CFLAGS += -I${BXE}
CFLAGS += -Wno-shift-negative-value
.include <bsd.kmod.mk>