qede: add base driver

The base driver is the backend module for the QLogic FastLinQ QL4xxxx
25G/40G CNA family of adapters, as well as for their virtual functions
(VFs) in an SR-IOV context.

The purpose of the base module is to:
 - provide all the common code that will be shared between the various
   drivers used with this line of products. Flows such as chip
   initialization and de-initialization fall under this category.
 - abstract the protocol-specific HW & FW components, allowing the
   protocol drivers to have clean APIs whose slowpath configuration is
   detached from the actual Hardware Software Interface (HSI).

This patch adds a base module without any protocol-specific bits.
I.e., this adds a basic implementation that almost entirely falls under
the first category.

Signed-off-by: Harish Patil <harish.patil@qlogic.com>
Signed-off-by: Rasesh Mody <rasesh.mody@qlogic.com>
Signed-off-by: Sony Chacko <sony.chacko@qlogic.com>
Authored by Rasesh Mody on 2016-04-27 07:18:36 -07:00; committed by Bruce Richardson
commit ec94dbc573 (parent 1554106b22)
46 changed files with 27982 additions and 0 deletions
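
For orientation, a rough sketch (not part of the patch) of how a protocol
driver is expected to sit on top of this base module; the function names and
signatures below are illustrative of the intended split, not the exact ecore
API added here:

	/* hypothetical protocol-driver start path -- names are indicative only */
	static int proto_drv_start(struct ecore_dev *edev)
	{
		int rc;

		/* common, protocol-agnostic flows live in the base module */
		rc = ecore_hw_prepare(edev);	/* device/HW-function discovery */
		if (rc)
			return rc;

		rc = ecore_hw_init(edev);	/* chip + firmware initialization */
		if (rc)
			return rc;

		/* protocol-specific configuration goes through clean,
		 * HSI-detached APIs added by later patches in this series
		 */
		return 0;
	}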


@@ -332,6 +332,12 @@ M: Rasesh Mody <rasesh.mody@qlogic.com>
F: drivers/net/bnx2x/
F: doc/guides/nics/bnx2x.rst

QLogic qede PMD
M: Harish Patil <harish.patil@qlogic.com>
M: Rasesh Mody <rasesh.mody@qlogic.com>
M: Sony Chacko <sony.chacko@qlogic.com>
F: drivers/net/qede/

RedHat virtio
M: Huawei Xie <huawei.xie@intel.com>
M: Yuanhan Liu <yuanhan.liu@linux.intel.com>


@@ -0,0 +1,28 @@
/*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of QLogic Corporation nor the name of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/

drivers/net/qede/Makefile (new file, 81 lines)

@@ -0,0 +1,81 @@
# Copyright (c) 2016 QLogic Corporation.
# All rights reserved.
# www.qlogic.com
#
# See LICENSE.qede_pmd for copyright and licensing details.
include $(RTE_SDK)/mk/rte.vars.mk
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
#
# OS
#
OS_TYPE := $(shell uname -s)
#
# CFLAGS
#
CFLAGS_BASE_DRIVER = -Wno-unused-parameter
CFLAGS_BASE_DRIVER += -Wno-unused-value
CFLAGS_BASE_DRIVER += -Wno-sign-compare
CFLAGS_BASE_DRIVER += -Wno-missing-prototypes
CFLAGS_BASE_DRIVER += -Wno-cast-qual
CFLAGS_BASE_DRIVER += -Wno-unused-function
CFLAGS_BASE_DRIVER += -Wno-unused-variable
CFLAGS_BASE_DRIVER += -Wno-strict-aliasing
CFLAGS_BASE_DRIVER += -Wno-missing-prototypes
CFLAGS_BASE_DRIVER += -Wno-format-nonliteral
ifeq ($(OS_TYPE),Linux)
ifeq ($(shell clang -Wno-shift-negative-value -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0)
CFLAGS_BASE_DRIVER += -Wno-shift-negative-value
endif
endif
ifneq (,$(filter gcc gcc48,$(CC)))
CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
CFLAGS_BASE_DRIVER += -Wno-missing-declarations
CFLAGS_BASE_DRIVER += -Wno-maybe-uninitialized
CFLAGS_BASE_DRIVER += -Wno-strict-prototypes
else ifeq ($(CC), clang)
CFLAGS_BASE_DRIVER += -Wno-format-extra-args
CFLAGS_BASE_DRIVER += -Wno-visibility
CFLAGS_BASE_DRIVER += -Wno-empty-body
CFLAGS_BASE_DRIVER += -Wno-invalid-source-encoding
CFLAGS_BASE_DRIVER += -Wno-sometimes-uninitialized
ifeq ($(shell clang -Wno-pointer-bool-conversion -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0)
CFLAGS_BASE_DRIVER += -Wno-pointer-bool-conversion
endif
else
#icc flags
endif
#
# Add extra flags for base ecore driver files
# to disable warnings in them
#
#
BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS+=$(CFLAGS_BASE_DRIVER)))
#
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_dev.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_hw.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_cxt.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_sp_commands.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_fw_funcs.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_spq.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_ops.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_mcp.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_int.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/bcm_osal.c
# dependent libs:
DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_eal lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_mempool lib/librte_mbuf
DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_net lib/librte_malloc
include $(RTE_SDK)/mk/rte.lib.mk
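
Note: the SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) entries above are keyed off the
CONFIG_RTE_LIBRTE_QEDE_PMD build option, so the PMD is only compiled when that
option is enabled in the target configuration (e.g. CONFIG_RTE_LIBRTE_QEDE_PMD=y);
the CFLAGS_BASE_DRIVER block exists to silence warnings in the imported
base/ecore sources rather than to change code generation.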


@@ -0,0 +1,172 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#include <zlib.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_hw.h"
unsigned long qede_log2_align(unsigned long n)
{
unsigned long ret = n ? 1 : 0;
unsigned long _n = n >> 1;
while (_n) {
_n >>= 1;
ret <<= 1;
}
if (ret < n)
ret <<= 1;
return ret;
}
u32 qede_osal_log2(u32 val)
{
u32 log = 0;
while (val >>= 1)
log++;
return log;
}
inline void qede_set_bit(u32 nr, unsigned long *addr)
{
__sync_fetch_and_or(addr, (1UL << nr));
}
inline void qede_clr_bit(u32 nr, unsigned long *addr)
{
__sync_fetch_and_and(addr, ~(1UL << nr));
}
inline bool qede_test_bit(u32 nr, unsigned long *addr)
{
bool res;
rte_mb();
res = ((*addr) & (1UL << nr)) != 0;
rte_mb();
return res;
}
static inline u32 qede_ffz(unsigned long word)
{
unsigned long first_zero;
first_zero = __builtin_ffsl(~word);
return first_zero ? (first_zero - 1) : OSAL_BITS_PER_UL;
}
inline u32 qede_find_first_zero_bit(unsigned long *addr, u32 limit)
{
u32 i;
u32 nwords = 0;
OSAL_BUILD_BUG_ON(!limit);
nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
for (i = 0; i < nwords; i++)
if (~(addr[i]) != 0)
break;
return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
}
void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
dma_addr_t *phys, size_t size)
{
const struct rte_memzone *mz;
char mz_name[RTE_MEMZONE_NAMESIZE];
uint32_t core_id = rte_lcore_id();
unsigned int socket_id;
OSAL_MEM_ZERO(mz_name, sizeof(mz_name));
snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
(unsigned long)rte_get_timer_cycles());
if (core_id == (unsigned int)LCORE_ID_ANY)
core_id = 0;
socket_id = rte_lcore_to_socket_id(core_id);
mz = rte_memzone_reserve_aligned(mz_name, size,
socket_id, 0, RTE_CACHE_LINE_SIZE);
if (!mz) {
DP_ERR(p_dev, "Unable to allocate DMA memory "
"of size %zu bytes - %s\n",
size, rte_strerror(rte_errno));
*phys = 0;
return OSAL_NULL;
}
*phys = mz->phys_addr;
DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
"size=%zu phys=0x%lx virt=%p on socket=%u\n",
mz->len, mz->phys_addr, mz->addr, socket_id);
return mz->addr;
}
void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
dma_addr_t *phys, size_t size, int align)
{
const struct rte_memzone *mz;
char mz_name[RTE_MEMZONE_NAMESIZE];
uint32_t core_id = rte_lcore_id();
unsigned int socket_id;
OSAL_MEM_ZERO(mz_name, sizeof(mz_name));
snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
(unsigned long)rte_get_timer_cycles());
if (core_id == (unsigned int)LCORE_ID_ANY)
core_id = 0;
socket_id = rte_lcore_to_socket_id(core_id);
mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, 0, align);
if (!mz) {
DP_ERR(p_dev, "Unable to allocate DMA memory "
"of size %zu bytes - %s\n",
size, rte_strerror(rte_errno));
*phys = 0;
return OSAL_NULL;
}
*phys = mz->phys_addr;
DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
"aligned memory size=%zu phys=0x%lx virt=%p core=%d\n",
mz->len, mz->phys_addr, mz->addr, core_id);
return mz->addr;
}
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
int rc;
p_hwfn->stream->next_in = input_buf;
p_hwfn->stream->avail_in = input_len;
p_hwfn->stream->next_out = unzip_buf;
p_hwfn->stream->avail_out = max_size;
rc = inflateInit2(p_hwfn->stream, MAX_WBITS);
if (rc != Z_OK) {
DP_ERR(p_hwfn,
"zlib init failed, rc = %d\n", rc);
return 0;
}
rc = inflate(p_hwfn->stream, Z_FINISH);
inflateEnd(p_hwfn->stream);
if (rc != Z_OK && rc != Z_STREAM_END) {
DP_ERR(p_hwfn,
"FW unzip error: %s, rc=%d\n", p_hwfn->stream->msg,
rc);
return 0;
}
return p_hwfn->stream->total_out / 4;
}
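
For reference, a minimal usage sketch of the helpers above as seen through the
OSAL layer (illustrative only; p_dev is assumed to be an already-probed device
and error handling is omitted):

	static void osal_usage_sketch(struct ecore_dev *p_dev)
	{
		unsigned long bitmap[2] = { 0 };
		dma_addr_t phys = 0;
		void *virt;

		qede_set_bit(5, bitmap);		/* atomic bit set */
		if (!qede_test_bit(5, bitmap))
			return;
		/* only bit 5 is taken, so the first zero bit is 0 */
		(void)qede_find_first_zero_bit(bitmap, 2 * OSAL_BITS_PER_UL);

		/* qede_log2_align() rounds up to a power of two: 600 -> 1024 */

		/* DMA-coherent memory is carved out of an rte_memzone */
		virt = osal_dma_alloc_coherent(p_dev, &phys, 4096);
		if (virt == OSAL_NULL)
			return;
	}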


@@ -0,0 +1,389 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __BCM_OSAL_H
#define __BCM_OSAL_H
#include <rte_byteorder.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_memcpy.h>
#include <rte_log.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_ether.h>
/* libc headers used by the OSAL macros below */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
/* Forward declaration */
struct ecore_dev;
struct ecore_hwfn;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#undef __BIG_ENDIAN
#ifndef __LITTLE_ENDIAN
#define __LITTLE_ENDIAN
#endif
#else
#undef __LITTLE_ENDIAN
#ifndef __BIG_ENDIAN
#define __BIG_ENDIAN
#endif
#endif
/* Memory Types */
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef int16_t s16;
typedef int32_t s32;
typedef u16 __le16;
typedef u32 __le32;
typedef u32 OSAL_BE32;
#define osal_uintptr_t uintptr_t
typedef phys_addr_t dma_addr_t;
typedef rte_spinlock_t osal_spinlock_t;
typedef void *osal_dpc_t;
typedef size_t osal_size_t;
typedef intptr_t osal_int_ptr_t;
typedef int bool;
#define true 1
#define false 0
#define nothing do {} while (0)
/* Delays */
#define DELAY(x) rte_delay_us(x)
#define usec_delay(x) DELAY(x)
#define msec_delay(x) DELAY(1000 * (x))
#define OSAL_UDELAY(time) usec_delay(time)
#define OSAL_MSLEEP(time) msec_delay(time)
/* Memory allocations and deallocations */
#define OSAL_NULL ((void *)0)
#define OSAL_ALLOC(dev, GFP, size) rte_malloc("qede", size, 0)
#define OSAL_ZALLOC(dev, GFP, size) rte_zmalloc("qede", size, 0)
#define OSAL_CALLOC(dev, GFP, num, size) rte_calloc("qede", num, size, 0)
#define OSAL_VALLOC(dev, size) rte_malloc("qede", size, 0)
#define OSAL_FREE(dev, memory) rte_free((void *)memory)
#define OSAL_VFREE(dev, memory) OSAL_FREE(dev, memory)
#define OSAL_MEM_ZERO(mem, size) bzero(mem, size)
#define OSAL_MEMCPY(dst, src, size) rte_memcpy(dst, src, size)
#define OSAL_MEMCMP(s1, s2, size) memcmp(s1, s2, size)
#define OSAL_MEMSET(dst, val, length) \
memset(dst, val, length)
void *osal_dma_alloc_coherent(struct ecore_dev *, dma_addr_t *, size_t);
void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *,
size_t, int);
#define OSAL_DMA_ALLOC_COHERENT(dev, phys, size) \
osal_dma_alloc_coherent(dev, phys, size)
#define OSAL_DMA_ALLOC_COHERENT_ALIGNED(dev, phys, size, align) \
osal_dma_alloc_coherent_aligned(dev, phys, size, align)
/* TODO: */
#define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) nothing
/* HW reads/writes */
#define DIRECT_REG_RD(_dev, _reg_addr) \
(*((volatile u32 *) (_reg_addr)))
#define REG_RD(_p_hwfn, _reg_offset) \
DIRECT_REG_RD(_p_hwfn, \
((u8 *)(uintptr_t)(_p_hwfn->regview) + (_reg_offset)))
#define DIRECT_REG_WR16(_reg_addr, _val) \
(*((volatile u16 *)(_reg_addr)) = _val)
#define DIRECT_REG_WR(_dev, _reg_addr, _val) \
(*((volatile u32 *)(_reg_addr)) = _val)
#define REG_WR(_p_hwfn, _reg_offset, _val) \
DIRECT_REG_WR(NULL, \
((u8 *)((uintptr_t)(_p_hwfn->regview)) + (_reg_offset)), (u32)_val)
#define REG_WR16(_p_hwfn, _reg_offset, _val) \
DIRECT_REG_WR16(((u8 *)(uintptr_t)(_p_hwfn->regview) + \
(_reg_offset)), (u16)_val)
#define DOORBELL(_p_hwfn, _db_addr, _val) \
DIRECT_REG_WR(_p_hwfn, \
((u8 *)(uintptr_t)(_p_hwfn->doorbells) + (_db_addr)), (u32)_val)
/* Mutexes */
typedef pthread_mutex_t osal_mutex_t;
#define OSAL_MUTEX_RELEASE(lock) pthread_mutex_unlock(lock)
#define OSAL_MUTEX_INIT(lock) pthread_mutex_init(lock, NULL)
#define OSAL_MUTEX_ACQUIRE(lock) pthread_mutex_lock(lock)
#define OSAL_MUTEX_ALLOC(hwfn, lock) nothing
#define OSAL_MUTEX_DEALLOC(lock) nothing
/* Spinlocks */
#define OSAL_SPIN_LOCK_INIT(lock) rte_spinlock_init(lock)
#define OSAL_SPIN_LOCK(lock) rte_spinlock_lock(lock)
#define OSAL_SPIN_UNLOCK(lock) rte_spinlock_unlock(lock)
#define OSAL_SPIN_LOCK_IRQSAVE(lock, flags) nothing
#define OSAL_SPIN_UNLOCK_IRQSAVE(lock, flags) nothing
#define OSAL_SPIN_LOCK_ALLOC(hwfn, lock) nothing
#define OSAL_SPIN_LOCK_DEALLOC(lock) nothing
/* DPC */
#define OSAL_DPC_ALLOC(hwfn) OSAL_ALLOC(hwfn, GFP, sizeof(osal_dpc_t))
#define OSAL_DPC_INIT(dpc, hwfn) nothing
#define OSAL_POLL_MODE_DPC(hwfn) nothing
/* Lists */
#define OSAL_LIST_SPLICE_INIT(new_list, list) nothing
#define OSAL_LIST_SPLICE_TAIL_INIT(new_list, list) nothing
typedef struct _osal_list_entry_t {
struct _osal_list_entry_t *next, *prev;
} osal_list_entry_t;
typedef struct osal_list_t {
osal_list_entry_t *head, *tail;
unsigned long cnt;
} osal_list_t;
#define OSAL_LIST_INIT(list) \
do { \
(list)->head = NULL; \
(list)->tail = NULL; \
(list)->cnt = 0; \
} while (0)
#define OSAL_LIST_PUSH_HEAD(entry, list) \
do { \
(entry)->prev = (osal_list_entry_t *)0; \
(entry)->next = (list)->head; \
if ((list)->tail == (osal_list_entry_t *)0) { \
(list)->tail = (entry); \
} else { \
(list)->head->prev = (entry); \
} \
(list)->head = (entry); \
(list)->cnt++; \
} while (0)
#define OSAL_LIST_PUSH_TAIL(entry, list) \
do { \
(entry)->next = (osal_list_entry_t *)0; \
(entry)->prev = (list)->tail; \
if ((list)->tail) { \
(list)->tail->next = (entry); \
} else { \
(list)->head = (entry); \
} \
(list)->tail = (entry); \
(list)->cnt++; \
} while (0)
#define OSAL_LIST_FIRST_ENTRY(list, type, field) \
(type *)((list)->head)
#define OSAL_LIST_REMOVE_ENTRY(entry, list) \
do { \
if ((list)->head == (entry)) { \
if ((list)->head) { \
(list)->head = (list)->head->next; \
if ((list)->head) { \
(list)->head->prev = (osal_list_entry_t *)0;\
} else { \
(list)->tail = (osal_list_entry_t *)0; \
} \
(list)->cnt--; \
} \
} else if ((list)->tail == (entry)) { \
if ((list)->tail) { \
(list)->tail = (list)->tail->prev; \
if ((list)->tail) { \
(list)->tail->next = (osal_list_entry_t *)0;\
} else { \
(list)->head = (osal_list_entry_t *)0; \
} \
(list)->cnt--; \
} \
} else { \
(entry)->prev->next = (entry)->next; \
(entry)->next->prev = (entry)->prev; \
(list)->cnt--; \
} \
} while (0)
#define OSAL_LIST_IS_EMPTY(list) \
((list)->cnt == 0)
#define OSAL_LIST_NEXT(entry, field, type) \
(type *)((&((entry)->field))->next)
/* TODO: Check field, type order */
#define OSAL_LIST_FOR_EACH_ENTRY(entry, list, field, type) \
for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field); \
entry; \
entry = OSAL_LIST_NEXT(entry, field, type))
#define OSAL_LIST_FOR_EACH_ENTRY_SAFE(entry, tmp_entry, list, field, type) \
for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field), \
tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL; \
entry != NULL; \
entry = (type *)tmp_entry, \
tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL)
/* TODO: OSAL_LIST_INSERT_ENTRY_AFTER */
#define OSAL_LIST_INSERT_ENTRY_AFTER(new_entry, entry, list) \
OSAL_LIST_PUSH_HEAD(new_entry, list)
/* PCI config space */
#define OSAL_PCI_READ_CONFIG_BYTE(dev, address, dst) nothing
#define OSAL_PCI_READ_CONFIG_WORD(dev, address, dst) nothing
#define OSAL_PCI_READ_CONFIG_DWORD(dev, address, dst) nothing
#define OSAL_PCI_FIND_EXT_CAPABILITY(dev, pcie_id) 0
#define OSAL_PCI_FIND_CAPABILITY(dev, pcie_id) 0
#define OSAL_PCI_WRITE_CONFIG_WORD(dev, address, val) nothing
#define OSAL_BAR_SIZE(dev, bar_id) 0
/* Barriers */
#define OSAL_MMIOWB(dev) rte_wmb()
#define OSAL_BARRIER(dev) rte_compiler_barrier()
#define OSAL_SMP_RMB(dev) rte_rmb()
#define OSAL_SMP_WMB(dev) rte_wmb()
#define OSAL_RMB(dev) rte_rmb()
#define OSAL_WMB(dev) rte_wmb()
#define OSAL_DMA_SYNC(dev, addr, length, is_post) nothing
#define OSAL_BITS_PER_BYTE (8)
#define OSAL_BITS_PER_UL (sizeof(unsigned long) * OSAL_BITS_PER_BYTE)
#define OSAL_BITS_PER_UL_MASK (OSAL_BITS_PER_UL - 1)
/* Bitops */
void qede_set_bit(u32, unsigned long *);
#define OSAL_SET_BIT(bit, bitmap) \
qede_set_bit(bit, bitmap)
void qede_clr_bit(u32, unsigned long *);
#define OSAL_CLEAR_BIT(bit, bitmap) \
qede_clr_bit(bit, bitmap)
bool qede_test_bit(u32, unsigned long *);
#define OSAL_TEST_BIT(bit, bitmap) \
qede_test_bit(bit, bitmap)
u32 qede_find_first_zero_bit(unsigned long *, u32);
#define OSAL_FIND_FIRST_ZERO_BIT(bitmap, length) \
qede_find_first_zero_bit(bitmap, length)
#define OSAL_BUILD_BUG_ON(cond) nothing
#define ETH_ALEN ETHER_ADDR_LEN
#define OSAL_LINK_UPDATE(hwfn) nothing
/* SR-IOV channel */
#define OSAL_VF_FLR_UPDATE(hwfn) nothing
#define OSAL_VF_SEND_MSG2PF(dev, done, msg, reply_addr, msg_size, reply_size) 0
#define OSAL_VF_CQE_COMPLETION(_dev_p, _cqe, _protocol) (0)
#define OSAL_PF_VF_MSG(hwfn, vfid) 0
#define OSAL_IOV_CHK_UCAST(hwfn, vfid, params) 0
#define OSAL_IOV_POST_START_VPORT(hwfn, vf, vport_id, opaque_fid) nothing
#define OSAL_IOV_VF_ACQUIRE(hwfn, vfid) 0
#define OSAL_IOV_VF_CLEANUP(hwfn, vfid) nothing
#define OSAL_IOV_VF_VPORT_UPDATE(hwfn, vfid, p_params, p_mask) 0
#define OSAL_VF_FILL_ACQUIRE_RESC_REQ(_dev_p, _resc_req, _os_info) nothing
#define OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(_dev_p, _resc_resp) 0
#define OSAL_IOV_GET_OS_TYPE() 0
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf);
#define OSAL_UNZIP_DATA(p_hwfn, input_len, buf, max_size, unzip_buf) \
qede_unzip_data(p_hwfn, input_len, buf, max_size, unzip_buf)
/* TODO: */
#define OSAL_SCHEDULE_RECOVERY_HANDLER(hwfn) nothing
#define OSAL_HW_ERROR_OCCURRED(hwfn, err_type) nothing
#define OSAL_NVM_IS_ACCESS_ENABLED(hwfn) (1)
#define OSAL_NUM_ACTIVE_CPU() 0
/* Utility functions */
#define RTE_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define DIV_ROUND_UP(size, to_what) RTE_DIV_ROUND_UP(size, to_what)
#define RTE_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define ROUNDUP(value, to_what) RTE_ROUNDUP((value), (to_what))
unsigned long qede_log2_align(unsigned long n);
#define OSAL_ROUNDUP_POW_OF_TWO(val) \
qede_log2_align(val)
u32 qede_osal_log2(u32);
#define OSAL_LOG2(val) \
qede_osal_log2(val)
#define PRINT(format, ...) printf(format, ##__VA_ARGS__)
#define PRINT_ERR(format, ...) PRINT(format, ##__VA_ARGS__)
#define OFFSETOF(str, field) __builtin_offsetof(str, field)
#define OSAL_ASSERT(is_assert) assert(is_assert)
#define OSAL_BEFORE_PF_START(file, engine) nothing
#define OSAL_AFTER_PF_STOP(file, engine) nothing
/* Endian macros */
#define OSAL_CPU_TO_BE32(val) rte_cpu_to_be_32(val)
#define OSAL_BE32_TO_CPU(val) rte_be_to_cpu_32(val)
#define OSAL_CPU_TO_LE32(val) rte_cpu_to_le_32(val)
#define OSAL_CPU_TO_LE16(val) rte_cpu_to_le_16(val)
#define OSAL_LE32_TO_CPU(val) rte_le_to_cpu_32(val)
#define OSAL_LE16_TO_CPU(val) rte_le_to_cpu_16(val)
#define OSAL_CPU_TO_BE64(val) rte_cpu_to_be_64(val)
#define OSAL_ARRAY_SIZE(arr) RTE_DIM(arr)
#define OSAL_SPRINTF(name, pattern, ...) \
sprintf(name, pattern, ##__VA_ARGS__)
#define OSAL_STRLEN(string) strlen(string)
#define OSAL_STRCPY(dst, string) strcpy(dst, string)
#define OSAL_STRNCPY(dst, string, len) strncpy(dst, string, len)
#define OSAL_STRCMP(str1, str2) strcmp(str1, str2)
#define OSAL_INLINE inline
#define OSAL_REG_ADDR(_p_hwfn, _offset) \
(void *)((u8 *)(uintptr_t)(_p_hwfn->regview) + (_offset))
#define OSAL_PAGE_SIZE 4096
#define OSAL_IOMEM volatile
#define OSAL_UNLIKELY(x) __builtin_expect(!!(x), 0)
#define OSAL_MIN_T(type, __min1, __min2) \
((type)(__min1) < (type)(__min2) ? (type)(__min1) : (type)(__min2))
#define OSAL_MAX_T(type, __max1, __max2) \
((type)(__max1) > (type)(__max2) ? (type)(__max1) : (type)(__max2))
#define OSAL_GET_PROTOCOL_STATS(p_hwfn, type, stats) (0)
#define OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0)
#endif /* __BCM_OSAL_H */
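
A minimal sketch of how the OSAL list macros above are meant to be used
(illustrative only; the element type is hypothetical, and its embedded
osal_list_entry_t must be the first member so the FIRST_ENTRY/NEXT casts
remain valid):

	struct my_elem {
		osal_list_entry_t list_entry;	/* must be the first member */
		u32 payload;
	};

	static u32 sum_list(void)
	{
		struct my_elem a = { .payload = 1 }, b = { .payload = 2 }, *p;
		osal_list_t lst;
		u32 sum = 0;

		OSAL_LIST_INIT(&lst);
		OSAL_LIST_PUSH_TAIL(&a.list_entry, &lst);
		OSAL_LIST_PUSH_TAIL(&b.list_entry, &lst);

		OSAL_LIST_FOR_EACH_ENTRY(p, &lst, list_entry, struct my_elem)
			sum += p->payload;	/* visits a, then b */

		OSAL_LIST_REMOVE_ENTRY(&a.list_entry, &lst);
		return sum;			/* 3 */
	}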


@@ -0,0 +1,714 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __COMMON_HSI__
#define __COMMON_HSI__
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 7
#define FW_REVISION_VERSION 7
#define FW_ENGINEERING_VERSION 0
/***********************/
/* COMMON HW CONSTANTS */
/***********************/
/* PCI functions */
#define MAX_NUM_PORTS_K2 (4)
#define MAX_NUM_PORTS_BB (2)
#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2)
#define MAX_NUM_PFS_K2 (16)
#define MAX_NUM_PFS_BB (8)
#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
#define MAX_NUM_VFS_K2 (192)
#define MAX_NUM_VFS_BB (120)
#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS)
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS)
#define MAX_NUM_VPORTS_K2 (208)
#define MAX_NUM_VPORTS_BB (160)
#define MAX_NUM_VPORTS (MAX_NUM_VPORTS_K2)
#define MAX_NUM_L2_QUEUES_K2 (320)
#define MAX_NUM_L2_QUEUES_BB (256)
#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2)
/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
#define NUM_PHYS_TCS_4PORT_K2 (4)
#define NUM_OF_PHYS_TCS (8)
#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
#define LB_TC (NUM_OF_PHYS_TCS)
/* Num of possible traffic priority values */
#define NUM_OF_PRIO (8)
#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB)
#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2)
#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
/* CIDs */
#define NUM_OF_CONNECTION_TYPES (8)
#define NUM_OF_LCIDS (320)
#define NUM_OF_LTIDS (320)
/*****************/
/* CDU CONSTANTS */
/*****************/
#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
/*****************/
/* DQ CONSTANTS */
/*****************/
/* DEMS */
#define DQ_DEMS_LEGACY 0
/* XCM agg val selection */
#define DQ_XCM_AGG_VAL_SEL_WORD2 0
#define DQ_XCM_AGG_VAL_SEL_WORD3 1
#define DQ_XCM_AGG_VAL_SEL_WORD4 2
#define DQ_XCM_AGG_VAL_SEL_WORD5 3
#define DQ_XCM_AGG_VAL_SEL_REG3 4
#define DQ_XCM_AGG_VAL_SEL_REG4 5
#define DQ_XCM_AGG_VAL_SEL_REG5 6
#define DQ_XCM_AGG_VAL_SEL_REG6 7
/* XCM agg val selection */
#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \
DQ_XCM_AGG_VAL_SEL_WORD2
#define DQ_XCM_ETH_TX_BD_CONS_CMD \
DQ_XCM_AGG_VAL_SEL_WORD3
#define DQ_XCM_CORE_TX_BD_CONS_CMD \
DQ_XCM_AGG_VAL_SEL_WORD3
#define DQ_XCM_ETH_TX_BD_PROD_CMD \
DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_CORE_TX_BD_PROD_CMD \
DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_CORE_SPQ_PROD_CMD \
DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
/* XCM agg counter flag selection */
#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
#define DQ_XCM_AGG_FLG_SHIFT_CF12 2
#define DQ_XCM_AGG_FLG_SHIFT_CF13 3
#define DQ_XCM_AGG_FLG_SHIFT_CF18 4
#define DQ_XCM_AGG_FLG_SHIFT_CF19 5
#define DQ_XCM_AGG_FLG_SHIFT_CF22 6
#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
/* XCM agg counter flag selection */
#define DQ_XCM_ETH_DQ_CF_CMD (1 << \
DQ_XCM_AGG_FLG_SHIFT_CF18)
#define DQ_XCM_CORE_DQ_CF_CMD (1 << \
DQ_XCM_AGG_FLG_SHIFT_CF18)
#define DQ_XCM_ETH_TERMINATE_CMD (1 << \
DQ_XCM_AGG_FLG_SHIFT_CF19)
#define DQ_XCM_CORE_TERMINATE_CMD (1 << \
DQ_XCM_AGG_FLG_SHIFT_CF19)
#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \
DQ_XCM_AGG_FLG_SHIFT_CF22)
#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \
DQ_XCM_AGG_FLG_SHIFT_CF22)
#define DQ_XCM_ETH_TPH_EN_CMD (1 << \
DQ_XCM_AGG_FLG_SHIFT_CF23)
/*****************/
/* QM CONSTANTS */
/*****************/
/* number of TX queues in the QM */
#define MAX_QM_TX_QUEUES_K2 512
#define MAX_QM_TX_QUEUES_BB 448
#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2
/* number of Other queues in the QM */
#define MAX_QM_OTHER_QUEUES_BB 64
#define MAX_QM_OTHER_QUEUES_K2 128
#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2
/* number of queues in a PF queue group */
#define QM_PF_QUEUE_GROUP_SIZE 8
/* base number of Tx PQs in the CM PQ representation.
* should be used when storing PQ IDs in CM PQ registers and context
*/
#define CM_TX_PQ_BASE 0x200
/* QM registers data */
#define QM_LINE_CRD_REG_WIDTH 16
#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1))
#define QM_BYTE_CRD_REG_WIDTH 24
#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1))
#define QM_WFQ_CRD_REG_WIDTH 32
#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1))
#define QM_RL_CRD_REG_WIDTH 32
#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1))
/*****************/
/* CAU CONSTANTS */
/*****************/
#define CAU_FSM_ETH_RX 0
#define CAU_FSM_ETH_TX 1
/* Number of Protocol Indices per Status Block */
#define PIS_PER_SB 12
#define CAU_HC_STOPPED_STATE 3
#define CAU_HC_DISABLE_STATE 4
#define CAU_HC_ENABLE_STATE 0
/*****************/
/* IGU CONSTANTS */
/*****************/
#define MAX_SB_PER_PATH_K2 (368)
#define MAX_SB_PER_PATH_BB (288)
#define MAX_TOT_SB_PER_PATH \
MAX_SB_PER_PATH_K2
#define MAX_SB_PER_PF_MIMD 129
#define MAX_SB_PER_PF_SIMD 64
#define MAX_SB_PER_VF 64
/* Memory addresses on the BAR for the IGU Sub Block */
#define IGU_MEM_BASE 0x0000
#define IGU_MEM_MSIX_BASE 0x0000
#define IGU_MEM_MSIX_UPPER 0x0101
#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
#define IGU_MEM_PBA_MSIX_BASE 0x0200
#define IGU_MEM_PBA_MSIX_UPPER 0x0202
#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
#define IGU_CMD_INT_ACK_BASE 0x0400
#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
MAX_TOT_SB_PER_PATH - \
1)
#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1
#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2
#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05f3
#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05f4
#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05f5
#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6
#define IGU_CMD_PROD_UPD_BASE 0x0600
#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
MAX_TOT_SB_PER_PATH - \
1)
#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
/*****************/
/* PXP CONSTANTS */
/*****************/
/* PTT and GTT */
#define PXP_NUM_PF_WINDOWS 12
#define PXP_PER_PF_ENTRY_SIZE 8
#define PXP_NUM_GLOBAL_WINDOWS 243
#define PXP_GLOBAL_ENTRY_SIZE 4
#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4
#define PXP_PF_WINDOW_ADMIN_START 0
#define PXP_PF_WINDOW_ADMIN_LENGTH 0x1000
#define PXP_PF_WINDOW_ADMIN_END (PXP_PF_WINDOW_ADMIN_START + \
PXP_PF_WINDOW_ADMIN_LENGTH - 1)
#define PXP_PF_WINDOW_ADMIN_PER_PF_START 0
#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH (PXP_NUM_PF_WINDOWS * \
PXP_PER_PF_ENTRY_SIZE)
#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
#define PXP_PF_WINDOW_ADMIN_GLOBAL_START 0x200
#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH (PXP_NUM_GLOBAL_WINDOWS * \
PXP_GLOBAL_ENTRY_SIZE)
#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \
(PXP_PF_WINDOW_ADMIN_GLOBAL_START + \
PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1)
#define PXP_PF_GLOBAL_PRETEND_ADDR 0x1f0
#define PXP_PF_ME_OPAQUE_MASK_ADDR 0xf4
#define PXP_PF_ME_OPAQUE_ADDR 0x1f8
#define PXP_PF_ME_CONCRETE_ADDR 0x1fc
#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000
#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS
#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000
#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \
(PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \
PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE)
#define PXP_EXTERNAL_BAR_PF_WINDOW_END \
(PXP_EXTERNAL_BAR_PF_WINDOW_START + \
PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1)
#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \
(PXP_EXTERNAL_BAR_PF_WINDOW_END + 1)
#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM PXP_NUM_GLOBAL_WINDOWS
#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE 0x1000
#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \
(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \
PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE)
#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \
(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
/* ILT Records */
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
/******************/
/* PBF CONSTANTS */
/******************/
/* Number of PBF command queue lines. Each line is 32B. */
#define PBF_MAX_CMD_LINES 3328
/* Number of BTB blocks. Each block is 256B. */
#define BTB_MAX_BLOCKS 1440
/*****************/
/* PRS CONSTANTS */
/*****************/
/* Async data KCQ CQE */
struct async_data {
__le32 cid;
__le16 itid;
u8 error_code;
u8 fw_debug_param;
};
struct regpair {
__le32 lo /* low word for reg-pair */;
__le32 hi /* high word for reg-pair */;
};
struct vf_pf_channel_eqe_data {
struct regpair msg_addr /* VF-PF message address */;
};
struct iscsi_eqe_data {
__le32 cid /* Context ID of the connection */;
__le16 conn_id
/* Task Id of the task (for an error that happened on a task) */;
u8 error_code;
u8 reserved0;
};
/*
* Event Ring malicious VF data
*/
struct malicious_vf_eqe_data {
u8 vf_id /* Malicious VF ID */; /* WARNING:CAMELCASE */
u8 err_id /* Malicious VF error */;
__le16 reserved[3];
};
/*
* Event Ring initial cleanup data
*/
struct initial_cleanup_eqe_data {
u8 vf_id /* VF ID */; /* WARNING:CAMELCASE */
u8 reserved[7];
};
union event_ring_data {
u8 bytes[8] /* Byte Array */;
struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
struct regpair roce_handle /* WARNING:CAMELCASE */
/* Dedicated field for RoCE affiliated asynchronous error */;
struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
struct initial_cleanup_eqe_data vf_init_cleanup
/* VF Initial Cleanup data */;
};
/* Event Ring Entry */
struct event_ring_entry {
u8 protocol_id;
u8 opcode;
__le16 reserved0;
__le16 echo;
u8 fw_return_code;
u8 flags;
#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
union event_ring_data data;
};
/* Multi function mode */
enum mf_mode {
SF,
MF_OVLAN,
MF_NPAR,
MAX_MF_MODE
};
/* Per-protocol connection types */
enum protocol_type {
PROTOCOLID_ISCSI /* iSCSI */,
PROTOCOLID_FCOE /* FCoE */,
PROTOCOLID_ROCE /* RoCE */,
PROTOCOLID_CORE /* Core (light L2, slow path core) */,
PROTOCOLID_ETH /* Ethernet */,
PROTOCOLID_IWARP /* iWARP */,
PROTOCOLID_TOE /* TOE */,
PROTOCOLID_PREROCE /* Pre (tapeout) RoCE */,
PROTOCOLID_COMMON /* ProtocolCommon */,
PROTOCOLID_TCP /* TCP */,
MAX_PROTOCOL_TYPE
};
/* status block structure */
struct cau_pi_entry {
u32 prod;
#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F
#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1
#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23
#define CAU_PI_ENTRY_RESERVED_MASK 0xFF
#define CAU_PI_ENTRY_RESERVED_SHIFT 24
};
/* status block structure */
struct cau_sb_entry {
u32 data;
#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
#define CAU_SB_ENTRY_STATE0_MASK 0xF
#define CAU_SB_ENTRY_STATE0_SHIFT 24
#define CAU_SB_ENTRY_STATE1_MASK 0xF
#define CAU_SB_ENTRY_STATE1_SHIFT 28
u32 params;
#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F
#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F
#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3
#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14
#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3
#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16
#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF
#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18
#define CAU_SB_ENTRY_VF_VALID_MASK 0x1
#define CAU_SB_ENTRY_VF_VALID_SHIFT 26
#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF
#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27
#define CAU_SB_ENTRY_TPH_MASK 0x1
#define CAU_SB_ENTRY_TPH_SHIFT 31
};
/* core doorbell data */
struct core_db_data {
u8 params;
#define CORE_DB_DATA_DEST_MASK 0x3
#define CORE_DB_DATA_DEST_SHIFT 0
#define CORE_DB_DATA_AGG_CMD_MASK 0x3
#define CORE_DB_DATA_AGG_CMD_SHIFT 2
#define CORE_DB_DATA_BYPASS_EN_MASK 0x1
#define CORE_DB_DATA_BYPASS_EN_SHIFT 4
#define CORE_DB_DATA_RESERVED_MASK 0x1
#define CORE_DB_DATA_RESERVED_SHIFT 5
#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3
#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
u8 agg_flags;
__le16 spq_prod;
};
/* Enum of doorbell aggregative command selection */
enum db_agg_cmd_sel {
DB_AGG_CMD_NOP,
DB_AGG_CMD_SET,
DB_AGG_CMD_ADD,
DB_AGG_CMD_MAX,
MAX_DB_AGG_CMD_SEL
};
/* Enum of doorbell destination */
enum db_dest {
DB_DEST_XCM,
DB_DEST_UCM,
DB_DEST_TCM,
DB_NUM_DESTINATIONS,
MAX_DB_DEST
};
/* Structure for doorbell address, in legacy mode */
struct db_legacy_addr {
__le32 addr;
#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3
#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
#define DB_LEGACY_ADDR_DEMS_MASK 0x7
#define DB_LEGACY_ADDR_DEMS_SHIFT 2
#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF
#define DB_LEGACY_ADDR_ICID_SHIFT 5
};
/* Igu interrupt command */
enum igu_int_cmd {
IGU_INT_ENABLE = 0,
IGU_INT_DISABLE = 1,
IGU_INT_NOP = 2,
IGU_INT_NOP2 = 3,
MAX_IGU_INT_CMD
};
/* IGU producer or consumer update command */
struct igu_prod_cons_update {
u32 sb_id_and_flags;
#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1
#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24
#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3
#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25
#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1
#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1
#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28
#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3
#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1
#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
u32 reserved1;
};
/* Igu segments access for default status block only */
enum igu_seg_access {
IGU_SEG_ACCESS_REG = 0,
IGU_SEG_ACCESS_ATTN = 1,
MAX_IGU_SEG_ACCESS
};
struct parsing_and_err_flags {
__le16 flags;
#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3
#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2
#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1
#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4
#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5
#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1
#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6
#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7
#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8
#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1
#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9
#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1
#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10
#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11
#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12
#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
};
/* Concrete Function ID. */
struct pxp_concrete_fid {
__le16 fid;
#define PXP_CONCRETE_FID_PFID_MASK 0xF
#define PXP_CONCRETE_FID_PFID_SHIFT 0
#define PXP_CONCRETE_FID_PORT_MASK 0x3
#define PXP_CONCRETE_FID_PORT_SHIFT 4
#define PXP_CONCRETE_FID_PATH_MASK 0x1
#define PXP_CONCRETE_FID_PATH_SHIFT 6
#define PXP_CONCRETE_FID_VFVALID_MASK 0x1
#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
#define PXP_CONCRETE_FID_VFID_MASK 0xFF
#define PXP_CONCRETE_FID_VFID_SHIFT 8
};
struct pxp_pretend_concrete_fid {
__le16 fid;
#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF
#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0
#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7
#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1
#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7
#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF
#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8
};
union pxp_pretend_fid {
struct pxp_pretend_concrete_fid concrete_fid;
__le16 opaque_fid;
};
/* Pxp Pretend Command Register. */
struct pxp_pretend_cmd {
union pxp_pretend_fid fid;
__le16 control;
#define PXP_PRETEND_CMD_PATH_MASK 0x1
#define PXP_PRETEND_CMD_PATH_SHIFT 0
#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1
#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1
#define PXP_PRETEND_CMD_PORT_MASK 0x3
#define PXP_PRETEND_CMD_PORT_SHIFT 2
#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF
#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4
#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF
#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8
#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1
#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12
#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1
#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13
#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1
#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1
#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
};
/* PTT Record in PXP Admin Window. */
struct pxp_ptt_entry {
__le32 offset;
#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF
#define PXP_PTT_ENTRY_OFFSET_SHIFT 0
#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF
#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
struct pxp_pretend_cmd pretend;
};
/* RSS hash type */
enum rss_hash_type {
RSS_HASH_TYPE_DEFAULT = 0,
RSS_HASH_TYPE_IPV4 = 1,
RSS_HASH_TYPE_TCP_IPV4 = 2,
RSS_HASH_TYPE_IPV6 = 3,
RSS_HASH_TYPE_TCP_IPV6 = 4,
RSS_HASH_TYPE_UDP_IPV4 = 5,
RSS_HASH_TYPE_UDP_IPV6 = 6,
MAX_RSS_HASH_TYPE
};
/* status block structure */
struct status_block {
__le16 pi_array[PIS_PER_SB];
__le32 sb_num;
#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
#define STATUS_BLOCK_SB_NUM_SHIFT 0
#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
__le32 prod_index;
#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
};
/* @DPDK */
#define X_FINAL_CLEANUP_AGG_INT 1
#define SDM_COMP_TYPE_AGG_INT 2
#define MAX_NUM_LL2_RX_QUEUES 32
#define QM_PQ_ELEMENT_SIZE 4
#define PXP_VF_BAR0_START_IGU 0
#define EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE 3
#define TSTORM_QZONE_SIZE 8
#define MSTORM_QZONE_SIZE 16
#define USTORM_QZONE_SIZE 8
#define XSTORM_QZONE_SIZE 0
#define YSTORM_QZONE_SIZE 8
#define PSTORM_QZONE_SIZE 0
/* VF BAR */
#define PXP_VF_BAR0 0
#define PXP_VF_BAR0_START_GRC 0x3E00
#define PXP_VF_BAR0_GRC_LENGTH 0x200
#define PXP_VF_BAR0_END_GRC \
(PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1)
#define PXP_VF_BAR0_START_IGU 0
#define PXP_VF_BAR0_IGU_LENGTH 0x3000
#define PXP_VF_BAR0_END_IGU \
(PXP_VF_BAR0_START_IGU + PXP_VF_BAR0_IGU_LENGTH - 1)
#define PXP_VF_BAR0_START_DQ 0x3000
#define PXP_VF_BAR0_DQ_LENGTH 0x200
#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS \
(PXP_VF_BAR0_ME_OPAQUE_ADDRESS + 4)
#define PXP_VF_BAR0_END_DQ \
(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_LENGTH - 1)
#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
#define PXP_VF_BAR0_END_TSDM_ZONE_B \
(PXP_VF_BAR0_START_TSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
#define PXP_VF_BAR0_END_MSDM_ZONE_B \
(PXP_VF_BAR0_START_MSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
#define PXP_VF_BAR0_END_USDM_ZONE_B \
(PXP_VF_BAR0_START_USDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
#define PXP_VF_BAR0_END_XSDM_ZONE_B \
(PXP_VF_BAR0_START_XSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
#define PXP_VF_BAR0_END_YSDM_ZONE_B \
(PXP_VF_BAR0_START_YSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
#define PXP_VF_BAR0_END_PSDM_ZONE_B \
(PXP_VF_BAR0_START_PSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
#endif /* __COMMON_HSI__ */
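
As a quick illustration of the _MASK/_SHIFT convention used throughout this
header, a sketch of reading one field (every field is stored as
(value << _SHIFT) under _MASK; OSAL_LE32_TO_CPU comes from bcm_osal.h and the
sb pointer is assumed to reference a DMA'd status block):

	static u32 sb_prod_index(const struct status_block *sb)
	{
		u32 raw = OSAL_LE32_TO_CPU(sb->prod_index);

		return (raw >> STATUS_BLOCK_PROD_INDEX_SHIFT) &
			STATUS_BLOCK_PROD_INDEX_MASK;
	}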


@@ -0,0 +1,742 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_H
#define __ECORE_H
#include "ecore_hsi_common.h"
#include "ecore_hsi_tools.h"
#include "ecore_proto_if.h"
#include "mcp_public.h"
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 64 /* @DPDK */
#define VER_SIZE 16
/* @DPDK ARRAY_DECL */
#define ECORE_WFQ_UNIT 100
#include "../qede_logs.h" /* @DPDK */
/* Constants */
#define ECORE_WID_SIZE (1024)
/* Configurable */
#define ECORE_PF_DEMS_SIZE (4)
/* cau states */
enum ecore_coalescing_mode {
ECORE_COAL_MODE_DISABLE,
ECORE_COAL_MODE_ENABLE
};
enum ecore_nvm_cmd {
ECORE_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN,
ECORE_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA,
ECORE_NVM_READ_NVRAM = DRV_MSG_CODE_NVM_READ_NVRAM,
ECORE_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
ECORE_NVM_DEL_FILE = DRV_MSG_CODE_NVM_DEL_FILE,
ECORE_NVM_SET_SECURE_MODE = DRV_MSG_CODE_SET_SECURE_MODE,
ECORE_PHY_RAW_READ = DRV_MSG_CODE_PHY_RAW_READ,
ECORE_PHY_RAW_WRITE = DRV_MSG_CODE_PHY_RAW_WRITE,
ECORE_PHY_CORE_READ = DRV_MSG_CODE_PHY_CORE_READ,
ECORE_PHY_CORE_WRITE = DRV_MSG_CODE_PHY_CORE_WRITE,
ECORE_GET_MCP_NVM_RESP = 0xFFFFFF00
};
#ifndef LINUX_REMOVE
#if !defined(CONFIG_ECORE_L2)
#define CONFIG_ECORE_L2
#endif
#endif
/* helpers */
#ifndef __EXTRACT__LINUX__
#define MASK_FIELD(_name, _value) \
((_value) &= (_name##_MASK))
#define FIELD_VALUE(_name, _value) \
((_value & _name##_MASK) << _name##_SHIFT)
#define SET_FIELD(value, name, flag) \
do { \
(value) &= ~(name##_MASK << name##_SHIFT); \
(value) |= (((u64)flag) << (name##_SHIFT)); \
} while (0)
#define GET_FIELD(value, name) \
(((value) >> (name##_SHIFT)) & name##_MASK)
#endif
static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
(cid * ECORE_PF_DEMS_SIZE);
return db_addr;
}
#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
((sizeof(type_name) + (u32)(1 << (p_hwfn->p_dev->cache_shift)) - 1) & \
~((1 << (p_hwfn->p_dev->cache_shift)) - 1))
#ifndef U64_HI
#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
#endif
#ifndef U64_LO
#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
#endif
#ifndef __EXTRACT__LINUX__
enum DP_LEVEL {
ECORE_LEVEL_VERBOSE = 0x0,
ECORE_LEVEL_INFO = 0x1,
ECORE_LEVEL_NOTICE = 0x2,
ECORE_LEVEL_ERR = 0x3,
};
#define ECORE_LOG_LEVEL_SHIFT (30)
#define ECORE_LOG_VERBOSE_MASK (0x3fffffff)
#define ECORE_LOG_INFO_MASK (0x40000000)
#define ECORE_LOG_NOTICE_MASK (0x80000000)
enum DP_MODULE {
#ifndef LINUX_REMOVE
ECORE_MSG_DRV = 0x0001,
ECORE_MSG_PROBE = 0x0002,
ECORE_MSG_LINK = 0x0004,
ECORE_MSG_TIMER = 0x0008,
ECORE_MSG_IFDOWN = 0x0010,
ECORE_MSG_IFUP = 0x0020,
ECORE_MSG_RX_ERR = 0x0040,
ECORE_MSG_TX_ERR = 0x0080,
ECORE_MSG_TX_QUEUED = 0x0100,
ECORE_MSG_INTR = 0x0200,
ECORE_MSG_TX_DONE = 0x0400,
ECORE_MSG_RX_STATUS = 0x0800,
ECORE_MSG_PKTDATA = 0x1000,
ECORE_MSG_HW = 0x2000,
ECORE_MSG_WOL = 0x4000,
#endif
ECORE_MSG_SPQ = 0x10000,
ECORE_MSG_STATS = 0x20000,
ECORE_MSG_DCB = 0x40000,
ECORE_MSG_IOV = 0x80000,
ECORE_MSG_SP = 0x100000,
ECORE_MSG_STORAGE = 0x200000,
ECORE_MSG_CXT = 0x800000,
ECORE_MSG_ILT = 0x2000000,
ECORE_MSG_DEBUG = 0x8000000,
/* to be added...up to 0x8000000 */
};
#endif
#define for_each_hwfn(p_dev, i) for (i = 0; i < p_dev->num_hwfns; i++)
#define D_TRINE(val, cond1, cond2, true1, true2, def) \
(val == (cond1) ? true1 : \
(val == (cond2) ? true2 : def))
/* forward */
struct ecore_ptt_pool;
struct ecore_spq;
struct ecore_sb_info;
struct ecore_sb_attn_info;
struct ecore_cxt_mngr;
struct ecore_dma_mem;
struct ecore_sb_sp_info;
struct ecore_igu_info;
struct ecore_mcp_info;
struct ecore_rt_data {
u32 *init_val;
bool *b_valid;
};
enum ecore_tunn_mode {
ECORE_MODE_L2GENEVE_TUNN,
ECORE_MODE_IPGENEVE_TUNN,
ECORE_MODE_L2GRE_TUNN,
ECORE_MODE_IPGRE_TUNN,
ECORE_MODE_VXLAN_TUNN,
};
enum ecore_tunn_clss {
ECORE_TUNN_CLSS_MAC_VLAN,
ECORE_TUNN_CLSS_MAC_VNI,
ECORE_TUNN_CLSS_INNER_MAC_VLAN,
ECORE_TUNN_CLSS_INNER_MAC_VNI,
MAX_ECORE_TUNN_CLSS,
};
struct ecore_tunn_start_params {
unsigned long tunn_mode;
u16 vxlan_udp_port;
u16 geneve_udp_port;
u8 update_vxlan_udp_port;
u8 update_geneve_udp_port;
u8 tunn_clss_vxlan;
u8 tunn_clss_l2geneve;
u8 tunn_clss_ipgeneve;
u8 tunn_clss_l2gre;
u8 tunn_clss_ipgre;
};
struct ecore_tunn_update_params {
unsigned long tunn_mode_update_mask;
unsigned long tunn_mode;
u16 vxlan_udp_port;
u16 geneve_udp_port;
u8 update_rx_pf_clss;
u8 update_tx_pf_clss;
u8 update_vxlan_udp_port;
u8 update_geneve_udp_port;
u8 tunn_clss_vxlan;
u8 tunn_clss_l2geneve;
u8 tunn_clss_ipgeneve;
u8 tunn_clss_l2gre;
u8 tunn_clss_ipgre;
};
struct ecore_hw_sriov_info {
/* standard SRIOV capability fields, mostly for debugging */
int pos; /* capability position */
int nres; /* number of resources */
u32 cap; /* SR-IOV Capabilities */
u16 ctrl; /* SR-IOV Control */
u16 total_vfs; /* total VFs associated with the PF */
u16 num_vfs; /* number of vfs that have been started */
u64 active_vfs[3]; /* bitfield of active vfs */
#define ECORE_IS_VF_ACTIVE(_p_dev, _rel_vf_id) \
(!!(_p_dev->sriov_info.active_vfs[_rel_vf_id / 64] & \
(1ULL << (_rel_vf_id % 64))))
u16 initial_vfs; /* initial VFs associated with the PF */
u16 nr_virtfn; /* number of VFs available */
u16 offset; /* first VF Routing ID offset */
u16 stride; /* following VF stride */
u16 vf_device_id; /* VF device id */
u32 pgsz; /* page size for BAR alignment */
u8 link; /* Function Dependency Link */
bool b_hw_channel; /* Whether PF uses the HW-channel */
};
/* The PCI personality is not quite synonymous with the protocol ID:
* 1. All personalities need CORE connections
* 2. The Ethernet personality may also support the RoCE protocol
*/
enum ecore_pci_personality {
ECORE_PCI_ETH,
ECORE_PCI_DEFAULT /* default in shmem */
};
/* All VFs are symmetric, all counters are PF + all VFs */
struct ecore_qm_iids {
u32 cids;
u32 vf_cids;
u32 tids;
};
#define MAX_PF_PER_PORT 8
/*@@@TBD MK RESC: need to remove and use MCP interface instead */
/* HW / FW resources, output of features supported below, most information
* is received from MFW.
*/
enum ECORE_RESOURCES {
ECORE_SB,
ECORE_L2_QUEUE,
ECORE_VPORT,
ECORE_RSS_ENG,
ECORE_PQ,
ECORE_RL,
ECORE_MAC,
ECORE_VLAN,
ECORE_ILT,
ECORE_CMDQS_CQS,
ECORE_MAX_RESC,
};
/* Features that require resources, given as input to the resource management
* algorithm, the output are the resources above
*/
enum ECORE_FEATURE {
ECORE_PF_L2_QUE,
ECORE_PF_TC,
ECORE_VF,
ECORE_EXTRA_VF_QUE,
ECORE_VMQ,
ECORE_MAX_FEATURES,
};
enum ECORE_PORT_MODE {
ECORE_PORT_MODE_DE_2X40G,
ECORE_PORT_MODE_DE_2X50G,
ECORE_PORT_MODE_DE_1X100G,
ECORE_PORT_MODE_DE_4X10G_F,
ECORE_PORT_MODE_DE_4X10G_E,
ECORE_PORT_MODE_DE_4X20G,
ECORE_PORT_MODE_DE_1X40G,
ECORE_PORT_MODE_DE_2X25G,
ECORE_PORT_MODE_DE_1X25G
};
enum ecore_dev_cap {
ECORE_DEV_CAP_ETH,
};
#ifndef __EXTRACT__LINUX__
enum ecore_hw_err_type {
ECORE_HW_ERR_FAN_FAIL,
ECORE_HW_ERR_MFW_RESP_FAIL,
ECORE_HW_ERR_HW_ATTN,
ECORE_HW_ERR_DMAE_FAIL,
ECORE_HW_ERR_RAMROD_FAIL,
ECORE_HW_ERR_FW_ASSERT,
};
#endif
struct ecore_hw_info {
/* PCI personality */
enum ecore_pci_personality personality;
/* Resource Allocation scheme results */
u32 resc_start[ECORE_MAX_RESC];
u32 resc_num[ECORE_MAX_RESC];
u32 feat_num[ECORE_MAX_FEATURES];
#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
RESC_NUM(_p_hwfn, resc))
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
u8 num_tc;
u8 ooo_tc;
u8 offload_tc;
u8 non_offload_tc;
u32 concrete_fid;
u16 opaque_fid;
u16 ovlan;
u32 part_num[4];
unsigned char hw_mac_addr[ETH_ALEN];
struct ecore_igu_info *p_igu_info;
/* Sriov */
u32 first_vf_in_pf;
u8 max_chains_per_vf;
u32 port_mode;
u32 hw_mode;
unsigned long device_capabilities;
};
struct ecore_hw_cid_data {
u32 cid;
bool b_cid_allocated;
u8 vfid; /* 1-based; 0 signals this is for a PF */
/* Additional identifiers */
u16 opaque_fid;
u8 vport_id;
};
/* maximum size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE 0x2000
struct ecore_dmae_info {
/* Mutex for synchronizing access to functions */
osal_mutex_t mutex;
u8 channel;
dma_addr_t completion_word_phys_addr;
/* The memory location where the DMAE writes the completion
* value when an operation is finished on this context.
*/
u32 *p_completion_word;
dma_addr_t intermediate_buffer_phys_addr;
/* An intermediate buffer for DMAE operations that use virtual
* addresses - data is DMA'd to/from this buffer and then
* memcpy'd to/from the virtual address
*/
u32 *p_intermediate_buffer;
dma_addr_t dmae_cmd_phys_addr;
struct dmae_cmd *p_dmae_cmd;
};
struct ecore_wfq_data {
u32 default_min_speed; /* When wfq feature is not configured */
u32 min_speed; /* when feature is configured for any 1 vport */
bool configured;
};
struct ecore_qm_info {
struct init_qm_pq_params *qm_pq_params;
struct init_qm_vport_params *qm_vport_params;
struct init_qm_port_params *qm_port_params;
u16 start_pq;
u8 start_vport;
u8 pure_lb_pq;
u8 offload_pq;
u8 pure_ack_pq;
u8 ooo_pq;
u8 vf_queues_offset;
u16 num_pqs;
u16 num_vf_pqs;
u8 num_vports;
u8 max_phys_tcs_per_port;
bool pf_rl_en;
bool pf_wfq_en;
bool vport_rl_en;
bool vport_wfq_en;
u8 pf_wfq;
u32 pf_rl;
struct ecore_wfq_data *wfq_data;
};
struct storm_stats {
u32 address;
u32 len;
};
#define CONFIG_ECORE_BINARY_FW
#define CONFIG_ECORE_ZIPPED_FW
struct ecore_fw_data {
#ifdef CONFIG_ECORE_BINARY_FW
struct fw_ver_info *fw_ver_info;
#endif
const u8 *modes_tree_buf;
union init_op *init_ops;
const u32 *arr_data;
u32 init_ops_size;
};
struct ecore_hwfn {
struct ecore_dev *p_dev;
u8 my_id; /* ID inside the PF */
#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
u8 rel_pf_id; /* Relative to engine */
u8 abs_pf_id;
#define ECORE_PATH_ID(_p_hwfn) \
(ECORE_IS_K2((_p_hwfn)->p_dev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
u8 port_id;
bool b_active;
u32 dp_module;
u8 dp_level;
char name[NAME_SIZE];
void *dp_ctx;
bool first_on_engine;
bool hw_init_done;
u8 num_funcs_on_engine;
/* BAR access */
void OSAL_IOMEM *regview;
void OSAL_IOMEM *doorbells;
u64 db_phys_addr;
unsigned long db_size;
/* PTT pool */
struct ecore_ptt_pool *p_ptt_pool;
/* HW info */
struct ecore_hw_info hw_info;
/* rt_array (for init-tool) */
struct ecore_rt_data rt_data;
/* SPQ */
struct ecore_spq *p_spq;
/* EQ */
struct ecore_eq *p_eq;
/* Consolidate Q */
struct ecore_consq *p_consq;
/* Slow-Path definitions */
osal_dpc_t sp_dpc;
bool b_sp_dpc_enabled;
struct ecore_ptt *p_main_ptt;
struct ecore_ptt *p_dpc_ptt;
struct ecore_sb_sp_info *p_sp_sb;
struct ecore_sb_attn_info *p_sb_attn;
/* Protocol related */
struct ecore_ooo_info *p_ooo_info;
struct ecore_pf_params pf_params;
/* Array of sb_info of all status blocks */
struct ecore_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
u16 num_sbs;
struct ecore_cxt_mngr *p_cxt_mngr;
/* Flag indicating whether interrupts are enabled or not */
bool b_int_enabled;
bool b_int_requested;
/* True if the driver requests for the link */
bool b_drv_link_init;
struct ecore_vf_iov *vf_iov_info;
struct ecore_pf_iov *pf_iov_info;
struct ecore_mcp_info *mcp_info;
struct ecore_hw_cid_data *p_tx_cids;
struct ecore_hw_cid_data *p_rx_cids;
struct ecore_dmae_info dmae_info;
/* QM init */
struct ecore_qm_info qm_info;
/* Buffer for unzipping firmware data */
#ifdef CONFIG_ECORE_ZIPPED_FW
void *unzip_buf;
#endif
struct dbg_tools_data dbg_info;
struct z_stream_s *stream;
/* PWM region specific data */
u32 dpi_size;
u32 dpi_count;
u32 dpi_start_offset; /* this is used to
* calculate the
* doorbell address
*/
};
#ifndef __EXTRACT__LINUX__
enum ecore_mf_mode {
ECORE_MF_DEFAULT,
ECORE_MF_OVLAN,
ECORE_MF_NPAR,
};
#endif
struct ecore_dev {
u32 dp_module;
u8 dp_level;
char name[NAME_SIZE];
void *dp_ctx;
u8 type;
#define ECORE_DEV_TYPE_BB (0 << 0)
#define ECORE_DEV_TYPE_AH (1 << 0)
/* Translate type/revision combo into the proper conditions */
#define ECORE_IS_BB(dev) ((dev)->type == ECORE_DEV_TYPE_BB)
#define ECORE_IS_BB_A0(dev) (ECORE_IS_BB(dev) && \
CHIP_REV_IS_A0(dev))
#define ECORE_IS_BB_B0(dev) (ECORE_IS_BB(dev) && \
CHIP_REV_IS_B0(dev))
#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)
#define ECORE_GET_TYPE(dev) (ECORE_IS_BB_A0(dev) ? CHIP_BB_A0 : \
ECORE_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
u16 vendor_id;
u16 device_id;
u16 chip_num;
#define CHIP_NUM_MASK 0xffff
#define CHIP_NUM_SHIFT 16
u16 chip_rev;
#define CHIP_REV_MASK 0xf
#define CHIP_REV_SHIFT 12
#ifndef ASIC_ONLY
#define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
#define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
#define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
#define CHIP_REV_IS_EMUL(_p_dev) (CHIP_REV_IS_EMUL_A0(_p_dev) || \
CHIP_REV_IS_EMUL_B0(_p_dev))
#define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
#define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
#define CHIP_REV_IS_FPGA(_p_dev) (CHIP_REV_IS_FPGA_A0(_p_dev) || \
CHIP_REV_IS_FPGA_B0(_p_dev))
#define CHIP_REV_IS_SLOW(_p_dev) \
(CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
#define CHIP_REV_IS_A0(_p_dev) \
(CHIP_REV_IS_EMUL_A0(_p_dev) || \
CHIP_REV_IS_FPGA_A0(_p_dev) || \
!(_p_dev)->chip_rev)
#define CHIP_REV_IS_B0(_p_dev) \
(CHIP_REV_IS_EMUL_B0(_p_dev) || \
CHIP_REV_IS_FPGA_B0(_p_dev) || \
(_p_dev)->chip_rev == 1)
#define CHIP_REV_IS_ASIC(_p_dev) (!CHIP_REV_IS_SLOW(_p_dev))
#else
#define CHIP_REV_IS_A0(_p_dev) (!(_p_dev)->chip_rev)
#define CHIP_REV_IS_B0(_p_dev) ((_p_dev)->chip_rev == 1)
#endif
u16 chip_metal;
#define CHIP_METAL_MASK 0xff
#define CHIP_METAL_SHIFT 4
u16 chip_bond_id;
#define CHIP_BOND_ID_MASK 0xf
#define CHIP_BOND_ID_SHIFT 0
u8 num_engines;
u8 num_ports_in_engines;
u8 num_funcs_in_port;
u8 path_id;
enum ecore_mf_mode mf_mode;
#define IS_MF_DEFAULT(_p_hwfn) \
(((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
int pcie_width;
int pcie_speed;
u8 ver_str[VER_SIZE];
/* Add MF related configuration */
u8 mcp_rev;
u8 boot_mode;
u8 wol;
u32 int_mode;
enum ecore_coalescing_mode int_coalescing_mode;
u8 rx_coalesce_usecs;
u8 tx_coalesce_usecs;
/* Start Bar offset of first hwfn */
void OSAL_IOMEM *regview;
void OSAL_IOMEM *doorbells;
u64 db_phys_addr;
unsigned long db_size;
/* PCI */
u8 cache_shift;
/* Init */
const struct iro *iro_arr;
#define IRO (p_hwfn->p_dev->iro_arr)
/* HW functions */
u8 num_hwfns;
struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
/* SRIOV */
struct ecore_hw_sriov_info sriov_info;
unsigned long tunn_mode;
#define IS_ECORE_SRIOV(edev) (!!((edev)->sriov_info.total_vfs))
bool b_is_vf;
u32 drv_type;
struct ecore_eth_stats *reset_stats;
struct ecore_fw_data *fw_data;
u32 mcp_nvm_resp;
/* Recovery */
bool recov_in_prog;
#ifndef ASIC_ONLY
bool b_is_emul_full;
#endif
void *firmware;
u64 fw_len;
};
#define NUM_OF_VFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_VFS_BB \
: MAX_NUM_VFS_K2)
#define NUM_OF_L2_QUEUES(dev) (ECORE_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
: MAX_NUM_L2_QUEUES_K2)
#define NUM_OF_PORTS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PORTS_BB \
: MAX_NUM_PORTS_K2)
#define NUM_OF_SBS(dev) (ECORE_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
: MAX_SB_PER_PATH_K2)
#define NUM_OF_ENG_PFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
: MAX_NUM_PFS_K2)
#define ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn) ( \
(ECORE_IS_BB_A0(p_hwfn->p_dev)) && \
(ECORE_PATH_ID(p_hwfn) == 1) && \
((p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X40G) || \
(p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X50G) || \
(p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X25G)))
/**
* @brief ecore_concrete_to_sw_fid - get the sw function id from
* the concrete value.
*
* @param concrete_fid
*
* @return OSAL_INLINE u8
*/
static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev,
u32 concrete_fid)
{
u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
u8 vf_valid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID);
u8 sw_fid;
if (vf_valid)
sw_fid = vfid + MAX_NUM_PFS;
else
sw_fid = pfid;
return sw_fid;
}
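/* Illustrative note (not part of the original driver code; MAX_NUM_PFS value
 * assumed for the example): if, say, MAX_NUM_PFS == 16, a concrete FID with
 * vf_valid set and vfid == 3 yields sw_fid == 19, while a concrete FID with
 * vf_valid clear and pfid == 2 yields sw_fid == 2. PF software FIDs thus
 * occupy [0, MAX_NUM_PFS) and VF software FIDs follow immediately after.
 */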
#define PURE_LB_TC 8
#define OOO_LB_TC 9
static OSAL_INLINE u16 ecore_sriov_get_next_vf(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id)
{
u16 i;
for (i = rel_vf_id; i < p_hwfn->p_dev->sriov_info.total_vfs; i++)
if (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, i))
return i;
return p_hwfn->p_dev->sriov_info.total_vfs;
}
int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
u32 min_pf_rate);
int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw);
int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw);
void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
int ecore_device_num_engines(struct ecore_dev *p_dev);
int ecore_device_num_ports(struct ecore_dev *p_dev);
#define ecore_for_each_vf(_p_hwfn, _i) \
for (_i = ecore_sriov_get_next_vf(_p_hwfn, 0); \
_i < _p_hwfn->p_dev->sriov_info.total_vfs; \
_i = ecore_sriov_get_next_vf(_p_hwfn, _i + 1))
#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
#endif /* __ECORE_H */


@ -0,0 +1,718 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_CHAIN_H__
#define __ECORE_CHAIN_H__
#include <assert.h> /* @DPDK */
#include "common_hsi.h"
#include "ecore_utils.h"
enum ecore_chain_mode {
/* Each Page contains a next pointer at its end */
ECORE_CHAIN_MODE_NEXT_PTR,
	/* Chain is a single page; a next pointer is not required */
ECORE_CHAIN_MODE_SINGLE,
/* Page pointers are located in a side list */
ECORE_CHAIN_MODE_PBL,
};
enum ecore_chain_use_mode {
ECORE_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */
ECORE_CHAIN_USE_TO_CONSUME, /* Chain starts full */
ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
};
enum ecore_chain_cnt_type {
/* The chain's size/prod/cons are kept in 16-bit variables */
ECORE_CHAIN_CNT_TYPE_U16,
/* The chain's size/prod/cons are kept in 32-bit variables */
ECORE_CHAIN_CNT_TYPE_U32,
};
struct ecore_chain_next {
struct regpair next_phys;
void *next_virt;
};
struct ecore_chain_pbl_u16 {
u16 prod_page_idx;
u16 cons_page_idx;
};
struct ecore_chain_pbl_u32 {
u32 prod_page_idx;
u32 cons_page_idx;
};
struct ecore_chain_pbl {
/* Base address of a pre-allocated buffer for pbl */
dma_addr_t p_phys_table;
void *p_virt_table;
/* Table for keeping the virtual addresses of the chain pages,
* respectively to the physical addresses in the pbl table.
*/
void **pp_virt_addr_tbl;
/* Index to current used page by producer/consumer */
union {
struct ecore_chain_pbl_u16 pbl16;
struct ecore_chain_pbl_u32 pbl32;
} u;
};
struct ecore_chain_u16 {
	/* Cyclic index of next element to produce/consume */
u16 prod_idx;
u16 cons_idx;
};
struct ecore_chain_u32 {
	/* Cyclic index of next element to produce/consume */
u32 prod_idx;
u32 cons_idx;
};
struct ecore_chain {
/* Address of first page of the chain */
void *p_virt_addr;
dma_addr_t p_phys_addr;
/* Point to next element to produce/consume */
void *p_prod_elem;
void *p_cons_elem;
enum ecore_chain_mode mode;
enum ecore_chain_use_mode intended_use;
enum ecore_chain_cnt_type cnt_type;
union {
struct ecore_chain_u16 chain16;
struct ecore_chain_u32 chain32;
} u;
u32 page_cnt;
/* Number of elements - capacity is for usable elements only,
* while size will contain total number of elements [for entire chain].
*/
u32 capacity;
u32 size;
/* Elements information for fast calculations */
u16 elem_per_page;
u16 elem_per_page_mask;
u16 elem_unusable;
u16 usable_per_page;
u16 elem_size;
u16 next_page_mask;
struct ecore_chain_pbl pbl;
};
#define ECORE_CHAIN_PBL_ENTRY_SIZE (8)
#define ECORE_CHAIN_PAGE_SIZE (0x1000)
#define ELEMS_PER_PAGE(elem_size) (ECORE_CHAIN_PAGE_SIZE / (elem_size))
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
((mode == ECORE_CHAIN_MODE_NEXT_PTR) ? \
(1 + ((sizeof(struct ecore_chain_next) - 1) / \
(elem_size))) : 0)
#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
((u32)(ELEMS_PER_PAGE(elem_size) - \
UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
#define ECORE_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
#define is_chain_u16(p) ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p) ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U32)
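/* Worked example (illustrative only; a 16-byte struct ecore_chain_next on a
 * 64-bit build is assumed): for 8-byte elements in ECORE_CHAIN_MODE_NEXT_PTR,
 * ELEMS_PER_PAGE(8) == 0x1000 / 8 == 512 and
 * UNUSABLE_ELEMS_PER_PAGE(8, NEXT_PTR) == 1 + (16 - 1) / 8 == 2, so
 * USABLE_ELEMS_PER_PAGE == 510. A request for 1020 elements then needs
 * ECORE_CHAIN_PAGE_CNT(1020, 8, NEXT_PTR) == 2 pages, giving
 * capacity == 1020 and size == 1024.
 */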
/* Accessors */
static OSAL_INLINE u16 ecore_chain_get_prod_idx(struct ecore_chain *p_chain)
{
OSAL_ASSERT(is_chain_u16(p_chain));
return p_chain->u.chain16.prod_idx;
}
static OSAL_INLINE u32 ecore_chain_get_prod_idx_u32(struct ecore_chain *p_chain)
{
OSAL_ASSERT(is_chain_u32(p_chain));
return p_chain->u.chain32.prod_idx;
}
static OSAL_INLINE u16 ecore_chain_get_cons_idx(struct ecore_chain *p_chain)
{
OSAL_ASSERT(is_chain_u16(p_chain));
return p_chain->u.chain16.cons_idx;
}
static OSAL_INLINE u32 ecore_chain_get_cons_idx_u32(struct ecore_chain *p_chain)
{
OSAL_ASSERT(is_chain_u32(p_chain));
return p_chain->u.chain32.cons_idx;
}
/* FIXME:
* Should create OSALs for the below definitions.
* For Linux, replace them with the existing U16_MAX and U32_MAX, and handle
* kernel versions that lack them.
*/
#define ECORE_U16_MAX ((u16)~0U)
#define ECORE_U32_MAX ((u32)~0U)
static OSAL_INLINE u16 ecore_chain_get_elem_left(struct ecore_chain *p_chain)
{
u16 used;
OSAL_ASSERT(is_chain_u16(p_chain));
used = (u16)(((u32)ECORE_U16_MAX + 1 +
(u32)(p_chain->u.chain16.prod_idx)) -
(u32)p_chain->u.chain16.cons_idx);
if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
return (u16)(p_chain->capacity - used);
}
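/* Arithmetic sketch (illustrative values, not from the original sources): the
 * (ECORE_U16_MAX + 1) bias makes the subtraction wrap correctly after the
 * 16-bit producer index overflows. E.g., with prod_idx == 2 and
 * cons_idx == 0xfffe, used == (0x10000 + 2 - 0xfffe) == 4; with a capacity of
 * 510 that leaves 506 free elements (ignoring the NEXT_PTR page correction).
 */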
static OSAL_INLINE u32
ecore_chain_get_elem_left_u32(struct ecore_chain *p_chain)
{
u32 used;
OSAL_ASSERT(is_chain_u32(p_chain));
used = (u32)(((u64)ECORE_U32_MAX + 1 +
(u64)(p_chain->u.chain32.prod_idx)) -
(u64)p_chain->u.chain32.cons_idx);
if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
return p_chain->capacity - used;
}
static OSAL_INLINE u8 ecore_chain_is_full(struct ecore_chain *p_chain)
{
if (is_chain_u16(p_chain))
return (ecore_chain_get_elem_left(p_chain) ==
p_chain->capacity);
else
return (ecore_chain_get_elem_left_u32(p_chain) ==
p_chain->capacity);
}
static OSAL_INLINE u8 ecore_chain_is_empty(struct ecore_chain *p_chain)
{
if (is_chain_u16(p_chain))
return (ecore_chain_get_elem_left(p_chain) == 0);
else
return (ecore_chain_get_elem_left_u32(p_chain) == 0);
}
static OSAL_INLINE
u16 ecore_chain_get_elem_per_page(struct ecore_chain *p_chain)
{
return p_chain->elem_per_page;
}
static OSAL_INLINE
u16 ecore_chain_get_usable_per_page(struct ecore_chain *p_chain)
{
return p_chain->usable_per_page;
}
static OSAL_INLINE
u16 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain)
{
return p_chain->elem_unusable;
}
static OSAL_INLINE u32 ecore_chain_get_size(struct ecore_chain *p_chain)
{
return p_chain->size;
}
static OSAL_INLINE u32 ecore_chain_get_page_cnt(struct ecore_chain *p_chain)
{
return p_chain->page_cnt;
}
/**
* @brief ecore_chain_advance_page -
*
 * Advance the next element across pages for a linked chain
*
* @param p_chain
* @param p_next_elem
* @param idx_to_inc
* @param page_to_inc
*/
static OSAL_INLINE void
ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem,
void *idx_to_inc, void *page_to_inc)
{
struct ecore_chain_next *p_next = OSAL_NULL;
u32 page_index = 0;
switch (p_chain->mode) {
case ECORE_CHAIN_MODE_NEXT_PTR:
p_next = (struct ecore_chain_next *)(*p_next_elem);
*p_next_elem = p_next->next_virt;
if (is_chain_u16(p_chain))
*(u16 *)idx_to_inc += p_chain->elem_unusable;
else
*(u32 *)idx_to_inc += p_chain->elem_unusable;
break;
case ECORE_CHAIN_MODE_SINGLE:
*p_next_elem = p_chain->p_virt_addr;
break;
case ECORE_CHAIN_MODE_PBL:
if (is_chain_u16(p_chain)) {
if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
*(u16 *)page_to_inc = 0;
page_index = *(u16 *)page_to_inc;
} else {
if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
*(u32 *)page_to_inc = 0;
page_index = *(u32 *)page_to_inc;
}
*p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
}
}
#define is_unusable_idx(p, idx) \
(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
#define is_unusable_idx_u32(p, idx) \
(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
#define is_unusable_next_idx(p, idx) \
((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
(p)->usable_per_page)
#define is_unusable_next_idx_u32(p, idx) \
((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) \
== (p)->usable_per_page)
#define test_and_skip(p, idx) \
do { \
if (is_chain_u16(p)) { \
if (is_unusable_idx(p, idx)) \
(p)->u.chain16.idx += (p)->elem_unusable; \
} else { \
if (is_unusable_idx_u32(p, idx)) \
(p)->u.chain32.idx += (p)->elem_unusable; \
} \
} while (0)
/**
* @brief ecore_chain_return_multi_produced -
*
 * A chain in which the driver "Produces" elements should use this API
 * to indicate that previously produced elements are now consumed.
*
* @param p_chain
* @param num
*/
static OSAL_INLINE
void ecore_chain_return_multi_produced(struct ecore_chain *p_chain, u32 num)
{
if (is_chain_u16(p_chain))
p_chain->u.chain16.cons_idx += (u16)num;
else
p_chain->u.chain32.cons_idx += num;
test_and_skip(p_chain, cons_idx);
}
/**
* @brief ecore_chain_return_produced -
*
 * A chain in which the driver "Produces" elements should use this API
 * to indicate that previously produced elements are now consumed.
*
* @param p_chain
*/
static OSAL_INLINE void ecore_chain_return_produced(struct ecore_chain *p_chain)
{
if (is_chain_u16(p_chain))
p_chain->u.chain16.cons_idx++;
else
p_chain->u.chain32.cons_idx++;
test_and_skip(p_chain, cons_idx);
}
/**
* @brief ecore_chain_produce -
*
* A chain in which the driver "Produces" elements should use this to get
 * a pointer to the next element which can be "Produced". It is the driver's
 * responsibility to validate that the chain has room for a new element.
*
* @param p_chain
*
* @return void*, a pointer to next element
*/
static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
{
void *p_ret = OSAL_NULL, *p_prod_idx, *p_prod_page_idx;
if (is_chain_u16(p_chain)) {
if ((p_chain->u.chain16.prod_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain16.prod_idx;
p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx);
}
p_chain->u.chain16.prod_idx++;
} else {
if ((p_chain->u.chain32.prod_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain32.prod_idx;
p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx);
}
p_chain->u.chain32.prod_idx++;
}
p_ret = p_chain->p_prod_elem;
p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
p_chain->elem_size);
return p_ret;
}
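/* Usage sketch (hypothetical caller, not part of this file): a producer is
 * expected to verify there is room before producing, e.g.:
 *
 *	if (!ecore_chain_is_full(p_chain)) {
 *		void *p_elem = ecore_chain_produce(p_chain);
 *		... fill *p_elem, then publish the new producer value taken
 *		    from ecore_chain_get_prod_idx(p_chain) ...
 *	}
 */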
/**
* @brief ecore_chain_get_capacity -
*
* Get the maximum number of BDs in chain
*
* @param p_chain
 *
 * @return u32, the chain capacity (maximum number of usable elements)
*/
static OSAL_INLINE u32 ecore_chain_get_capacity(struct ecore_chain *p_chain)
{
return p_chain->capacity;
}
/**
* @brief ecore_chain_recycle_consumed -
*
* Returns an element which was previously consumed;
* Increments producers so they could be written to FW.
*
* @param p_chain
*/
static OSAL_INLINE
void ecore_chain_recycle_consumed(struct ecore_chain *p_chain)
{
test_and_skip(p_chain, prod_idx);
if (is_chain_u16(p_chain))
p_chain->u.chain16.prod_idx++;
else
p_chain->u.chain32.prod_idx++;
}
/**
* @brief ecore_chain_consume -
*
* A Chain in which the driver utilizes data written by a different source
* (i.e., FW) should use this to access passed buffers.
*
* @param p_chain
*
* @return void*, a pointer to the next buffer written
*/
static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
{
void *p_ret = OSAL_NULL, *p_cons_idx, *p_cons_page_idx;
if (is_chain_u16(p_chain)) {
if ((p_chain->u.chain16.cons_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain16.cons_idx;
p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx);
}
p_chain->u.chain16.cons_idx++;
} else {
if ((p_chain->u.chain32.cons_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain32.cons_idx;
p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx);
}
p_chain->u.chain32.cons_idx++;
}
p_ret = p_chain->p_cons_elem;
p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
p_chain->elem_size);
return p_ret;
}
/**
* @brief ecore_chain_reset -
*
* Resets the chain to its start state
*
 * @param p_chain pointer to a previously allocated chain
*/
static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
{
u32 i;
if (is_chain_u16(p_chain)) {
p_chain->u.chain16.prod_idx = 0;
p_chain->u.chain16.cons_idx = 0;
} else {
p_chain->u.chain32.prod_idx = 0;
p_chain->u.chain32.cons_idx = 0;
}
p_chain->p_cons_elem = p_chain->p_virt_addr;
p_chain->p_prod_elem = p_chain->p_virt_addr;
if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
/* Use (page_cnt - 1) as a reset value for the prod/cons page's
* indices, to avoid unnecessary page advancing on the first
* call to ecore_chain_produce/consume. Instead, the indices
* will be advanced to page_cnt and then will be wrapped to 0.
*/
u32 reset_val = p_chain->page_cnt - 1;
if (is_chain_u16(p_chain)) {
p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val;
p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val;
} else {
p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
}
}
switch (p_chain->intended_use) {
case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE:
case ECORE_CHAIN_USE_TO_PRODUCE:
/* Do nothing */
break;
case ECORE_CHAIN_USE_TO_CONSUME:
/* produce empty elements */
for (i = 0; i < p_chain->capacity; i++)
ecore_chain_recycle_consumed(p_chain);
break;
}
}
/**
* @brief ecore_chain_init_params -
*
 * Initializes a basic chain struct
*
* @param p_chain
* @param page_cnt number of pages in the allocated buffer
* @param elem_size size of each element in the chain
* @param intended_use
* @param mode
* @param cnt_type
*/
static OSAL_INLINE void
ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
enum ecore_chain_use_mode intended_use,
enum ecore_chain_mode mode,
enum ecore_chain_cnt_type cnt_type)
{
/* chain fixed parameters */
p_chain->p_virt_addr = OSAL_NULL;
p_chain->p_phys_addr = 0;
p_chain->elem_size = elem_size;
p_chain->intended_use = intended_use;
p_chain->mode = mode;
p_chain->cnt_type = cnt_type;
p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
p_chain->next_page_mask = (p_chain->usable_per_page &
p_chain->elem_per_page_mask);
p_chain->page_cnt = page_cnt;
p_chain->capacity = p_chain->usable_per_page * page_cnt;
p_chain->size = p_chain->elem_per_page * page_cnt;
p_chain->pbl.p_phys_table = 0;
p_chain->pbl.p_virt_table = OSAL_NULL;
p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;
}
/**
* @brief ecore_chain_init_mem -
*
 * Initializes a basic chain struct with its chain buffers
*
* @param p_chain
* @param p_virt_addr virtual address of allocated buffer's beginning
* @param p_phys_addr physical address of allocated buffer's beginning
*
*/
static OSAL_INLINE void ecore_chain_init_mem(struct ecore_chain *p_chain,
void *p_virt_addr,
dma_addr_t p_phys_addr)
{
p_chain->p_virt_addr = p_virt_addr;
p_chain->p_phys_addr = p_phys_addr;
}
/**
* @brief ecore_chain_init_pbl_mem -
*
 * Initializes a basic chain struct with its PBL buffers
 *
 * @param p_chain
 * @param p_virt_pbl pointer to a pre-allocated side table which will hold
 *                   virtual page addresses.
* @param p_phys_pbl pointer to a pre-allocated side table which will hold
* physical page addresses.
* @param pp_virt_addr_tbl
* pointer to a pre-allocated side table which will hold
* the virtual addresses of the chain pages.
*
*/
static OSAL_INLINE void ecore_chain_init_pbl_mem(struct ecore_chain *p_chain,
void *p_virt_pbl,
dma_addr_t p_phys_pbl,
void **pp_virt_addr_tbl)
{
p_chain->pbl.p_phys_table = p_phys_pbl;
p_chain->pbl.p_virt_table = p_virt_pbl;
p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
}
/**
* @brief ecore_chain_init_next_ptr_elem -
*
 * Initializes a next pointer element
*
* @param p_chain
* @param p_virt_curr virtual address of a chain page of which the next
* pointer element is initialized
* @param p_virt_next virtual address of the next chain page
* @param p_phys_next physical address of the next chain page
*
*/
static OSAL_INLINE void
ecore_chain_init_next_ptr_elem(struct ecore_chain *p_chain, void *p_virt_curr,
void *p_virt_next, dma_addr_t p_phys_next)
{
struct ecore_chain_next *p_next;
u32 size;
size = p_chain->elem_size * p_chain->usable_per_page;
p_next = (struct ecore_chain_next *)((u8 *)p_virt_curr + size);
DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
p_next->next_virt = p_virt_next;
}
/**
* @brief ecore_chain_get_last_elem -
*
* Returns a pointer to the last element of the chain
*
* @param p_chain
*
* @return void*
*/
static OSAL_INLINE void *ecore_chain_get_last_elem(struct ecore_chain *p_chain)
{
struct ecore_chain_next *p_next = OSAL_NULL;
void *p_virt_addr = OSAL_NULL;
u32 size, last_page_idx;
if (!p_chain->p_virt_addr)
goto out;
switch (p_chain->mode) {
case ECORE_CHAIN_MODE_NEXT_PTR:
size = p_chain->elem_size * p_chain->usable_per_page;
p_virt_addr = p_chain->p_virt_addr;
p_next = (struct ecore_chain_next *)((u8 *)p_virt_addr + size);
while (p_next->next_virt != p_chain->p_virt_addr) {
p_virt_addr = p_next->next_virt;
p_next =
(struct ecore_chain_next *)((u8 *)p_virt_addr +
size);
}
break;
case ECORE_CHAIN_MODE_SINGLE:
p_virt_addr = p_chain->p_virt_addr;
break;
case ECORE_CHAIN_MODE_PBL:
last_page_idx = p_chain->page_cnt - 1;
p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
break;
}
/* p_virt_addr points at this stage to the last page of the chain */
size = p_chain->elem_size * (p_chain->usable_per_page - 1);
p_virt_addr = ((u8 *)p_virt_addr + size);
out:
return p_virt_addr;
}
/**
* @brief ecore_chain_set_prod - sets the prod to the given value
*
* @param prod_idx
* @param p_prod_elem
*/
static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain,
u32 prod_idx, void *p_prod_elem)
{
if (is_chain_u16(p_chain))
p_chain->u.chain16.prod_idx = (u16)prod_idx;
else
p_chain->u.chain32.prod_idx = prod_idx;
p_chain->p_prod_elem = p_prod_elem;
}
/**
* @brief ecore_chain_pbl_zero_mem - set chain memory to 0
*
* @param p_chain
*/
static OSAL_INLINE void ecore_chain_pbl_zero_mem(struct ecore_chain *p_chain)
{
u32 i, page_cnt;
if (p_chain->mode != ECORE_CHAIN_MODE_PBL)
return;
page_cnt = ecore_chain_get_page_cnt(p_chain);
for (i = 0; i < page_cnt; i++)
OSAL_MEM_ZERO(p_chain->pbl.pp_virt_addr_tbl[i],
ECORE_CHAIN_PAGE_SIZE);
}
#endif /* __ECORE_CHAIN_H__ */

File diff suppressed because it is too large


@ -0,0 +1,157 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef _ECORE_CID_
#define _ECORE_CID_
#include "ecore_hsi_common.h"
#include "ecore_proto_if.h"
#include "ecore_cxt_api.h"
enum ecore_cxt_elem_type {
ECORE_ELEM_CXT,
ECORE_ELEM_TASK
};
u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
enum protocol_type type, u32 *vf_cid);
u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
enum protocol_type type);
/**
* @brief ecore_cxt_qm_iids - fills the cid/tid counts for the QM configuration
*
* @param p_hwfn
* @param iids [out], a structure holding all the counters
*/
void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn, struct ecore_qm_iids *iids);
/**
* @brief ecore_cxt_set_pf_params - Set the PF params for cxt init
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_cxt_set_proto_cid_count - Set the max cids per protocol for cxt
* init
*
* @param p_hwfn
* @param type
* @param cid_cnt - number of pf cids
* @param vf_cid_cnt - number of vf cids
*/
void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
enum protocol_type type,
u32 cid_cnt, u32 vf_cid_cnt);
/**
* @brief ecore_cxt_cfg_ilt_compute - compute ILT init parameters
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_cxt_mngr_alloc - Allocate and init the context manager struct
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_cxt_mngr_free
*
* @param p_hwfn
*/
void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired
* map
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_cxt_mngr_setup - Reset the acquired CIDs
*
* @param p_hwfn
*/
void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn);
/**
 * @brief ecore_cxt_hw_init_common - Initialize ILT and DQ, common phase, per
* path.
*
* @param p_hwfn
*/
void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn);
/**
 * @brief ecore_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
*
* @param p_hwfn
*/
void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn);
/**
 * @brief ecore_qm_init_pf - Initialize the QM PF phase, per path
*
* @param p_hwfn
*/
void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn);
/**
* @brief Reconfigures QM pf on the fly
*
* @param p_hwfn
* @param p_ptt
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
 * @brief ecore_cxt_release_cid - Release a cid
*
* @param p_hwfn
* @param cid
*/
void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid);
/**
* @brief ecore_cxt_free_proto_ilt - function frees ilt pages
* associated with the protocol passed.
*
* @param p_hwfn
* @param proto
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
enum protocol_type proto);
#define ECORE_CTX_WORKING_MEM 0
#define ECORE_CTX_FL_MEM 1
enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
u32 tid,
u8 ctx_type, void **task_ctx);
#endif /* _ECORE_CID_ */


@ -0,0 +1,79 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_CXT_API_H__
#define __ECORE_CXT_API_H__
struct ecore_hwfn;
struct ecore_cxt_info {
void *p_cxt;
u32 iid;
enum protocol_type type;
};
#define MAX_TID_BLOCKS 512
struct ecore_tid_mem {
u32 tid_size;
u32 num_tids_per_block;
u32 waste;
u8 *blocks[MAX_TID_BLOCKS]; /* 4K */
};
static OSAL_INLINE void *get_task_mem(struct ecore_tid_mem *info, u32 tid)
{
/* note: waste is superfluous */
return (void *)(info->blocks[tid / info->num_tids_per_block] +
(tid % info->num_tids_per_block) * info->tid_size);
/* more elaborate alternative with no modulo
* u32 mask = info->tid_size * info->num_tids_per_block +
* info->waste - 1;
* u32 index = tid / info->num_tids_per_block;
* u32 offset = tid * info->tid_size + index * info->waste;
* return (void *)(blocks[index] + (offset & mask));
*/
}
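/* Worked example (values assumed for illustration): with tid_size == 32 and
 * num_tids_per_block == 128, tid == 300 falls in blocks[300 / 128] ==
 * blocks[2], at byte offset (300 % 128) * 32 == 44 * 32 == 1408 within that
 * block.
 */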
/**
* @brief ecore_cxt_acquire - Acquire a new cid of a specific protocol type
*
* @param p_hwfn
* @param type
* @param p_cid
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
enum protocol_type type,
u32 *p_cid);
/**
 * @brief ecore_cxt_get_cid_info - Returns the context info for a specific cid
*
*
* @param p_hwfn
* @param p_info in/out
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
struct ecore_cxt_info *p_info);
/**
* @brief ecore_cxt_get_tid_mem_info
*
* @param p_hwfn
* @param p_info
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
struct ecore_tid_mem *p_info);
#endif

File diff suppressed because it is too large


@ -0,0 +1,497 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_DEV_API_H__
#define __ECORE_DEV_API_H__
#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_int_api.h"
struct ecore_tunn_start_params;
/**
* @brief ecore_init_dp - initialize the debug level
*
* @param p_dev
* @param dp_module
* @param dp_level
* @param dp_ctx
*/
void ecore_init_dp(struct ecore_dev *p_dev,
u32 dp_module, u8 dp_level, void *dp_ctx);
/**
* @brief ecore_init_struct - initialize the device structure to
* its defaults
*
* @param p_dev
*/
void ecore_init_struct(struct ecore_dev *p_dev);
/**
* @brief ecore_resc_free -
*
* @param p_dev
*/
void ecore_resc_free(struct ecore_dev *p_dev);
/**
* @brief ecore_resc_alloc -
*
* @param p_dev
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
/**
* @brief ecore_resc_setup -
*
* @param p_dev
*/
void ecore_resc_setup(struct ecore_dev *p_dev);
/**
* @brief ecore_hw_init -
*
* @param p_dev
* @param p_tunn - tunneling parameters
* @param b_hw_start
* @param int_mode - interrupt mode [msix, inta, etc.] to use.
* @param allow_npar_tx_switch - npar tx switching to be used
* for vports configured for tx-switching.
* @param bin_fw_data - binary fw data pointer in binary fw file.
* Pass NULL if not using binary fw file.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
struct ecore_tunn_start_params *p_tunn,
bool b_hw_start,
enum ecore_int_mode int_mode,
bool allow_npar_tx_switch,
const u8 *bin_fw_data);
/**
* @brief ecore_hw_timers_stop_all -
*
* @param p_dev
*
* @return void
*/
void ecore_hw_timers_stop_all(struct ecore_dev *p_dev);
/**
* @brief ecore_hw_stop -
*
* @param p_dev
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);
/**
 * @brief ecore_hw_stop_fastpath - should be called in case
* slowpath is still required for the device, but
* fastpath is not.
*
* @param p_dev
*
*/
void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
/**
 * @brief ecore_prepare_hibernate - should be called when
* the system is going into the hibernate state
*
* @param p_dev
*
*/
void ecore_prepare_hibernate(struct ecore_dev *p_dev);
/**
 * @brief ecore_hw_start_fastpath - restart fastpath traffic,
 * only if hw_stop_fastpath was called
 * @param p_hwfn
*
*/
void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_hw_reset -
*
* @param p_dev
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev);
/**
* @brief ecore_hw_prepare -
*
* @param p_dev
* @param personality - personality to initialize
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, int personality);
/**
* @brief ecore_hw_remove -
*
* @param p_dev
*/
void ecore_hw_remove(struct ecore_dev *p_dev);
/**
* @brief ecore_ptt_acquire - Allocate a PTT window
*
* Should be called at the entry point to the driver (at the beginning of an
* exported function)
*
* @param p_hwfn
*
* @return struct ecore_ptt
*/
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_ptt_release - Release PTT Window
*
* Should be called at the end of a flow - at the end of the function that
* acquired the PTT.
*
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
#ifndef __EXTRACT__LINUX__
struct ecore_eth_stats {
u64 no_buff_discards;
u64 packet_too_big_discard;
u64 ttl0_discard;
u64 rx_ucast_bytes;
u64 rx_mcast_bytes;
u64 rx_bcast_bytes;
u64 rx_ucast_pkts;
u64 rx_mcast_pkts;
u64 rx_bcast_pkts;
u64 mftag_filter_discards;
u64 mac_filter_discards;
u64 tx_ucast_bytes;
u64 tx_mcast_bytes;
u64 tx_bcast_bytes;
u64 tx_ucast_pkts;
u64 tx_mcast_pkts;
u64 tx_bcast_pkts;
u64 tx_err_drop_pkts;
u64 tpa_coalesced_pkts;
u64 tpa_coalesced_events;
u64 tpa_aborts_num;
u64 tpa_not_coalesced_pkts;
u64 tpa_coalesced_bytes;
/* port */
u64 rx_64_byte_packets;
u64 rx_65_to_127_byte_packets;
u64 rx_128_to_255_byte_packets;
u64 rx_256_to_511_byte_packets;
u64 rx_512_to_1023_byte_packets;
u64 rx_1024_to_1518_byte_packets;
u64 rx_1519_to_1522_byte_packets;
u64 rx_1519_to_2047_byte_packets;
u64 rx_2048_to_4095_byte_packets;
u64 rx_4096_to_9216_byte_packets;
u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
u64 rx_pfc_frames;
u64 rx_align_errors;
u64 rx_carrier_errors;
u64 rx_oversize_packets;
u64 rx_jabbers;
u64 rx_undersize_packets;
u64 rx_fragments;
u64 tx_64_byte_packets;
u64 tx_65_to_127_byte_packets;
u64 tx_128_to_255_byte_packets;
u64 tx_256_to_511_byte_packets;
u64 tx_512_to_1023_byte_packets;
u64 tx_1024_to_1518_byte_packets;
u64 tx_1519_to_2047_byte_packets;
u64 tx_2048_to_4095_byte_packets;
u64 tx_4096_to_9216_byte_packets;
u64 tx_9217_to_16383_byte_packets;
u64 tx_pause_frames;
u64 tx_pfc_frames;
u64 tx_lpi_entry_count;
u64 tx_total_collisions;
u64 brb_truncates;
u64 brb_discards;
u64 rx_mac_bytes;
u64 rx_mac_uc_packets;
u64 rx_mac_mc_packets;
u64 rx_mac_bc_packets;
u64 rx_mac_frames_ok;
u64 tx_mac_bytes;
u64 tx_mac_uc_packets;
u64 tx_mac_mc_packets;
u64 tx_mac_bc_packets;
u64 tx_mac_ctrl_frames;
};
#endif
enum ecore_dmae_address_type_t {
ECORE_DMAE_ADDRESS_HOST_VIRT,
ECORE_DMAE_ADDRESS_HOST_PHYS,
ECORE_DMAE_ADDRESS_GRC
};
/* Flag values: if the ECORE_DMAE_FLAG_RW_REPL_SRC flag is set, the source is
 * a block of length DMAE_MAX_RW_SIZE and the destination is larger, the
 * source block will be duplicated as many times as required to fill the
 * destination block. This is used mostly to write a zeroed buffer to a
 * destination address using DMA.
 */
#define ECORE_DMAE_FLAG_RW_REPL_SRC 0x00000001
#define ECORE_DMAE_FLAG_VF_SRC 0x00000002
#define ECORE_DMAE_FLAG_VF_DST 0x00000004
#define ECORE_DMAE_FLAG_COMPLETION_DST 0x00000008
struct ecore_dmae_params {
u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
u8 src_vfid;
u8 dst_vfid;
};
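/* Illustrative example (hypothetical usage, not part of the original API
 * documentation): to zero a large GRC region using DMA, a caller could point
 * the source at a small zeroed host buffer of DMAE_MAX_RW_SIZE dwords and set
 * ECORE_DMAE_FLAG_RW_REPL_SRC in ecore_dmae_params.flags (or in the flags
 * argument of ecore_dmae_host2grc() below); the source block is then
 * replicated until the destination length is covered.
 */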
/**
* @brief ecore_dmae_host2grc - copy data from source addr to
* dmae registers using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param source_addr
* @param grc_addr (dmae_data_offset)
* @param size_in_dwords
* @param flags (one of the flags defined above)
*/
enum _ecore_status_t
ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u64 source_addr,
u32 grc_addr, u32 size_in_dwords, u32 flags);
/**
* @brief ecore_dmae_grc2host - Read data from dmae data offset
* to source address using the given ptt
*
* @param p_ptt
* @param grc_addr (dmae_data_offset)
* @param dest_addr
* @param size_in_dwords
* @param flags - one of the flags defined above
*/
enum _ecore_status_t
ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 grc_addr,
dma_addr_t dest_addr, u32 size_in_dwords, u32 flags);
/**
 * @brief ecore_dmae_host2host - copy data from a source address
* to a destination address (for SRIOV) using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param source_addr
* @param dest_addr
* @param size_in_dwords
* @param params
*/
enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
dma_addr_t source_addr,
dma_addr_t dest_addr,
u32 size_in_dwords, struct ecore_dmae_params *p_params);
/**
* @brief ecore_chain_alloc - Allocate and initialize a chain
*
* @param p_hwfn
* @param intended_use
* @param mode
* @param num_elems
* @param elem_size
* @param p_chain
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_chain_alloc(struct ecore_dev *p_dev,
enum ecore_chain_use_mode intended_use,
enum ecore_chain_mode mode,
enum ecore_chain_cnt_type cnt_type,
u32 num_elems,
osal_size_t elem_size, struct ecore_chain *p_chain);
/**
* @brief ecore_chain_free - Free chain DMA memory
*
* @param p_hwfn
* @param p_chain
*/
void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain);
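/* Usage sketch (hypothetical caller; element count and size are illustrative):
 *
 *	struct ecore_chain chain;
 *
 *	if (ecore_chain_alloc(p_dev, ECORE_CHAIN_USE_TO_PRODUCE,
 *			      ECORE_CHAIN_MODE_PBL, ECORE_CHAIN_CNT_TYPE_U16,
 *			      256, sizeof(u64), &chain) == ECORE_SUCCESS) {
 *		... produce/consume elements via the ecore_chain.h accessors ...
 *		ecore_chain_free(p_dev, &chain);
 *	}
 */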
/**
 * @brief ecore_fw_l2_queue - Get absolute L2 queue ID
*
* @param p_hwfn
* @param src_id - relative to p_hwfn
* @param dst_id - absolute per engine
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
u16 src_id, u16 *dst_id);
/**
 * @brief ecore_fw_vport - Get absolute vport ID
*
* @param p_hwfn
* @param src_id - relative to p_hwfn
* @param dst_id - absolute per engine
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
u8 src_id, u8 *dst_id);
/**
 * @brief ecore_fw_rss_eng - Get absolute RSS engine ID
*
* @param p_hwfn
* @param src_id - relative to p_hwfn
* @param dst_id - absolute per engine
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
u8 src_id, u8 *dst_id);
/**
* @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
*
* @param p_hwfn
* @param p_ptt
* @param p_filter - MAC to add
*/
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 *p_filter);
/**
 * @brief ecore_llh_remove_mac_filter - removes a MAC filter from llh
*
* @param p_hwfn
* @param p_ptt
* @param p_filter - MAC to remove
*/
void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 *p_filter);
/**
 * @brief ecore_llh_add_ethertype_filter - configures an ethertype filter in llh
*
* @param p_hwfn
* @param p_ptt
* @param filter - ethertype to add
*/
enum _ecore_status_t ecore_llh_add_ethertype_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 filter);
/**
 * @brief ecore_llh_remove_ethertype_filter - removes an ethertype llh filter
*
* @param p_hwfn
* @param p_ptt
* @param filter - ethertype to remove
*/
void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 filter);
/**
* @brief ecore_llh_clear_all_filters - removes all MAC filters from llh
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
 * @brief Cleanup of previous driver remains prior to load
*
* @param p_hwfn
* @param p_ptt
* @param id - For PF, engine-relative. For VF, PF-relative.
* @param is_vf - true iff cleanup is made for a VF.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 id, bool is_vf);
/**
* @brief ecore_test_registers - Perform register tests
*
* @param p_hwfn
* @param p_ptt
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_test_registers(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
*
* @param p_hwfn
* @param p_ptt
* @param coalesce - Coalesce value in micro seconds.
* @param qid - Queue index.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 coalesce, u8 qid);
/**
* @brief ecore_set_txq_coalesce - Configure coalesce parameters for a Tx queue
*
* @param p_hwfn
* @param p_ptt
* @param coalesce - Coalesce value in micro seconds.
* @param qid - Queue index.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 coalesce, u8 qid);
#endif


@ -0,0 +1,42 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef GTT_REG_ADDR_H
#define GTT_REG_ADDR_H
/* Win 2 */
#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
/* Win 3 */
#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL
/* Win 4 */
#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL
/* Win 5 */
#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
/* Win 6 */
#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
/* Win 7 */
#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
/* Win 8 */
#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
/* Win 9 */
#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL
/* Win 10 */
#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
/* Win 11 */
#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
#endif


@ -0,0 +1,33 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __PREVENT_PXP_GLOBAL_WIN__
static u32 pxp_global_win[] = {
0,
0,
0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
0,
0,
0,
0,
0,
0,
0,
};
#endif /* __PREVENT_PXP_GLOBAL_WIN__ */

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,905 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore_status.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "reg_addr.h"
#include "ecore_utils.h"
#ifndef ASIC_ONLY
#define ECORE_EMUL_FACTOR 2000
#define ECORE_FPGA_FACTOR 200
#endif
#define ECORE_BAR_ACQUIRE_TIMEOUT 1000
/* Invalid values */
#define ECORE_BAR_INVALID_OFFSET -1
struct ecore_ptt {
osal_list_entry_t list_entry;
unsigned int idx;
struct pxp_ptt_entry pxp;
};
struct ecore_ptt_pool {
osal_list_t free_list;
osal_spinlock_t lock;
struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};
enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_ptt_pool *p_pool;
int i;
p_pool = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(struct ecore_ptt_pool));
if (!p_pool)
return ECORE_NOMEM;
OSAL_LIST_INIT(&p_pool->free_list);
for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
p_pool->ptts[i].idx = i;
p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
p_pool->ptts[i].pxp.pretend.control = 0;
/* There are special PTT entries that are taken only by design.
		 * The rest are added to the list for general usage.
*/
if (i >= RESERVED_PTT_MAX)
OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry,
&p_pool->free_list);
}
p_hwfn->p_ptt_pool = p_pool;
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
OSAL_SPIN_LOCK_INIT(&p_pool->lock);
return ECORE_SUCCESS;
}
void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
{
struct ecore_ptt *p_ptt;
int i;
for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET;
}
}
void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
if (p_hwfn->p_ptt_pool)
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
p_hwfn->p_ptt_pool = OSAL_NULL;
}
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
{
struct ecore_ptt *p_ptt;
unsigned int i;
/* Take the free PTT from the list */
for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list))
break;
OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
OSAL_MSLEEP(1);
}
/* We should not time-out, but it can happen... --> Lock isn't held */
if (i == ECORE_BAR_ACQUIRE_TIMEOUT) {
DP_NOTICE(p_hwfn, true, "Failed to allocate PTT\n");
return OSAL_NULL;
}
p_ptt = OSAL_LIST_FIRST_ENTRY(&p_hwfn->p_ptt_pool->free_list,
struct ecore_ptt, list_entry);
OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
&p_hwfn->p_ptt_pool->free_list);
OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "allocated ptt %d\n", p_ptt->idx);
return p_ptt;
}
void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
/* This PTT should not be set to pretend if it is being released */
OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
}
u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
/* The HW is using DWORDS and we need to translate it to Bytes */
return p_ptt->pxp.offset << 2;
}
static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
{
return PXP_PF_WINDOW_ADMIN_PER_PF_START +
p_ptt->idx * sizeof(struct pxp_ptt_entry);
}
u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
{
return PXP_EXTERNAL_BAR_PF_WINDOW_START +
p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}
void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 new_hw_addr)
{
u32 prev_hw_addr;
prev_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
if (new_hw_addr == prev_hw_addr)
return;
	/* Update PTT entry in admin window */
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"Updating PTT entry %d to offset 0x%x\n",
p_ptt->idx, new_hw_addr);
/* The HW is using DWORDS and the address is in Bytes */
p_ptt->pxp.offset = new_hw_addr >> 2;
REG_WR(p_hwfn,
ecore_ptt_config_addr(p_ptt) +
OFFSETOF(struct pxp_ptt_entry, offset), p_ptt->pxp.offset);
}
static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 hw_addr)
{
u32 win_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
u32 offset;
offset = hw_addr - win_hw_addr;
/* Verify the address is within the window */
if (hw_addr < win_hw_addr ||
offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr);
offset = 0;
}
return ecore_ptt_get_bar_addr(p_ptt) + offset;
}
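/* Illustrative example (addresses assumed): if the PTT window currently maps
 * hw_addr 0x50000 (pxp.offset == 0x50000 >> 2) and an access to 0x50010 is
 * requested, the offset 0x10 is within
 * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE and is served at
 * ecore_ptt_get_bar_addr(p_ptt) + 0x10. An access outside the window
 * re-programs it via ecore_ptt_set_win() and is then served at offset 0.
 */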
struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
enum reserved_ptts ptt_idx)
{
if (ptt_idx >= RESERVED_PTT_MAX) {
DP_NOTICE(p_hwfn, true,
"Requested PTT %d is out of range\n", ptt_idx);
return OSAL_NULL;
}
return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}
void ecore_wr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 hw_addr, u32 val)
{
u32 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
REG_WR(p_hwfn, bar_addr, val);
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
bar_addr, hw_addr, val);
#ifndef ASIC_ONLY
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
OSAL_UDELAY(100);
#endif
}
u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
{
u32 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
u32 val = REG_RD(p_hwfn, bar_addr);
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
bar_addr, hw_addr, val);
#ifndef ASIC_ONLY
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
OSAL_UDELAY(100);
#endif
return val;
}
static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
void *addr,
u32 hw_addr, osal_size_t n, bool to_device)
{
u32 dw_count, *host_addr, hw_offset;
osal_size_t quota, done = 0;
u32 OSAL_IOMEM *reg_addr;
while (done < n) {
quota = OSAL_MIN_T(osal_size_t, n - done,
PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
hw_offset = ecore_ptt_get_bar_addr(p_ptt);
dw_count = quota / 4;
host_addr = (u32 *)((u8 *)addr + done);
reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset);
if (to_device)
while (dw_count--)
DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
else
while (dw_count--)
*host_addr++ = DIRECT_REG_RD(p_hwfn,
reg_addr++);
done += quota;
}
}
void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
void *dest, u32 hw_addr, osal_size_t n)
{
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
hw_addr, dest, hw_addr, (unsigned long)n);
ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}
void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 hw_addr, void *src, osal_size_t n)
{
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
hw_addr, hw_addr, src, (unsigned long)n);
ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}
void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 fid)
{
void *p_pretend;
u16 control = 0;
SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
	/* Every pretend undoes previous pretends, including a previous port pretend */
SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);
p_pretend = &p_ptt->pxp.pretend;
REG_WR(p_hwfn,
ecore_ptt_config_addr(p_ptt) +
OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
}
void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 port_id)
{
void *p_pretend;
u16 control = 0;
SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
p_ptt->pxp.pretend.control = control;
p_pretend = &p_ptt->pxp.pretend;
REG_WR(p_hwfn,
ecore_ptt_config_addr(p_ptt) +
OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
}
void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
void *p_pretend;
u16 control = 0;
SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
p_ptt->pxp.pretend.control = control;
p_pretend = &p_ptt->pxp.pretend;
REG_WR(p_hwfn,
ecore_ptt_config_addr(p_ptt) +
OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
}
u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
{
u32 concrete_fid = 0;
SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
return concrete_fid;
}
/* Not in use @DPDK
* Ecore HW lock
* =============
* Although the implementation is ready, today we don't have any flow that
 * utilizes said locks - and we want to keep it this way.
 * If this changes, this needs to be revisited.
*/
/* Ecore DMAE
* =============
*/
static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
const u8 is_src_type_grc,
const u8 is_dst_type_grc,
struct ecore_dmae_params *p_params)
{
u16 opcode_b = 0;
u32 opcode = 0;
/* Whether the source is the PCIe or the GRC.
* 0- The source is the PCIe
* 1- The source is the GRC.
*/
opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
: DMAE_CMD_SRC_MASK_PCIE) << DMAE_CMD_SRC_SHIFT;
opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
DMAE_CMD_SRC_PF_ID_SHIFT;
/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
: DMAE_CMD_DST_MASK_PCIE) << DMAE_CMD_DST_SHIFT;
opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
DMAE_CMD_DST_PF_ID_SHIFT;
	/* DMAE_E4_TODO need to check which value to specify here. */
/* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT; */
/* Whether to write a completion word to the completion destination:
* 0-Do not write a completion word
* 1-Write the completion word
*/
opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST)
opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;
	/* swapping mode 3 - big endian. There should be an ifdef-ed define for
	 * this in the HSI somewhere; since it is currently missing, the mode is
	 * set here.
	 */
opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;
opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT;
/* reset source address in next go */
opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
/* reset dest address in next go */
opcode |= DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT;
/* SRC/DST VFID: all 1's - pf, otherwise VF id */
if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) {
opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
} else {
opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
DMAE_CMD_SRC_VF_ID_SHIFT);
}
if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) {
opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
} else {
opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
}
p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b);
}
static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
{
OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) != 31 * 4);
return DMAE_REG_GO_C0 + idx * 4;
}
static enum _ecore_status_t
ecore_dmae_post_command(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
enum _ecore_status_t ecore_status = ECORE_SUCCESS;
u8 idx_cmd = p_hwfn->dmae_info.channel, i;
/* verify address is not OSAL_NULL */
if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
DP_NOTICE(p_hwfn, true,
"source or destination address 0 idx_cmd=%d\n"
"opcode = [0x%08x,0x%04x] len=0x%x"
" src=0x%x:%x dst=0x%x:%x\n",
idx_cmd, (u32)p_command->opcode,
(u16)p_command->opcode_b,
(int)p_command->length,
(int)p_command->src_addr_hi,
(int)p_command->src_addr_lo,
(int)p_command->dst_addr_hi,
(int)p_command->dst_addr_lo);
return ECORE_INVAL;
}
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x]"
"len=0x%x src=0x%x:%x dst=0x%x:%x\n",
idx_cmd, (u32)p_command->opcode,
(u16)p_command->opcode_b,
(int)p_command->length,
(int)p_command->src_addr_hi,
(int)p_command->src_addr_lo,
(int)p_command->dst_addr_hi, (int)p_command->dst_addr_lo);
	/* Copy the command to DMAE - this needs to be done before every call
	 * since the source/destination addresses are not reset.
	 * The number of commands has been increased to 16 (previously it was 14).
	 * The first 9 DWs are the command registers, the 10th DW is the
	 * GO register, and
	 * the rest are result registers (which are read-only by the client).
	 */
for (i = 0; i < DMAE_CMD_SIZE; i++) {
u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
*(((u32 *)p_command) + i) : 0;
ecore_wr(p_hwfn, p_ptt,
DMAE_REG_CMD_MEM +
(idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
(i * sizeof(u32)), data);
}
ecore_wr(p_hwfn, p_ptt,
ecore_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);
return ecore_status;
}
enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
{
dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
*p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
if (*p_comp == OSAL_NULL) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `p_completion_word'\n");
ecore_dmae_info_free(p_hwfn);
return ECORE_NOMEM;
}
p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
*p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
sizeof(struct dmae_cmd));
if (*p_cmd == OSAL_NULL) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct dmae_cmd'\n");
ecore_dmae_info_free(p_hwfn);
return ECORE_NOMEM;
}
p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
*p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
sizeof(u32) * DMAE_MAX_RW_SIZE);
if (*p_buff == OSAL_NULL) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `intermediate_buffer'\n");
ecore_dmae_info_free(p_hwfn);
return ECORE_NOMEM;
}
/* DMAE_E4_TODO : Need to change this to reflect proper channel */
p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
return ECORE_SUCCESS;
}
void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
{
dma_addr_t p_phys;
/* Just make sure no one is in the middle */
OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
p_hwfn->dmae_info.p_completion_word,
p_phys, sizeof(u32));
p_hwfn->dmae_info.p_completion_word = OSAL_NULL;
}
if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) {
p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
p_hwfn->dmae_info.p_dmae_cmd,
p_phys, sizeof(struct dmae_cmd));
p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL;
}
if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) {
p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
p_hwfn->dmae_info.p_intermediate_buffer,
p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
}
OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
}
static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
{
enum _ecore_status_t ecore_status = ECORE_SUCCESS;
u32 wait_cnt_limit = 10000, wait_cnt = 0;
#ifndef ASIC_ONLY
u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
ECORE_EMUL_FACTOR :
(CHIP_REV_IS_FPGA(p_hwfn->p_dev) ?
ECORE_FPGA_FACTOR : 1));
wait_cnt_limit *= factor;
#endif
	/* DMAE_E4_TODO : TODO check whether we have to call any function other
	 * than BARRIER to sync the completion_word since we are not
* using the volatile keyword for this
*/
OSAL_BARRIER(p_hwfn->p_dev);
while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
/* DMAE_E4_TODO : using OSAL_MSLEEP instead of mm_wait since mm
		 * functions are getting deprecated. Need to review for future.
*/
OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
if (++wait_cnt > wait_cnt_limit) {
DP_NOTICE(p_hwfn->p_dev, ECORE_MSG_HW,
"Timed-out waiting for operation to"
" complete. Completion word is 0x%08x"
" expected 0x%08x.\n",
*p_hwfn->dmae_info.p_completion_word,
DMAE_COMPLETION_VAL);
ecore_status = ECORE_TIMEOUT;
break;
}
/* to sync the completion_word since we are not
* using the volatile keyword for p_completion_word
*/
OSAL_BARRIER(p_hwfn->p_dev);
}
if (ecore_status == ECORE_SUCCESS)
*p_hwfn->dmae_info.p_completion_word = 0;
return ecore_status;
}
static enum _ecore_status_t
ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u64 src_addr,
u64 dst_addr,
u8 src_type, u8 dst_type, u32 length)
{
dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
enum _ecore_status_t ecore_status = ECORE_SUCCESS;
switch (src_type) {
case ECORE_DMAE_ADDRESS_GRC:
case ECORE_DMAE_ADDRESS_HOST_PHYS:
cmd->src_addr_hi = DMA_HI(src_addr);
cmd->src_addr_lo = DMA_LO(src_addr);
break;
/* for virt source addresses we use the intermediate buffer. */
case ECORE_DMAE_ADDRESS_HOST_VIRT:
cmd->src_addr_hi = DMA_HI(phys);
cmd->src_addr_lo = DMA_LO(phys);
OSAL_MEMCPY(&p_hwfn->dmae_info.p_intermediate_buffer[0],
(void *)(osal_uintptr_t)src_addr,
length * sizeof(u32));
break;
default:
return ECORE_INVAL;
}
switch (dst_type) {
case ECORE_DMAE_ADDRESS_GRC:
case ECORE_DMAE_ADDRESS_HOST_PHYS:
cmd->dst_addr_hi = DMA_HI(dst_addr);
cmd->dst_addr_lo = DMA_LO(dst_addr);
break;
/* for virt destination address we use the intermediate buff. */
case ECORE_DMAE_ADDRESS_HOST_VIRT:
cmd->dst_addr_hi = DMA_HI(phys);
cmd->dst_addr_lo = DMA_LO(phys);
break;
default:
return ECORE_INVAL;
}
cmd->length = (u16)length;
if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
OSAL_DMA_SYNC(p_hwfn->p_dev,
(void *)HILO_U64(cmd->src_addr_hi,
cmd->src_addr_lo),
length * sizeof(u32), false);
ecore_dmae_post_command(p_hwfn, p_ptt);
ecore_status = ecore_dmae_operation_wait(p_hwfn);
/* TODO - is it true ? */
if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
OSAL_DMA_SYNC(p_hwfn->p_dev,
(void *)HILO_U64(cmd->src_addr_hi,
cmd->src_addr_lo),
length * sizeof(u32), true);
if (ecore_status != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, ECORE_MSG_HW,
"ecore_dmae_execute_sub_operation: Wait failed. source_addr"
" 0x%lx, dst_addr 0x%lx, size_in_dwords 0x%x\n",
(unsigned long)src_addr, (unsigned long)dst_addr,
length);
return ecore_status;
}
if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr),
&p_hwfn->dmae_info.p_intermediate_buffer[0],
length * sizeof(u32));
return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u64 src_addr,
u64 dst_addr,
u8 src_type,
u8 dst_type,
u32 size_in_dwords,
struct ecore_dmae_params *p_params)
{
dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
enum _ecore_status_t ecore_status = ECORE_SUCCESS;
u64 src_addr_split = 0, dst_addr_split = 0;
u16 length_limit = DMAE_MAX_RW_SIZE;
u32 offset = 0;
ecore_dmae_opcode(p_hwfn,
(src_type == ECORE_DMAE_ADDRESS_GRC),
(dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);
cmd->comp_addr_lo = DMA_LO(phys);
cmd->comp_addr_hi = DMA_HI(phys);
cmd->comp_val = DMAE_COMPLETION_VAL;
/* Verify the grc_addr is valid, e.g. < MAX_GRC_OFFSET */
cnt_split = size_in_dwords / length_limit;
length_mod = size_in_dwords % length_limit;
src_addr_split = src_addr;
dst_addr_split = dst_addr;
for (i = 0; i <= cnt_split; i++) {
offset = length_limit * i;
if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) {
if (src_type == ECORE_DMAE_ADDRESS_GRC)
src_addr_split = src_addr + offset;
else
src_addr_split = src_addr + (offset * 4);
}
if (dst_type == ECORE_DMAE_ADDRESS_GRC)
dst_addr_split = dst_addr + offset;
else
dst_addr_split = dst_addr + (offset * 4);
length_cur = (cnt_split == i) ? length_mod : length_limit;
/* might be zero on last iteration */
if (!length_cur)
continue;
ecore_status = ecore_dmae_execute_sub_operation(p_hwfn,
p_ptt,
src_addr_split,
dst_addr_split,
src_type,
dst_type,
length_cur);
if (ecore_status != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false,
"ecore_dmae_execute_sub_operation Failed"
" with error 0x%x. source_addr 0x%lx,"
" dest addr 0x%lx, size_in_dwords 0x%x\n",
ecore_status, (unsigned long)src_addr,
(unsigned long)dst_addr, length_cur);
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
break;
}
}
return ecore_status;
}
enum _ecore_status_t
ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u64 source_addr,
u32 grc_addr, u32 size_in_dwords, u32 flags)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
struct ecore_dmae_params params;
enum _ecore_status_t rc;
OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
params.flags = flags;
OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
grc_addr_in_dw,
ECORE_DMAE_ADDRESS_HOST_VIRT,
ECORE_DMAE_ADDRESS_GRC,
size_in_dwords, &params);
OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
return rc;
}
enum _ecore_status_t
ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 grc_addr,
dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
struct ecore_dmae_params params;
enum _ecore_status_t rc;
OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
params.flags = flags;
OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
dest_addr, ECORE_DMAE_ADDRESS_GRC,
ECORE_DMAE_ADDRESS_HOST_VIRT,
size_in_dwords, &params);
OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
return rc;
}
enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
dma_addr_t source_addr,
dma_addr_t dest_addr,
u32 size_in_dwords, struct ecore_dmae_params *p_params)
{
enum _ecore_status_t rc;
OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
dest_addr,
ECORE_DMAE_ADDRESS_HOST_PHYS,
ECORE_DMAE_ADDRESS_HOST_PHYS,
size_in_dwords, p_params);
OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
return rc;
}
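/* Usage sketch for the wrappers above (illustrative only, not driver code):
 * copying a small host buffer into GRC space. `GRC_EXAMPLE_ADDR' is a
 * hypothetical byte address in GRC space assumed for the example; sizes are
 * given in dwords.
 *
 *	u32 buf[16] = { 0 };
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_dmae_host2grc(p_hwfn, p_ptt, (osal_uintptr_t)buf,
 *				 GRC_EXAMPLE_ADDR, OSAL_ARRAY_SIZE(buf), 0);
 *	if (rc != ECORE_SUCCESS)
 *		DP_NOTICE(p_hwfn, false, "example DMAE write failed\n");
 *
 * ecore_dmae_grc2host() and ecore_dmae_host2host() follow the same pattern
 * with the source/destination roles swapped.
 */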
u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
enum protocol_type proto,
union ecore_qm_pq_params *p_params)
{
u16 pq_id = 0;
if ((proto == PROTOCOLID_CORE ||
proto == PROTOCOLID_ETH) && !p_params) {
DP_NOTICE(p_hwfn, true,
"Protocol %d received NULL PQ params\n", proto);
return 0;
}
switch (proto) {
case PROTOCOLID_CORE:
if (p_params->core.tc == LB_TC)
pq_id = p_hwfn->qm_info.pure_lb_pq;
else if (p_params->core.tc == OOO_LB_TC)
pq_id = p_hwfn->qm_info.ooo_pq;
else
pq_id = p_hwfn->qm_info.offload_pq;
break;
case PROTOCOLID_ETH:
pq_id = p_params->eth.tc;
/* TODO - multi-CoS for VFs? */
if (p_params->eth.is_vf)
pq_id += p_hwfn->qm_info.vf_queues_offset +
p_params->eth.vf_id;
break;
default:
pq_id = 0;
}
pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, ECORE_PQ);
return pq_id;
}
void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
enum ecore_hw_err_type err_type)
{
/* Fan failure cannot be masked by handling of another HW error */
if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
"Recovery is in progress. "
"Avoid notifying about HW error %d.\n",
err_type);
return;
}
OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
}

View File

@ -0,0 +1,269 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_HW_H__
#define __ECORE_HW_H__
#include "ecore.h"
#include "ecore_dev_api.h"
/* Forward declaration */
struct ecore_ptt;
enum reserved_ptts {
RESERVED_PTT_EDIAG,
RESERVED_PTT_USER_SPACE,
RESERVED_PTT_MAIN,
RESERVED_PTT_DPC,
RESERVED_PTT_MAX
};
/* @@@TMP - in earlier versions of the emulation, the HW lock started from 1
* instead of 0; this should be fixed in later HW versions.
*/
#ifndef MISC_REG_DRIVER_CONTROL_0
#define MISC_REG_DRIVER_CONTROL_0 MISC_REG_DRIVER_CONTROL_1
#endif
#ifndef MISC_REG_DRIVER_CONTROL_0_SIZE
#define MISC_REG_DRIVER_CONTROL_0_SIZE MISC_REG_DRIVER_CONTROL_1_SIZE
#endif
enum _dmae_cmd_dst_mask {
DMAE_CMD_DST_MASK_NONE = 0,
DMAE_CMD_DST_MASK_PCIE = 1,
DMAE_CMD_DST_MASK_GRC = 2
};
enum _dmae_cmd_src_mask {
DMAE_CMD_SRC_MASK_PCIE = 0,
DMAE_CMD_SRC_MASK_GRC = 1
};
enum _dmae_cmd_crc_mask {
DMAE_CMD_COMP_CRC_EN_MASK_NONE = 0,
DMAE_CMD_COMP_CRC_EN_MASK_SET = 1
};
/* definitions for DMA constants */
#define DMAE_GO_VALUE 0x1
#ifdef __BIG_ENDIAN
#define DMAE_COMPLETION_VAL 0xAED10000
#define DMAE_CMD_ENDIANITY 0x3
#else
#define DMAE_COMPLETION_VAL 0xD1AE
#define DMAE_CMD_ENDIANITY 0x2
#endif
#define DMAE_CMD_SIZE 14
/* size of DMAE command structure to fill: DMAE_CMD_SIZE - 5 */
#define DMAE_CMD_SIZE_TO_FILL (DMAE_CMD_SIZE - 5)
/* Minimum wait for a dmae operation to complete */
#define DMAE_MIN_WAIT_TIME 0x2
#define DMAE_MAX_CLIENTS 32
/**
* @brief ecore_gtt_init - Initialize GTT windows
*
* @param p_hwfn
*/
void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_ptt_invalidate - Forces all ptt entries to be re-configured
*
* @param p_hwfn
*/
void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_ptt_pool_alloc - Allocate and initialize PTT pool
*
* @param p_hwfn
*
* @return _ecore_status_t - success (0), negative - error.
*/
enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_ptt_pool_free -
*
* @param p_hwfn
*/
void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_ptt_get_hw_addr - Get PTT's GRC/HW address
*
* @param p_hwfn
* @param p_ptt
*
* @return u32
*/
u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
* @brief ecore_ptt_get_bar_addr - Get PTT's external BAR address
*
* @param p_hwfn
* @param p_ptt
*
* @return u32
*/
u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt);
/**
* @brief ecore_ptt_set_win - Set PTT Window's GRC BAR address
*
* @param p_hwfn
* @param new_hw_addr
* @param p_ptt
*/
void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 new_hw_addr);
/**
* @brief ecore_get_reserved_ptt - Get a specific reserved PTT
*
* @param p_hwfn
* @param ptt_idx
*
* @return struct ecore_ptt *
*/
struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
enum reserved_ptts ptt_idx);
/**
* @brief ecore_wr - Write value to BAR using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param val
* @param hw_addr
*/
void ecore_wr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 hw_addr, u32 val);
/**
* @brief ecore_rd - Read value from BAR using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param val
* @param hw_addr
*/
u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr);
/**
* @brief ecore_memcpy_from - copy n bytes from BAR using the given
* ptt
*
* @param p_hwfn
* @param p_ptt
* @param dest
* @param hw_addr
* @param n
*/
void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
void *dest, u32 hw_addr, osal_size_t n);
/**
* @brief ecore_memcpy_to - copy n bytes to BAR using the given
* ptt
*
* @param p_hwfn
* @param p_ptt
* @param hw_addr
* @param src
* @param n
*/
void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 hw_addr, void *src, osal_size_t n);
/**
* @brief ecore_fid_pretend - pretend to another function when
* accessing the ptt window. There is no way to unpretend
* a function. The only way to cancel a pretend is to
* pretend back to the original function.
*
* @param p_hwfn
* @param p_ptt
* @param fid - fid field of the pxp_pretend structure. Can contain
* either a pf or a vf; the port/path fields are don't-care.
*/
void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 fid);
/**
* @brief ecore_port_pretend - pretend to another port when
* accessing the ptt window
*
* @param p_hwfn
* @param p_ptt
* @param port_id - the port to pretend to
*/
void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 port_id);
/**
* @brief ecore_port_unpretend - cancel any previously set port
* pretend
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
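/* Usage sketch (illustrative only): pretend to a VF, issue an access, then
 * cancel the pretend by pretending back to the PF's own FID. `vf_fid',
 * `pf_fid', `SOME_GRC_ADDR' and `val' are hypothetical names assumed for
 * the example, not symbols provided by this header:
 *
 *	ecore_fid_pretend(p_hwfn, p_ptt, vf_fid);
 *	ecore_wr(p_hwfn, p_ptt, SOME_GRC_ADDR, val);
 *	ecore_fid_pretend(p_hwfn, p_ptt, pf_fid);
 *
 * Port pretends are similar, except that ecore_port_unpretend() exists to
 * cancel them explicitly.
 */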
/**
* @brief ecore_vfid_to_concrete - build a concrete FID for a
* given VF ID
*
* @param p_hwfn
* @param p_ptt
* @param vfid
*/
u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid);
/**
* @brief ecore_dmae_info_alloc - Init the dmae_info structure
* which is part of p_hwfn.
* @param p_hwfn
*/
enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_dmae_info_free - Free the dmae_info structure
* which is part of p_hwfn
*
* @param p_hwfn
*/
void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn);
union ecore_qm_pq_params {
struct {
u8 tc;
} core;
struct {
u8 is_vf;
u8 vf_id;
u8 tc;
} eth;
};
u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
enum protocol_type proto, union ecore_qm_pq_params *params);
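/* Example (sketch only; `tc' is assumed to be the caller's traffic class):
 * selecting the Tx PQ of a PF ETH queue:
 *
 *	union ecore_qm_pq_params pq_params;
 *	u16 pq_id;
 *
 *	OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
 *	pq_params.eth.tc = tc;
 *	pq_id = ecore_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);
 *
 * For a VF queue, eth.is_vf is set and eth.vf_id filled in as well.
 */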
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
const u8 *fw_data);
void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
enum ecore_hw_err_type err_type);
#endif /* __ECORE_HW_H__ */

View File

@ -0,0 +1,49 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef _ECORE_IGU_DEF_H_
#define _ECORE_IGU_DEF_H_
/* Fields of IGU PF CONFIGURATION REGISTER */
#define IGU_PF_CONF_FUNC_EN (0x1 << 0) /* function enable */
#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2) /* INT enable */
#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
/* Fields of IGU VF CONFIGURATION REGISTER */
#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */
#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
#define IGU_VF_CONF_PARENT_MASK (0xF) /* Parent PF */
#define IGU_VF_CONF_PARENT_SHIFT 5 /* Parent PF */
/* IGU control commands */
enum igu_ctrl_cmd {
IGU_CTRL_CMD_TYPE_RD,
IGU_CTRL_CMD_TYPE_WR,
MAX_IGU_CTRL_CMD
};
/* Control register for the IGU command register
*/
struct igu_ctrl_reg {
u32 ctrl_data;
#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */
#define IGU_CTRL_REG_FID_SHIFT 0
#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */
#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16
#define IGU_CTRL_REG_RESERVED_MASK 0x1
#define IGU_CTRL_REG_RESERVED_SHIFT 28
#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */
#define IGU_CTRL_REG_TYPE_SHIFT 31
};
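/* The mask/shift pairs above describe sub-fields of ctrl_data. A raw
 * composition of a write command for a given fid and PXP address could look
 * like the sketch below; `fid' and `pxp_addr' are assumed inputs, and any
 * SET_FIELD-style helper available in the driver could be used instead:
 *
 *	struct igu_ctrl_reg ctl = { 0 };
 *
 *	ctl.ctrl_data =
 *		(((u32)fid & IGU_CTRL_REG_FID_MASK) <<
 *		 IGU_CTRL_REG_FID_SHIFT) |
 *		(((u32)pxp_addr & IGU_CTRL_REG_PXP_ADDR_MASK) <<
 *		 IGU_CTRL_REG_PXP_ADDR_SHIFT) |
 *		((u32)IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT);
 */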
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,263 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef _INIT_FW_FUNCS_H
#define _INIT_FW_FUNCS_H
/* forward declarations */
struct init_qm_pq_params;
/**
* @brief ecore_qm_pf_mem_size - prepare QM ILT sizes
*
* Returns the required host memory size in 4KB units.
* Must be called before all QM init HSI functions.
*
* @param pf_id - physical function ID
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
* @param num_pf_pqs - number of PQs used by this PF
* @param num_vf_pqs - number of PQs used by VFs of this PF
*
* @return The required host memory size in 4KB units.
*/
u32 ecore_qm_pf_mem_size(u8 pf_id,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
/**
* @brief ecore_qm_common_rt_init -
* Prepare QM runtime init values for the engine phase
*
* @param p_hwfn
* @param max_ports_per_engine - max number of ports per engine in HW
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
* @param pf_rl_en - enable per-PF rate limiters
* @param pf_wfq_en - enable per-PF WFQ
* @param vport_rl_en - enable per-VPORT rate limiters
* @param vport_wfq_en - enable per-VPORT WFQ
* @param port_params - array of size MAX_NUM_PORTS with parameters for each port
*
* @return 0 on success, -1 on error.
*/
int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
u8 max_ports_per_engine,
u8 max_phys_tcs_per_port,
bool pf_rl_en,
bool pf_wfq_en,
bool vport_rl_en,
bool vport_wfq_en,
struct init_qm_port_params
port_params[MAX_NUM_PORTS]);
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
bool is_first_pf,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
u8 start_vport,
u8 num_vports,
u16 pf_wfq,
u32 pf_rl,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params);
/**
* @brief ecore_init_pf_wfq Initializes the WFQ weight of the specified PF
*
* @param p_hwfn
* @param p_ptt - ptt window used for writing the registers
* @param pf_id - PF ID
* @param pf_wfq - WFQ weight. Must be non-zero.
*
* @return 0 on success, -1 on error.
*/
int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
/**
* @brief ecore_init_pf_rl Initializes the rate limit of the specified PF
*
* @param p_hwfn
* @param p_ptt - ptt window used for writing the registers
* @param pf_id - PF ID
* @param pf_rl - rate limit in Mb/sec units
*
* @return 0 on success, -1 on error.
*/
int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl);
/**
* @brief ecore_init_vport_wfq Initializes the WFQ weight of the specified VPORT
*
* @param p_hwfn
* @param p_ptt - ptt window used for writing the registers
* @param first_tx_pq_id- An array containing the first Tx PQ ID associated
* with the VPORT for each TC. This array is filled by
* ecore_qm_pf_rt_init
* @param vport_wfq - WFQ weight. Must be non-zero.
*
* @return 0 on success, -1 on error.
*/
int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
/**
* @brief ecore_init_vport_rl Initializes the rate limit of the specified VPORT
*
* @param p_hwfn
* @param p_ptt - ptt window used for writing the registers
* @param vport_id - VPORT ID
* @param vport_rl - rate limit in Mb/sec units
*
* @return 0 on success, -1 on error.
*/
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl);
/**
* @brief ecore_send_qm_stop_cmd Sends a stop command to the QM
*
* @param p_hwfn
* @param p_ptt - ptt window used for writing the registers
* @param is_release_cmd - true for release, false for stop.
* @param is_tx_pq - true for Tx PQs, false for Other PQs.
* @param start_pq - first PQ ID to stop
* @param num_pqs - Number of PQs to stop, starting from start_pq.
*
* @return bool, true if successful, false if timeout occurred while
* waiting for QM command done.
*/
bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool is_release_cmd,
bool is_tx_pq, u16 start_pq, u16 num_pqs);
/**
* @brief ecore_init_nig_ets - initializes the NIG ETS arbiter
*
* Based on weight/priority requirements per-TC.
*
* @param p_ptt - ptt window used for writing the registers.
* @param req - the NIG ETS initialization requirements.
* @param is_lb - if set, the loopback port arbiter is initialized, otherwise
* the physical port arbiter is initialized. The pure-LB TC
* requirements are ignored when is_lb is cleared.
*/
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_ets_req *req, bool is_lb);
/**
* @brief ecore_init_nig_lb_rl - initializes the NIG LB RLs
*
* Based on global and per-TC rate requirements
*
* @param p_ptt - ptt window used for writing the registers.
* @param req - the NIG LB RLs initialization requirements.
*/
void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_lb_rl_req *req);
/**
* @brief ecore_init_nig_pri_tc_map - initializes the NIG priority to TC map.
*
* Assumes valid arguments.
*
* @param p_ptt - ptt window used for writing the registers.
* @param req - required mapping from priorities to TCs.
*/
void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_pri_tc_map_req *req);
/**
* @brief ecore_init_prs_ets - initializes the PRS Rx ETS arbiter
*
* Based on weight/priority requirements per-TC.
*
* @param p_ptt - ptt window used for writing the registers.
* @param req - the PRS ETS initialization requirements.
*/
void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct init_ets_req *req);
/**
* @brief ecore_init_brb_ram - initializes BRB RAM sizes per TC
*
* Based on weight/priority requirements per-TC.
*
* @param p_ptt - ptt window used for writing the registers.
* @param req - the BRB RAM initialization requirements.
*/
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct init_brb_ram_req *req);
/**
* @brief ecore_set_engine_mf_ovlan_eth_type - initializes the NIG, PRS, PBF
* and LLH ethType registers to the input ethType.
* Should be called once per engine if the engine is in BD mode.
*
* @param p_ptt - ptt window used for writing the registers.
* @param ethType - etherType to configure
*/
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 eth_type);
/**
* @brief ecore_set_port_mf_ovlan_eth_type - initializes the DORQ ethType
* registers to the input ethType.
* Should be called once per port.
*
* @param p_ptt - ptt window used for writing the registers.
* @param ethType - etherType to configure
*/
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 eth_type);
/**
* @brief ecore_set_vxlan_dest_port - init vxlan tunnel destination udp port
*
* @param p_ptt - ptt window used for writing the registers.
* @param dest_port - vxlan destination udp port.
*/
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 dest_port);
/**
* @brief ecore_set_vxlan_enable - enable or disable VXLAN tunnel in HW
*
* @param p_ptt - ptt window used for writing the registers.
* @param vxlan_enable - vxlan enable flag.
*/
void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, bool vxlan_enable);
/**
* @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
*
* @param p_ptt - ptt window used for writing the registers.
* @param eth_gre_enable - eth GRE enable flag.
* @param ip_gre_enable - IP GRE enable flag.
*/
void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool eth_gre_enable, bool ip_gre_enable);
/**
* @brief ecore_set_geneve_dest_port - init geneve tunnel destination udp port
*
* @param p_ptt - ptt window used for writing the registers.
* @param dest_port - geneve destination udp port.
*/
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 dest_port);
/**
* @brief ecore_set_geneve_enable - enable or disable GENEVE tunnel in HW
*
* @param p_ptt - ptt window used for writing the registers.
* @param eth_geneve_enable - eth GENEVE enable flag.
* @param ip_geneve_enable - IP GENEVE enable flag.
*/
void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool eth_geneve_enable, bool ip_geneve_enable);
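/* A possible VXLAN bring-up sequence using the helpers above (sketch only;
 * 4789 is the IANA-assigned VXLAN UDP port, shown here purely as an
 * example value):
 *
 *	ecore_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
 *	ecore_set_vxlan_enable(p_hwfn, p_ptt, true);
 *
 * GRE and GENEVE configuration follows the same pattern with their
 * respective dest-port and enable helpers.
 */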
#endif

View File

@ -0,0 +1,595 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
/* include the precompiled configuration values - only once */
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_iro_values.h"
#include "ecore_gtt_values.h"
#include "reg_addr.h"
#include "ecore_init_ops.h"
#define ECORE_INIT_MAX_POLL_COUNT 100
#define ECORE_INIT_POLL_PERIOD_US 500
void ecore_init_iro_array(struct ecore_dev *p_dev)
{
p_dev->iro_arr = iro_arr;
}
/* Runtime configuration helpers */
void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
{
int i;
for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
p_hwfn->rt_data.b_valid[i] = false;
}
void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
p_hwfn->rt_data.init_val[rt_offset] = val;
p_hwfn->rt_data.b_valid[rt_offset] = true;
}
void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
u32 rt_offset, u32 *p_val, osal_size_t size)
{
osal_size_t i;
for (i = 0; i < size / sizeof(u32); i++) {
p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
p_hwfn->rt_data.b_valid[rt_offset + i] = true;
}
}
static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 addr,
u16 rt_offset,
u16 size, bool b_must_dmae)
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
enum _ecore_status_t rc = ECORE_SUCCESS;
u16 i, segment;
/* Since not all RT entries are initialized, go over the RT and
* for each segment of initialized values use DMA.
*/
for (i = 0; i < size; i++) {
if (!p_valid[i])
continue;
/* In case there isn't any wide-bus configuration here,
* simply write the data instead of using dmae.
*/
if (!b_must_dmae) {
ecore_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
continue;
}
/* Start of a new segment */
for (segment = 1; i + segment < size; segment++)
if (!p_valid[i + segment])
break;
rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
(osal_uintptr_t)(p_init_val + i),
addr + (i << 2), segment, 0);
if (rc != ECORE_SUCCESS)
return rc;
/* Jump over the entire segment, including invalid entry */
i += segment;
}
return rc;
}
enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_rt_data *rt_data = &p_hwfn->rt_data;
rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(bool) * RUNTIME_ARRAY_SIZE);
if (!rt_data->b_valid)
return ECORE_NOMEM;
rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(u32) * RUNTIME_ARRAY_SIZE);
if (!rt_data->init_val) {
OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
return ECORE_NOMEM;
}
return ECORE_SUCCESS;
}
void ecore_init_free(struct ecore_hwfn *p_hwfn)
{
OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
}
static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 addr,
u32 dmae_data_offset,
u32 size, const u32 *p_buf,
bool b_must_dmae,
bool b_can_dmae)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
/* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
!b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
const u32 *data = p_buf + dmae_data_offset;
u32 i;
for (i = 0; i < size; i++)
ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
} else {
rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
(osal_uintptr_t)(p_buf +
dmae_data_offset),
addr, size, 0);
}
return rc;
}
static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 addr, u32 fill,
u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
return ecore_dmae_host2grc(p_hwfn, p_ptt,
(osal_uintptr_t)&zero_buffer[0],
addr, fill_count,
ECORE_DMAE_FLAG_RW_REPL_SRC);
}
static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 addr, u32 fill, u32 fill_count)
{
u32 i;
for (i = 0; i < fill_count; i++, addr += sizeof(u32))
ecore_wr(p_hwfn, p_ptt, addr, fill);
}
static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_write_op *cmd,
bool b_must_dmae,
bool b_can_dmae)
{
#ifdef CONFIG_ECORE_ZIPPED_FW
u32 offset, output_len, input_len, max_size;
#endif
u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
struct ecore_dev *p_dev = p_hwfn->p_dev;
enum _ecore_status_t rc = ECORE_SUCCESS;
union init_array_hdr *hdr;
const u32 *array_data;
u32 size, addr, data;
array_data = p_dev->fw_data->arr_data;
data = OSAL_LE32_TO_CPU(cmd->data);
addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
hdr = (union init_array_hdr *)
(uintptr_t)(array_data + dmae_array_offset);
data = OSAL_LE32_TO_CPU(hdr->raw.data);
switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
offset = dmae_array_offset + 1;
input_len = GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
max_size = MAX_ZIPPED_SIZE * 4;
OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);
output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
(u8 *)(uintptr_t)&array_data[offset],
max_size,
(u8 *)p_hwfn->unzip_buf);
if (output_len) {
rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
output_len,
p_hwfn->unzip_buf,
b_must_dmae, b_can_dmae);
} else {
DP_NOTICE(p_hwfn, true, "Failed to unzip dmae data\n");
rc = ECORE_INVAL;
}
#else
DP_NOTICE(p_hwfn, true,
"Using zipped firmware without config enabled\n");
rc = ECORE_INVAL;
#endif
break;
case INIT_ARR_PATTERN:
{
u32 repeats = GET_FIELD(data,
INIT_ARRAY_PATTERN_HDR_REPETITIONS);
u32 i;
size = GET_FIELD(data,
INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
for (i = 0; i < repeats; i++, addr += size << 2) {
rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
dmae_array_offset +
1, size, array_data,
b_must_dmae,
b_can_dmae);
if (rc)
break;
}
break;
}
case INIT_ARR_STANDARD:
size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
dmae_array_offset + 1,
size, array_data,
b_must_dmae, b_can_dmae);
break;
}
return rc;
}
/* init_ops write command */
static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_write_op *p_cmd,
bool b_can_dmae)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
bool b_must_dmae;
u32 addr, data;
data = OSAL_LE32_TO_CPU(p_cmd->data);
b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
/* Sanitize */
if (b_must_dmae && !b_can_dmae) {
DP_NOTICE(p_hwfn, true,
"Need to write to %08x for Wide-bus but DMAE isn't"
" allowed\n",
addr);
return ECORE_INVAL;
}
switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
case INIT_SRC_INLINE:
data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
ecore_wr(p_hwfn, p_ptt, addr, data);
break;
case INIT_SRC_ZEROS:
data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
if (b_must_dmae || (b_can_dmae && (data >= 64)))
rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
else
ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
break;
case INIT_SRC_ARRAY:
rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
b_must_dmae, b_can_dmae);
break;
case INIT_SRC_RUNTIME:
ecore_init_rt(p_hwfn, p_ptt, addr,
OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
b_must_dmae);
break;
}
return rc;
}
static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
{
return (val == expected_val);
}
static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
{
return (val & expected_val) == expected_val;
}
static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
{
return (val | expected_val) > 0;
}
/* init_ops read/poll commands */
static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct init_read_op *cmd)
{
bool (*comp_check)(u32 val, u32 expected_val);
u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
u32 data, addr, poll;
int i;
data = OSAL_LE32_TO_CPU(cmd->op_data);
addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
delay *= 100;
#endif
val = ecore_rd(p_hwfn, p_ptt, addr);
if (poll == INIT_POLL_NONE)
return;
switch (poll) {
case INIT_POLL_EQ:
comp_check = comp_eq;
break;
case INIT_POLL_OR:
comp_check = comp_or;
break;
case INIT_POLL_AND:
comp_check = comp_and;
break;
default:
DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
cmd->op_data);
return;
}
data = OSAL_LE32_TO_CPU(cmd->expected_val);
for (i = 0;
i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) {
OSAL_UDELAY(delay);
val = ecore_rd(p_hwfn, p_ptt, addr);
}
if (i == ECORE_INIT_MAX_POLL_COUNT)
DP_ERR(p_hwfn,
"Timeout when polling reg: 0x%08x [ Waiting-for: %08x"
" Got: %08x (comparison %08x)]\n",
addr, OSAL_LE32_TO_CPU(cmd->expected_val), val,
OSAL_LE32_TO_CPU(cmd->op_data));
}
/* init_ops callbacks entry point */
static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_callback_op *p_cmd)
{
DP_NOTICE(p_hwfn, true,
"Currently init values have no need of callbacks\n");
}
static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
u16 *p_offset, int modes)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
const u8 *modes_tree_buf;
u8 arg1, arg2, tree_val;
modes_tree_buf = p_dev->fw_data->modes_tree_buf;
tree_val = modes_tree_buf[(*p_offset)++];
switch (tree_val) {
case INIT_MODE_OP_NOT:
return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
case INIT_MODE_OP_OR:
arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
return arg1 | arg2;
case INIT_MODE_OP_AND:
arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
return arg1 & arg2;
default:
tree_val -= MAX_INIT_MODE_OPS;
return (modes & (1 << tree_val)) ? 1 : 0;
}
}
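/* The modes tree is a prefix expression over single-byte tokens: operator
 * bytes (NOT/OR/AND) are followed by their operand sub-expressions, and any
 * other byte denotes a mode bit (token - MAX_INIT_MODE_OPS). For instance,
 * assuming hypothetical mode bits 0 ("ASIC") and 1 ("4-port"), the buffer
 *
 *	{ INIT_MODE_OP_AND,
 *	  MAX_INIT_MODE_OPS + 0,
 *	  INIT_MODE_OP_NOT,
 *	  MAX_INIT_MODE_OPS + 1 }
 *
 * evaluates to 1 only when bit 0 is set and bit 1 is clear in `modes'.
 */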
static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
struct init_if_mode_op *p_cmd, int modes)
{
u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);
if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
return 0;
else
return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
INIT_IF_MODE_OP_CMD_OFFSET);
}
static u32 ecore_init_cmd_phase(struct ecore_hwfn *p_hwfn,
struct init_if_phase_op *p_cmd,
u32 phase, u32 phase_id)
{
u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
INIT_IF_PHASE_OP_CMD_OFFSET);
else
return 0;
}
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int phase, int phase_id, int modes)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 cmd_num, num_init_ops;
union init_op *init_ops;
bool b_dmae = false;
num_init_ops = p_dev->fw_data->init_ops_size;
init_ops = p_dev->fw_data->init_ops;
#ifdef CONFIG_ECORE_ZIPPED_FW
p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
MAX_ZIPPED_SIZE * 4);
if (!p_hwfn->unzip_buf) {
DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
return ECORE_NOMEM;
}
#endif
for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
union init_op *cmd = &init_ops[cmd_num];
u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);
switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
case INIT_OP_WRITE:
rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
b_dmae);
break;
case INIT_OP_READ:
ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
break;
case INIT_OP_IF_MODE:
cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
modes);
break;
case INIT_OP_IF_PHASE:
cmd_num += ecore_init_cmd_phase(p_hwfn, &cmd->if_phase,
phase, phase_id);
b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
break;
case INIT_OP_DELAY:
/* ecore_init_run is always invoked from
* sleep-able context
*/
OSAL_UDELAY(cmd->delay.delay);
break;
case INIT_OP_CALLBACK:
ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
break;
}
if (rc)
break;
}
#ifdef CONFIG_ECORE_ZIPPED_FW
OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
#endif
return rc;
}
void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
{
u32 gtt_base;
u32 i;
#ifndef ASIC_ONLY
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
/* This is done by MFW on ASIC; regardless, this should only
* be done once per chip [i.e., common]. Implementation is
* not too bright, but it should work on the simple FPGA/EMUL
* scenarios.
*/
bool initialized = false; /* @DPDK */
int poll_cnt = 500;
u32 val;
/* initialize PTT/GTT (poll for completion) */
if (!initialized) {
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
PGLUE_B_REG_START_INIT_PTT_GTT, 1);
initialized = true;
}
do {
/* ptt might be overridden by HW until this is done */
OSAL_UDELAY(10);
ecore_ptt_invalidate(p_hwfn);
val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
PGLUE_B_REG_INIT_DONE_PTT_GTT);
} while ((val != 1) && --poll_cnt);
if (!poll_cnt)
DP_ERR(p_hwfn,
"PGLUE_B_REG_INIT_DONE didn't complete\n");
}
#endif
/* Set the global windows */
gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
if (pxp_global_win[i])
REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
pxp_global_win[i]);
}
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
const u8 *data)
{
struct ecore_fw_data *fw = p_dev->fw_data;
#ifdef CONFIG_ECORE_BINARY_FW
struct bin_buffer_hdr *buf_hdr;
u32 offset, len;
if (!data) {
DP_NOTICE(p_dev, true, "Invalid fw data\n");
return ECORE_INVAL;
}
buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)data;
offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset;
fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(data + offset));
offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
fw->init_ops = (union init_op *)((uintptr_t)(data + offset));
offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
fw->arr_data = (u32 *)((uintptr_t)(data + offset));
offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
fw->modes_tree_buf = (u8 *)((uintptr_t)(data + offset));
len = buf_hdr[BIN_BUF_INIT_CMD].length;
fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
fw->init_ops = (union init_op *)init_ops;
fw->arr_data = (u32 *)init_val;
fw->modes_tree_buf = (u8 *)modes_tree_buf;
fw->init_ops_size = init_ops_size;
#endif
return ECORE_SUCCESS;
}
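/* Caller sketch (illustrative only; `fw_image' is a hypothetical pointer to
 * the raw firmware blob, e.g. loaded from a file by the PMD): with
 * CONFIG_ECORE_BINARY_FW the `data' argument points at the firmware image,
 * whose header is an array of struct bin_buffer_hdr entries locating each
 * section:
 *
 *	rc = ecore_init_fw_data(p_dev, fw_image);
 *
 * Once the fw_data pointers are set, ecore_init_alloc() and ecore_init_run()
 * can consume the init ops and runtime arrays they reference.
 */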

View File

@ -0,0 +1,103 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_INIT_OPS__
#define __ECORE_INIT_OPS__
#include "ecore.h"
/**
* @brief ecore_init_iro_array - init iro_arr.
*
*
* @param p_dev
*/
void ecore_init_iro_array(struct ecore_dev *p_dev);
/**
* @brief ecore_init_run - Run the init-sequence.
*
*
* @param p_hwfn
* @param p_ptt
* @param phase
* @param phase_id
* @param modes
* @return _ecore_status_t
*/
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int phase, int phase_id, int modes);
/**
* @brief ecore_init_alloc - Allocate the RT array and store the 'values' ptrs.
*
*
* @param p_hwfn
*
* @return _ecore_status_t
*/
enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_init_free - Free the RT array.
*
*
* @param p_hwfn
*/
void ecore_init_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_init_clear_rt_data - Clears the runtime init array.
*
*
* @param p_hwfn
*/
void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_init_store_rt_reg - Store a configuration value in the RT array.
*
*
* @param p_hwfn
* @param rt_offset
* @param val
*/
void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val);
#define STORE_RT_REG(hwfn, offset, val) \
ecore_init_store_rt_reg(hwfn, offset, val)
#define OVERWRITE_RT_REG(hwfn, offset, val) \
ecore_init_store_rt_reg(hwfn, offset, val)
/**
* @brief ecore_init_store_rt_agg - Store a block of values in the RT array.
*
*
* @param p_hwfn
* @param rt_offset
* @param val
* @param size
*/
void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
u32 rt_offset, u32 *val, osal_size_t size);
#define STORE_RT_REG_AGG(hwfn, offset, val) \
ecore_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
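/* Typical usage during the init sequence (illustrative; real offsets come
 * from the generated ecore_rt_defs.h, and the names used below are only
 * examples of that convention, not definitions made by this header):
 *
 *	STORE_RT_REG(p_hwfn, EXAMPLE_REG_RT_OFFSET, 0);
 *
 *	u32 vals[4] = { 0 };
 *	ecore_init_store_rt_agg(p_hwfn, EXAMPLE_BLOCK_RT_OFFSET, vals,
 *				sizeof(vals));
 *
 * The stored values are applied to the chip later, when ecore_init_run()
 * processes INIT_SRC_RUNTIME write ops via ecore_init_rt().
 */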
/**
* @brief ecore_gtt_init -
* Initialize GTT global windows and set admin window
* related params of GTT/PTT to default values.
*
* @param p_hwfn
*/
void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
#endif /* __ECORE_INIT_OPS__ */

File diff suppressed because it is too large

View File

@ -0,0 +1,234 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_INT_H__
#define __ECORE_INT_H__
#include "ecore.h"
#include "ecore_int_api.h"
#define ECORE_CAU_DEF_RX_TIMER_RES 0
#define ECORE_CAU_DEF_TX_TIMER_RES 0
#define ECORE_SB_ATT_IDX 0x0001
#define ECORE_SB_EVENT_MASK 0x0003
#define SB_ALIGNED_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
struct ecore_igu_block {
u8 status;
#define ECORE_IGU_STATUS_FREE 0x01
#define ECORE_IGU_STATUS_VALID 0x02
#define ECORE_IGU_STATUS_PF 0x04
u8 vector_number;
u8 function_id;
u8 is_pf;
};
struct ecore_igu_map {
struct ecore_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
};
struct ecore_igu_info {
struct ecore_igu_map igu_map;
u16 igu_dsb_id;
u16 igu_base_sb;
u16 igu_base_sb_iov;
u16 igu_sb_cnt;
u16 igu_sb_cnt_iov;
u16 free_blks;
};
/* TODO: names of these functions may change... */
void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool b_set, bool b_slowpath);
void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_int_igu_read_cam - Reads the IGU CAM.
* This function needs to be called during hardware
* prepare. It reads the info from igu cam to know which
* status block is the default / base status block etc.
*
* @param p_hwfn
* @param p_ptt
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
typedef enum _ecore_status_t (*ecore_int_comp_cb_t) (struct ecore_hwfn *p_hwfn,
void *cookie);
/**
* @brief ecore_int_register_cb - Register a callback function for the
* slowhwfn status block.
*
* Every protocol that uses the slowhwfn status block
* should register a callback function that will be called
* once there is an update of the sp status block.
*
* @param p_hwfn
* @param comp_cb - function to be called when there is an
* interrupt on the sp sb
*
* @param cookie - passed to the callback function
* @param sb_idx - OUT parameter which gives the chosen index
* for this protocol.
* @param p_fw_cons - pointer to the actual address of the
* consumer for this protocol.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
ecore_int_comp_cb_t comp_cb,
void *cookie,
u8 *sb_idx, __le16 **p_fw_cons);
/**
* @brief ecore_int_unregister_cb - Unregisters callback
* function from sp sb.
* Partner of ecore_int_register_cb -> should be called
* when no longer required.
*
* @param p_hwfn
* @param pi
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi);
/**
* @brief ecore_int_get_sp_sb_id - Get the slowhwfn sb id.
*
* @param p_hwfn
*
* @return u16
*/
u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn);
/**
* @brief Status block cleanup. Should be called for each status
* block that will be used -> both PF / VF
*
* @param p_hwfn
* @param p_ptt
* @param sb_id - igu status block id
* @param cleanup_set - set(1) / clear(0)
* @param opaque_fid - the function for which to perform
* cleanup, for example a PF on behalf of
* its VFs.
*/
void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 sb_id, bool cleanup_set, u16 opaque_fid);
/**
* @brief Status block cleanup. Should be called for each status
* block that will be used -> both PF / VF
*
* @param p_hwfn
* @param p_ptt
* @param sb_id - igu status block id
* @param opaque - opaque fid of the sb owner.
* @param cleanup_set - set(1) / clear(0)
*/
void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 sb_id, u16 opaque, bool b_set);
/**
* @brief ecore_int_cau_conf - configure cau for a given status
* block
*
* @param p_hwfn
* @param ptt
* @param sb_phys
* @param igu_sb_id
* @param vf_number
* @param vf_valid
*/
void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
dma_addr_t sb_phys,
u16 igu_sb_id, u16 vf_number, u8 vf_valid);
/**
* @brief ecore_int_alloc
*
* @param p_hwfn
* @param p_ptt
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_int_free
*
* @param p_hwfn
*/
void ecore_int_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_int_setup
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
* @brief - Returns an Rx queue index appropriate for usage with given SB.
*
* @param p_hwfn
* @param sb_id - absolute index of SB
*
* @return index of Rx queue
*/
u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
/**
* @brief - Enable Interrupt & Attention for hw function
*
* @param p_hwfn
* @param p_ptt
* @param int_mode
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_int_igu_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_int_mode int_mode);
/**
* @brief - Initialize CAU status block entry
*
* @param p_hwfn
* @param p_sb_entry
* @param pf_id
* @param vf_number
* @param vf_valid
*/
void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
struct cau_sb_entry *p_sb_entry, u8 pf_id,
u16 vf_number, u8 vf_valid);
#ifndef ASIC_ONLY
#define ECORE_MAPPING_MEMORY_SIZE(dev) \
((CHIP_REV_IS_SLOW(dev) && (!(dev)->b_is_emul_full)) ? \
136 : NUM_OF_SBS(dev))
#else
#define ECORE_MAPPING_MEMORY_SIZE(dev) NUM_OF_SBS(dev)
#endif
#endif /* __ECORE_INT_H__ */

View File

@ -0,0 +1,277 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_INT_API_H__
#define __ECORE_INT_API_H__
#ifndef __EXTRACT__LINUX__
#define ECORE_SB_IDX 0x0002
#define RX_PI 0
#define TX_PI(tc) (RX_PI + 1 + tc)
#ifndef ECORE_INT_MODE
#define ECORE_INT_MODE
enum ecore_int_mode {
ECORE_INT_MODE_INTA,
ECORE_INT_MODE_MSIX,
ECORE_INT_MODE_MSI,
ECORE_INT_MODE_POLL,
};
#endif
struct ecore_sb_info {
struct status_block *sb_virt;
dma_addr_t sb_phys;
u32 sb_ack; /* Last given ack */
u16 igu_sb_id;
void OSAL_IOMEM *igu_addr;
u8 flags;
#define ECORE_SB_INFO_INIT 0x1
#define ECORE_SB_INFO_SETUP 0x2
#ifdef ECORE_CONFIG_DIRECT_HWFN
struct ecore_hwfn *p_hwfn;
#endif
struct ecore_dev *p_dev;
};
struct ecore_sb_cnt_info {
int sb_cnt;
int sb_iov_cnt;
int sb_free_blk;
};
static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
{
u32 prod = 0;
u16 rc = 0;
/* barrier(); status block is written to by the chip */
/* FIXME: need some sort of barrier. */
prod = OSAL_LE32_TO_CPU(sb_info->sb_virt->prod_index) &
STATUS_BLOCK_PROD_INDEX_MASK;
if (sb_info->sb_ack != prod) {
sb_info->sb_ack = prod;
rc |= ECORE_SB_IDX;
}
OSAL_MMIOWB(sb_info->p_dev);
return rc;
}
/**
*
* @brief This function creates an update command for interrupts that is
* written to the IGU.
*
* @param sb_info - This is the structure allocated and
* initialized per status block. Assumption is
* that it was initialized using ecore_sb_init
* @param int_cmd - Enable/Disable/Nop
* @param upd_flg - whether igu consumer should be
* updated.
*
* @return OSAL_INLINE void
*/
static OSAL_INLINE void ecore_sb_ack(struct ecore_sb_info *sb_info,
enum igu_int_cmd int_cmd, u8 upd_flg)
{
struct igu_prod_cons_update igu_ack = { 0 };
igu_ack.sb_id_and_flags =
((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
(upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
(int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
(IGU_SEG_ACCESS_REG << IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
#ifdef ECORE_CONFIG_DIRECT_HWFN
DIRECT_REG_WR(sb_info->p_hwfn, sb_info->igu_addr,
igu_ack.sb_id_and_flags);
#else
DIRECT_REG_WR(OSAL_NULL, sb_info->igu_addr, igu_ack.sb_id_and_flags);
#endif
/* Both segments (interrupts & acks) are written to the same address;
* Need to guarantee all commands will be received (in-order) by HW.
*/
OSAL_MMIOWB(sb_info->p_dev);
OSAL_BARRIER(sb_info->p_dev);
}
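/* A minimal fastpath handler built on the two inline helpers above might
 * look like the sketch below. `handle_completions' is a placeholder for the
 * protocol driver's own processing; IGU_INT_DISABLE/IGU_INT_ENABLE are the
 * HSI interrupt commands referred to by the int_cmd description:
 *
 *	static void example_sb_handler(struct ecore_sb_info *sb)
 *	{
 *		ecore_sb_ack(sb, IGU_INT_DISABLE, 0);
 *		while (ecore_sb_update_sb_idx(sb) & ECORE_SB_IDX)
 *			handle_completions(sb);
 *		ecore_sb_ack(sb, IGU_INT_ENABLE, 1);
 *	}
 */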
#ifdef ECORE_CONFIG_DIRECT_HWFN
static OSAL_INLINE void __internal_ram_wr(struct ecore_hwfn *p_hwfn,
void OSAL_IOMEM *addr,
int size, u32 *data)
#else
static OSAL_INLINE void __internal_ram_wr(void *p_hwfn,
void OSAL_IOMEM *addr,
int size, u32 *data)
#endif
{
unsigned int i;
for (i = 0; i < size / sizeof(*data); i++)
DIRECT_REG_WR(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i], data[i]);
}
#ifdef ECORE_CONFIG_DIRECT_HWFN
static OSAL_INLINE void internal_ram_wr(struct ecore_hwfn *p_hwfn,
void OSAL_IOMEM *addr,
int size, u32 *data)
{
__internal_ram_wr(p_hwfn, addr, size, data);
}
#else
static OSAL_INLINE void internal_ram_wr(void OSAL_IOMEM *addr,
int size, u32 *data)
{
__internal_ram_wr(OSAL_NULL, addr, size, data);
}
#endif
#endif
struct ecore_hwfn;
struct ecore_ptt;
enum ecore_coalescing_fsm {
ECORE_COAL_RX_STATE_MACHINE,
ECORE_COAL_TX_STATE_MACHINE
};
/**
* @brief ecore_int_cau_conf_pi - configure cau for a given
* status block
*
* @param p_hwfn
* @param p_ptt
* @param igu_sb_id
* @param pi_index
* @param state
* @param timeset
*/
void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 igu_sb_id,
u32 pi_index,
enum ecore_coalescing_fsm coalescing_fsm,
u8 timeset);
/**
*
* @brief ecore_int_igu_enable_int - enable device interrupts
*
* @param p_hwfn
* @param p_ptt
* @param int_mode - interrupt mode to use
*/
void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_int_mode int_mode);
/**
*
* @brief ecore_int_igu_disable_int - disable device interrupts
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
*
* @brief ecore_int_igu_read_sisr_reg - Reads the single isr multiple dpc
* register from igu.
*
* @param p_hwfn
*
* @return u64
*/
u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn);
#define ECORE_SP_SB_ID 0xffff
/**
* @brief ecore_int_sb_init - Initializes the sb_info structure.
*
* once the structure is initialized it can be passed to sb related functions.
*
* @param p_hwfn
* @param p_ptt
* @param sb_info points to an uninitialized (but
* allocated) sb_info structure
* @param sb_virt_addr
* @param sb_phy_addr
* @param sb_id the sb_id to be used (zero based in driver)
* should use ECORE_SP_SB_ID for SP Status block
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_sb_info *sb_info,
void *sb_virt_addr,
dma_addr_t sb_phy_addr, u16 sb_id);
/**
* @brief ecore_int_sb_setup - Setup the sb.
*
* @param p_hwfn
* @param p_ptt
* @param sb_info initialized sb_info structure
*/
void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info);
/**
* @brief ecore_int_sb_release - releases the sb_info structure.
*
* once the structure is released, its memory can be freed
*
* @param p_hwfn
* @param sb_info points to an allocated sb_info structure
* @param sb_id the sb_id to be used (zero based in driver)
* should never be equal to ECORE_SP_SB_ID
* (SP Status block)
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
struct ecore_sb_info *sb_info,
u16 sb_id);
/**
* @brief ecore_int_sp_dpc - To be called when an interrupt is received on the
* default status block.
*
* @param p_hwfn - pointer to hwfn
*
*/
void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie);
/**
* @brief ecore_int_get_num_sbs - get the number of status
* blocks configured for this function in the igu.
*
* @param p_hwfn
* @param p_sb_cnt_info
*
* @return
*/
void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
struct ecore_sb_cnt_info *p_sb_cnt_info);
/**
* @brief ecore_int_disable_post_isr_release - performs the cleanup post ISR
* release. The API need to be called after releasing all slowpath IRQs
* of the device.
*
* @param p_dev
*
*/
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev);
#endif

View File

@ -0,0 +1,115 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __IRO_H__
#define __IRO_H__
/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
/* Tstorm port statistics */
#define TSTORM_PORT_STAT_OFFSET(port_id) \
(IRO[1].base + ((port_id) * IRO[1].m1))
#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
/* Ustorm VF-PF Channel ready flag */
#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
(IRO[3].base + ((vf_id) * IRO[3].m1))
#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
/* Ustorm Final flr cleanup ack */
#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
(IRO[4].base + ((pf_id) * IRO[4].m1))
#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
/* Ustorm Event ring consumer */
#define USTORM_EQE_CONS_OFFSET(pf_id) \
(IRO[5].base + ((pf_id) * IRO[5].m1))
#define USTORM_EQE_CONS_SIZE (IRO[5].size)
/* Ustorm Common Queue ring consumer */
#define USTORM_COMMON_QUEUE_CONS_OFFSET(global_queue_id) \
(IRO[6].base + ((global_queue_id) * IRO[6].m1))
#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[6].size)
/* Xstorm Integration Test Data */
#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base)
#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size)
/* Ystorm Integration Test Data */
#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
/* Pstorm Integration Test Data */
#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
/* Tstorm Integration Test Data */
#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
/* Mstorm Integration Test Data */
#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
/* Ustorm Integration Test Data */
#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
#define USTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
/* Mstorm queue statistics */
#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[17].base + ((stat_counter_id) * IRO[17].m1))
#define MSTORM_QUEUE_STAT_SIZE (IRO[17].size)
/* Mstorm producers */
#define MSTORM_PRODS_OFFSET(queue_id) \
(IRO[18].base + ((queue_id) * IRO[18].m1))
#define MSTORM_PRODS_SIZE (IRO[18].size)
/* TPA aggregation timeout in us resolution (on ASIC) */
#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[19].base)
#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[19].size)
/* Ustorm queue statistics */
#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[20].base + ((stat_counter_id) * IRO[20].m1))
#define USTORM_QUEUE_STAT_SIZE (IRO[20].size)
/* Ustorm queue zone */
#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
(IRO[21].base + ((queue_id) * IRO[21].m1))
#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[21].size)
/* Pstorm queue statistics */
#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[22].base + ((stat_counter_id) * IRO[22].m1))
#define PSTORM_QUEUE_STAT_SIZE (IRO[22].size)
/* Tstorm last parser message */
#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[23].base)
#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[23].size)
/* Tstorm Eth limit Rx rate */
#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
(IRO[24].base + ((pf_id) * IRO[24].m1))
#define ETH_RX_RATE_LIMIT_SIZE (IRO[24].size)
/* Ystorm queue zone */
#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
(IRO[25].base + ((queue_id) * IRO[25].m1))
#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[25].size)
/* Ystorm cqe producer */
#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
(IRO[26].base + ((rss_id) * IRO[26].m1))
#define YSTORM_TOE_CQ_PROD_SIZE (IRO[26].size)
/* Ustorm cqe producer */
#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
(IRO[27].base + ((rss_id) * IRO[27].m1))
#define USTORM_TOE_CQ_PROD_SIZE (IRO[27].size)
/* Ustorm grq producer */
#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
(IRO[28].base + ((pf_id) * IRO[28].m1))
#define USTORM_TOE_GRQ_PROD_SIZE (IRO[28].size)
/* Tstorm cmdq-cons of given command queue-id */
#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
(IRO[29].base + ((cmdq_queue_id) * IRO[29].m1))
#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[29].size)
#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
(IRO[30].base + ((func_id) * IRO[30].m1) + ((bdq_id) * IRO[30].m2))
#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[30].size)
/* Mstorm rq-cons of given queue-id */
#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) \
(IRO[31].base + ((rq_queue_id) * IRO[31].m1))
#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[31].size)
/* Mstorm bdq-external-producer of given BDQ function ID, BDqueue-id */
#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
(IRO[32].base + ((func_id) * IRO[32].m1) + ((bdq_id) * IRO[32].m2))
#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[32].size)
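/* The IRO array indexed by these macros is the per-device iro_arr installed
 * by ecore_init_iro_array(); this sketch assumes an `IRO' macro or local
 * resolving to that array, which is an assumption of the example rather
 * than something this header defines. Computing the location of the Mstorm
 * statistics of stat counter `stat_id':
 *
 *	u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_id);
 *	u32 len = MSTORM_QUEUE_STAT_SIZE;
 *
 * The offset is relative to the relevant storm RAM base and is typically
 * consumed by ecore_memcpy_from() or a DMAE read.
 */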
#endif /* __IRO_H__ */

View File

@ -0,0 +1,59 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __IRO_VALUES_H__
#define __IRO_VALUES_H__
static const struct iro iro_arr[44] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
{0x4db0, 0x60, 0x0, 0x0, 0x60},
{0x6418, 0x20, 0x0, 0x0, 0x20},
{0x500, 0x8, 0x0, 0x0, 0x4},
{0x480, 0x8, 0x0, 0x0, 0x4},
{0x0, 0x8, 0x0, 0x0, 0x2},
{0x80, 0x8, 0x0, 0x0, 0x2},
{0x4938, 0x0, 0x0, 0x0, 0x78},
{0x3df0, 0x0, 0x0, 0x0, 0x78},
{0x29b0, 0x0, 0x0, 0x0, 0x78},
{0x4d38, 0x0, 0x0, 0x0, 0x78},
{0x56c8, 0x0, 0x0, 0x0, 0x78},
{0x7e48, 0x0, 0x0, 0x0, 0x78},
{0xa28, 0x8, 0x0, 0x0, 0x8},
{0x61f8, 0x10, 0x0, 0x0, 0x10},
{0xb500, 0x30, 0x0, 0x0, 0x30},
{0x95b8, 0x30, 0x0, 0x0, 0x30},
{0x5898, 0x40, 0x0, 0x0, 0x40},
{0x1f8, 0x10, 0x0, 0x0, 0x8},
{0xa228, 0x0, 0x0, 0x0, 0x4},
{0x8050, 0x40, 0x0, 0x0, 0x30},
{0xcf8, 0x8, 0x0, 0x0, 0x8},
{0x2b48, 0x80, 0x0, 0x0, 0x38},
{0xadf0, 0x0, 0x0, 0x0, 0xf0},
{0xaee0, 0x8, 0x0, 0x0, 0x8},
{0x80, 0x8, 0x0, 0x0, 0x8},
{0xac0, 0x8, 0x0, 0x0, 0x8},
{0x2578, 0x8, 0x0, 0x0, 0x8},
{0x24f8, 0x8, 0x0, 0x0, 0x8},
{0x0, 0x8, 0x0, 0x0, 0x8},
{0x200, 0x10, 0x8, 0x0, 0x8},
{0x17f8, 0x8, 0x0, 0x0, 0x2},
{0x19f8, 0x10, 0x8, 0x0, 0x2},
{0xd988, 0x38, 0x0, 0x0, 0x24},
{0x11040, 0x10, 0x0, 0x0, 0x8},
{0x11670, 0x38, 0x0, 0x0, 0x18},
{0xaeb8, 0x30, 0x0, 0x0, 0x10},
{0x86f8, 0x28, 0x0, 0x0, 0x18},
{0xebf8, 0x10, 0x0, 0x0, 0x10},
{0xde08, 0x40, 0x0, 0x0, 0x30},
{0x121a0, 0x38, 0x0, 0x0, 0x8},
{0xf060, 0x20, 0x0, 0x0, 0x20},
{0x2b80, 0x80, 0x0, 0x0, 0x10},
{0x50a0, 0x10, 0x0, 0x0, 0x10},
};
#endif /* __IRO_VALUES_H__ */

File diff suppressed because it is too large

View File

@ -0,0 +1,304 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_MCP_H__
#define __ECORE_MCP_H__
#include "bcm_osal.h"
#include "mcp_public.h"
#include "ecore_mcp_api.h"
/* Using the hwfn number (and not pf_num) is required since in CMT mode,
* the same pf_num may be used by two different hwfns.
* TODO - this shouldn't really be in a .h file, but until all fields
* required during hw-init are placed in their correct place in shmem,
* we need it in ecore_dev.c [for reading the nvram reflection in shmem].
*/
#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (ECORE_IS_BB((p_hwfn)->p_dev) ? \
((rel_pfid) | \
((p_hwfn)->abs_pf_id & 1) << 3) : \
rel_pfid)
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
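/* Worked example (illustrative only): on a BB device where this hwfn's
* abs_pf_id is odd, MCP_PF_ID_BY_REL(p_hwfn, 2) evaluates to
* (2 | (1 << 3)) = 10; on other devices it simply returns the relative pfid.
*/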
/* TODO - this is only correct as long as only BB is supported, and
* no port-swapping is implemented; Afterwards we'll need to fix it.
*/
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
((_p_hwfn)->p_dev->num_ports_in_engines * 2))
struct ecore_mcp_info {
osal_spinlock_t lock; /* Spinlock used for accessing MCP mailbox */
u32 public_base; /* Address of the MCP public area */
u32 drv_mb_addr; /* Address of the driver mailbox */
u32 mfw_mb_addr; /* Address of the MFW mailbox */
u32 port_addr; /* Address of the port configuration (link) */
u16 drv_mb_seq; /* Current driver mailbox sequence */
u16 drv_pulse_seq; /* Current driver pulse sequence */
struct ecore_mcp_link_params link_input;
struct ecore_mcp_link_state link_output;
struct ecore_mcp_link_capabilities link_capabilities;
struct ecore_mcp_function_info func_info;
u8 *mfw_mb_cur;
u8 *mfw_mb_shadow;
u16 mfw_mb_length;
u16 mcp_hist;
};
/**
* @brief Initialize the interface with the MCP
*
* @param p_hwfn - HW func
* @param p_ptt - PTT required for register access
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
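/* Usage sketch (illustrative, not taken from the driver): the expected
* pairing of MCP interface init and teardown declared in this header.
* Error handling is schematic and 'rc' is a local placeholder.
*
*	rc = ecore_mcp_cmd_init(p_hwfn, p_ptt);
*	if (rc != ECORE_SUCCESS)
*		return rc;
*	...
*	ecore_mcp_free(p_hwfn);
*/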
/**
* @brief Initialize the port interface with the MCP
*
* @param p_hwfn
* @param p_ptt
* Can only be called after `num_ports_in_engines' is set
*/
void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Releases resources allocated during the init process.
*
* @param p_hwfn - HW func
* @param p_ptt - PTT required for register access
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn);
/**
* @brief This function is called from the DPC context. After
* pointing PTT to the mfw mb, check for events sent by the MCP
* to the driver and ack them. In case a critical event is
* detected, it will be handled here; otherwise the work will be
* queued to a sleepable work-queue.
*
* @param p_hwfn - HW function
* @param p_ptt - PTT required for register access
* @return enum _ecore_status_t - ECORE_SUCCESS - operation
* was successful.
*/
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief When the MFW doesn't get a driver pulse for a couple of seconds, at
* some threshold before the timeout expires, it will generate an interrupt
* through a dedicated status block (DPSB - Driver Pulse Status
* Block), to which the driver should respond immediately by
* providing a keepalive indication after setting the PTT to the
* driver-MFW mailbox. This function is called directly from the
* DPC upon receiving the DPSB attention.
*
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @return enum _ecore_status_t - ECORE_SUCCESS - operation
* was successful.
*/
enum _ecore_status_t ecore_issue_pulse(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Sends a LOAD_REQ to the MFW and, in case the operation
* succeeds, returns whether this PF is the first on the
* chip/engine/port or function. This function should be
* called when the driver is ready to accept MFW events, after
* Storm initialization is done.
*
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @param p_load_code - The MCP response param containing one
* of the following:
* FW_MSG_CODE_DRV_LOAD_ENGINE
* FW_MSG_CODE_DRV_LOAD_PORT
* FW_MSG_CODE_DRV_LOAD_FUNCTION
* @return enum _ecore_status_t -
* ECORE_SUCCESS - Operation was successful.
* ECORE_BUSY - Operation failed
*/
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *p_load_code);
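/* Usage sketch (illustrative): acting on the load code returned by
* ecore_mcp_load_req(). The branch bodies are placeholders; the
* FW_MSG_CODE_* values are the ones listed above.
*
*	u32 load_code = 0;
*
*	rc = ecore_mcp_load_req(p_hwfn, p_ptt, &load_code);
*	if (rc != ECORE_SUCCESS)
*		return rc;
*	switch (load_code) {
*	case FW_MSG_CODE_DRV_LOAD_ENGINE:
*		...first PF on the engine - perform the full init...
*		break;
*	case FW_MSG_CODE_DRV_LOAD_PORT:
*	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
*		...engine (and possibly port) were already initialized...
*		break;
*	}
*/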
/**
* @brief Read the MFW mailbox into Current buffer.
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
* @brief Ack to mfw that driver finished FLR process for VFs
*
* @param p_hwfn
* @param p_ptt
* @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
*
* @return enum _ecore_status_t - ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *vfs_to_ack);
/**
* @brief - Called during init to read function-related info from shmem.
*
* @param p_hwfn
*
* @return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief - Reset the MCP using mailbox command.
*
* @param p_hwfn
* @param p_ptt
*
* @return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief - Sets the union data in the MCP mailbox and sends a mailbox command.
*
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @param cmd - command to be sent to the MCP
* @param param - optional param
* @param p_union_data - pointer to a drv_union_data
* @param o_mcp_resp - the MCP response code (exclude sequence)
* @param o_mcp_param - optional parameter provided by the MCP response
*
* @return enum _ecore_status_t -
* ECORE_SUCCESS - operation was successful
* ECORE_BUSY - operation failed
*/
enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 cmd, u32 param,
union drv_union_data *p_union_data,
u32 *o_mcp_resp,
u32 *o_mcp_param);
/**
* @brief - Sends an NVM write command request to the MFW with
* payload.
*
* @param p_hwfn
* @param p_ptt
* @param cmd - Command: Either DRV_MSG_CODE_NVM_WRITE_NVRAM or
* DRV_MSG_CODE_NVM_PUT_FILE_DATA
* @param param - [0:23] - Offset [24:31] - Size
* @param o_mcp_resp - MCP response
* @param o_mcp_param - MCP response param
* @param i_txn_size - Buffer size
* @param i_buf - Pointer to the buffer
*
* @return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
u32 *o_mcp_param,
u32 i_txn_size, u32 *i_buf);
/**
* @brief - Sends an NVM read command request to the MFW to get
* a buffer.
*
* @param p_hwfn
* @param p_ptt
* @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
* DRV_MSG_CODE_NVM_READ_NVRAM commands
* @param param - [0:23] - Offset [24:31] - Size
* @param o_mcp_resp - MCP response
* @param o_mcp_param - MCP response param
* @param o_txn_size - Buffer size output
* @param o_buf - Pointer to the buffer returned by the MFW.
*
* @return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
u32 *o_mcp_param,
u32 *o_txn_size, u32 *o_buf);
/**
* @brief indicates whether the MFW objects [under mcp_info] are accessible
*
* @param p_hwfn
*
* @return true iff MFW is running and mcp_info is initialized
*/
bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn);
/**
* @brief request MFW to configure MSI-X for a VF
*
* @param p_hwfn
* @param p_ptt
* @param vf_id - absolute inside engine
* @param num - number of entries to request
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 vf_id, u8 num);
/**
* @brief - Halt the MCP.
*
* @param p_hwfn
* @param p_ptt
*
* @return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief - Wake up the MCP.
*
* @param p_hwfn
* @param p_ptt
*
* @return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_link_state *p_link,
u8 max_bw);
int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_link_state *p_link,
u8 min_bw);
enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 mask_parities);
#endif /* __ECORE_MCP_H__ */


@ -0,0 +1,611 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_MCP_API_H__
#define __ECORE_MCP_API_H__
#include "ecore_status.h"
struct ecore_mcp_link_speed_params {
bool autoneg;
u32 advertised_speeds; /* bitmask of DRV_SPEED_CAPABILITY */
u32 forced_speed; /* In Mb/s */
};
struct ecore_mcp_link_pause_params {
bool autoneg;
bool forced_rx;
bool forced_tx;
};
struct ecore_mcp_link_params {
struct ecore_mcp_link_speed_params speed;
struct ecore_mcp_link_pause_params pause;
u32 loopback_mode; /* in PMM_LOOPBACK values */
};
struct ecore_mcp_link_capabilities {
u32 speed_capabilities;
};
struct ecore_mcp_link_state {
bool link_up;
u32 line_speed; /* In Mb/s */
u32 min_pf_rate; /* In Mb/s */
u32 speed; /* In Mb/s */
bool full_duplex;
bool an;
bool an_complete;
bool parallel_detection;
bool pfc_enabled;
#define ECORE_LINK_PARTNER_SPEED_1G_HD (1 << 0)
#define ECORE_LINK_PARTNER_SPEED_1G_FD (1 << 1)
#define ECORE_LINK_PARTNER_SPEED_10G (1 << 2)
#define ECORE_LINK_PARTNER_SPEED_20G (1 << 3)
#define ECORE_LINK_PARTNER_SPEED_25G (1 << 4)
#define ECORE_LINK_PARTNER_SPEED_40G (1 << 5)
#define ECORE_LINK_PARTNER_SPEED_50G (1 << 6)
#define ECORE_LINK_PARTNER_SPEED_100G (1 << 7)
u32 partner_adv_speed;
bool partner_tx_flow_ctrl_en;
bool partner_rx_flow_ctrl_en;
#define ECORE_LINK_PARTNER_SYMMETRIC_PAUSE (1)
#define ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE (2)
#define ECORE_LINK_PARTNER_BOTH_PAUSE (3)
u8 partner_adv_pause;
bool sfp_tx_fault;
};
struct ecore_mcp_function_info {
u8 pause_on_host;
enum ecore_pci_personality protocol;
u8 bandwidth_min;
u8 bandwidth_max;
u8 mac[ETH_ALEN];
u64 wwn_port;
u64 wwn_node;
#define ECORE_MCP_VLAN_UNSET (0xffff)
u16 ovlan;
};
struct ecore_mcp_nvm_common {
u32 offset;
u32 param;
u32 resp;
u32 cmd;
};
struct ecore_mcp_nvm_rd {
u32 *buf_size;
u32 *buf;
};
struct ecore_mcp_nvm_wr {
u32 buf_size;
u32 *buf;
};
struct ecore_mcp_nvm_params {
#define ECORE_MCP_CMD (1 << 0)
#define ECORE_MCP_NVM_RD (1 << 1)
#define ECORE_MCP_NVM_WR (1 << 2)
u8 type;
struct ecore_mcp_nvm_common nvm_common;
union {
struct ecore_mcp_nvm_rd nvm_rd;
struct ecore_mcp_nvm_wr nvm_wr;
};
};
struct ecore_mcp_drv_version {
u32 version;
u8 name[MCP_DRV_VER_STR_SIZE - 4];
};
struct ecore_mcp_lan_stats {
u64 ucast_rx_pkts;
u64 ucast_tx_pkts;
u32 fcs_err;
};
#ifndef ECORE_PROTO_STATS
#define ECORE_PROTO_STATS
enum ecore_mcp_protocol_type {
ECORE_MCP_LAN_STATS,
};
union ecore_mcp_protocol_stats {
struct ecore_mcp_lan_stats lan_stats;
};
#endif
enum ecore_ov_config_method {
ECORE_OV_CONFIG_MTU,
ECORE_OV_CONFIG_MAC,
ECORE_OV_CONFIG_WOL
};
enum ecore_ov_client {
ECORE_OV_CLIENT_DRV,
ECORE_OV_CLIENT_USER
};
enum ecore_ov_driver_state {
ECORE_OV_DRIVER_STATE_NOT_LOADED,
ECORE_OV_DRIVER_STATE_DISABLED,
ECORE_OV_DRIVER_STATE_ACTIVE
};
#define ECORE_MAX_NPIV_ENTRIES 128
#define ECORE_WWN_SIZE 8
struct ecore_fc_npiv_tbl {
u32 count;
u8 wwpn[ECORE_MAX_NPIV_ENTRIES][ECORE_WWN_SIZE];
u8 wwnn[ECORE_MAX_NPIV_ENTRIES][ECORE_WWN_SIZE];
};
#ifndef __EXTRACT__LINUX__
enum ecore_led_mode {
ECORE_LED_MODE_OFF,
ECORE_LED_MODE_ON,
ECORE_LED_MODE_RESTORE
};
#endif
/**
* @brief - returns the link params of the hw function
*
* @param p_hwfn
*
* @returns pointer to link params
*/
struct ecore_mcp_link_params *ecore_mcp_get_link_params(struct ecore_hwfn *);
/**
* @brief - return the link state of the hw function
*
* @param p_hwfn
*
* @returns pointer to link state
*/
struct ecore_mcp_link_state *ecore_mcp_get_link_state(struct ecore_hwfn *);
/**
* @brief - return the link capabilities of the hw function
*
* @param p_hwfn
*
* @returns pointer to link capabilities
*/
struct ecore_mcp_link_capabilities
*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn);
/**
* @brief Request the MFW to set the link according to 'link_input'.
*
* @param p_hwfn
* @param p_ptt
* @param b_up - raise link if `true'. Reset link if `false'.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, bool b_up);
/**
* @brief Get the management firmware version value
*
* @param p_dev - ecore dev pointer
* @param p_ptt
* @param p_mfw_ver - mfw version value
* @param p_running_bundle_id - image id in nvram; Optional.
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev,
struct ecore_ptt *p_ptt,
u32 *p_mfw_ver,
u32 *p_running_bundle_id);
/**
* @brief Get media type value of the port.
*
* @param p_dev - ecore dev pointer
* @param media_type - media type value
*
* @return enum _ecore_status_t -
* ECORE_SUCCESS - Operation was successful.
* ECORE_BUSY - Operation failed
*/
enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
u32 *media_type);
/**
* @brief - Sends a command to the MCP mailbox.
*
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @param cmd - command to be sent to the MCP
* @param param - optional param
* @param o_mcp_resp - the MCP response code (exclude sequence)
* @param o_mcp_param - optional parameter provided by the MCP response
*
* @return enum _ecore_status_t -
* ECORE_SUCCESS - operation was successful
* ECORE_BUSY - operation failed
*/
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 cmd, u32 param,
u32 *o_mcp_resp, u32 *o_mcp_param);
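/* Usage sketch (illustrative, not from the PMD): sending a mailbox command
* and checking the result. 'cmd' and 'param_in' are placeholders.
*
*	u32 mcp_resp = 0, mcp_param = 0;
*	enum _ecore_status_t rc;
*
*	rc = ecore_mcp_cmd(p_hwfn, p_ptt, cmd, param_in,
*			   &mcp_resp, &mcp_param);
*	if (rc != ECORE_SUCCESS)
*		return rc;
*	...inspect mcp_resp / mcp_param as needed...
*/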
/**
* @brief - drains the nig, allowing completion to pass in case of pauses.
* (Should be called only from sleepable context)
*
* @param p_hwfn
* @param p_ptt
*/
enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief - return the mcp function info of the hw function
*
* @param p_hwfn
*
* @returns pointer to mcp function info
*/
const struct ecore_mcp_function_info
*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn);
/**
* @brief - Function for reading/manipulating the nvram. The following
* functionalities are supported:
* 1. Read: Read the specified nvram offset.
* input values:
* type - ECORE_MCP_NVM_RD
* cmd - command code (e.g. DRV_MSG_CODE_NVM_READ_NVRAM)
* offset - nvm offset
*
* output values:
* buf - buffer
* buf_size - buffer size
*
* 2. Write: Write the data at the specified nvram offset
* input values:
* type - ECORE_MCP_NVM_WR
* cmd - command code (e.g. DRV_MSG_CODE_NVM_WRITE_NVRAM)
* offset - nvm offset
* buf - buffer
* buf_size - buffer size
*
* 3. Command: Send the NVM command to MCP.
* input values:
* type - ECORE_MCP_CMD
* cmd - command code (e.g. DRV_MSG_CODE_NVM_DEL_FILE)
* offset - nvm offset
*
*
* @param p_hwfn
* @param p_ptt
* @param params
*
* @return ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_nvm_params *params);
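/* Usage sketch (illustrative, hypothetical values): issuing an NVM read
* through ecore_mcp_nvm_command(), following the 'Read' flow described
* above. 'nvm_offset' is a placeholder.
*
*	struct ecore_mcp_nvm_params params;
*	u32 buf[64], buf_size = sizeof(buf);
*
*	OSAL_MEMSET(&params, 0, sizeof(params));
*	params.type = ECORE_MCP_NVM_RD;
*	params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
*	params.nvm_common.offset = nvm_offset;
*	params.nvm_rd.buf = buf;
*	params.nvm_rd.buf_size = &buf_size;
*	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
*/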
/**
* @brief - Count the number of functions with a matching personality on the engine.
*
* @param p_hwfn
* @param p_ptt
* @param personalities - a bitmask of ecore_pci_personality values
*
* @returns the count of all functions on the engine whose personality matches
* one of the bitmasks.
*/
int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 personalities);
/**
* @brief Get the flash size value
*
* @param p_hwfn
* @param p_ptt
* @param p_flash_size - flash size in bytes to be filled.
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *p_flash_size);
/**
* @brief Send driver version to MFW
*
* @param p_hwfn
* @param p_ptt
* @param version - Version value
* @param name - Protocol driver name
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_mcp_drv_version *p_ver);
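/* Usage sketch (illustrative): reporting a driver version to the MFW. The
* version encoding and name are placeholders, and OSAL_MEMCPY is assumed to
* be provided by bcm_osal.h.
*
*	struct ecore_mcp_drv_version drv_ver;
*
*	OSAL_MEMSET(&drv_ver, 0, sizeof(drv_ver));
*	drv_ver.version = ver_value;
*	OSAL_MEMCPY(drv_ver.name, drv_name_str, sizeof(drv_ver.name) - 1);
*	rc = ecore_mcp_send_drv_version(p_hwfn, p_ptt, &drv_ver);
*/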
/**
* @brief Read the MFW process kill counter
*
* @param p_hwfn
* @param p_ptt
*
* @return u32
*/
u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Trigger a recovery process
*
* @param p_hwfn
* @param p_ptt
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Notify MFW about the change in base device properties
*
* @param p_hwfn
* @param p_ptt
* @param config - Configuration that has been updated
* @param client - ecore client type
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t
ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_ov_config_method config,
enum ecore_ov_client client);
/**
* @brief Notify MFW about the driver state
*
* @param p_hwfn
* @param p_ptt
* @param drv_state - Driver state
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t
ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_ov_driver_state drv_state);
/**
* @brief Read NPIV settings from the MFW
*
* @param p_hwfn
* @param p_ptt
* @param p_table - Array to hold the FC NPIV data. The client needs to
* allocate the required buffer. The field 'count' specifies the number of
* NPIV entries. A value of 0 means the table was not populated.
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t
ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_fc_npiv_tbl *p_table);
/**
* @brief Send MTU size to MFW
*
* @param p_hwfn
* @param p_ptt
* @param mtu - MTU size
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 mtu);
/**
* @brief Set LED status
*
* @param p_hwfn
* @param p_ptt
* @param mode - LED mode
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_led_mode mode);
/**
* @brief Set secure mode
*
* @param p_dev
* @param addr - nvm offset
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
u32 addr);
/**
* @brief Write to phy
*
* @param p_dev
* @param addr - nvm offset
* @param cmd - nvm command
* @param p_buf - nvm write buffer
* @param len - buffer len
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
u32 addr, u8 *p_buf, u32 len);
/**
* @brief Write to nvm
*
* @param p_dev
* @param addr - nvm offset
* @param cmd - nvm command
* @param p_buf - nvm write buffer
* @param len - buffer len
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
u32 addr, u8 *p_buf, u32 len);
/**
* @brief Put file begin
*
* @param p_dev
* @param addr - nvm offset
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
u32 addr);
/**
* @brief Delete file
*
* @param p_dev
* @param addr - nvm offset
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr);
/**
* @brief Check latest response
*
* @param p_dev
* @param p_buf - nvm response buffer
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf);
/**
* @brief Read from phy
*
* @param p_dev
* @param addr - nvm offset
* @param cmd - nvm command
* @param p_buf - nvm read buffer
* @param len - buffer len
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
u32 addr, u8 *p_buf, u32 len);
/**
* @brief Read from nvm
*
* @param p_dev
* @param addr - nvm offset
* @param p_buf - nvm read buffer
* @param len - buffer len
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
u8 *p_buf, u32 len);
/**
* @brief Read from sfp
*
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @param port - transceiver port
* @param addr - I2C address
* @param offset - offset in sfp
* @param len - buffer length
* @param p_buf - buffer to read into
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 port, u32 addr, u32 offset,
u32 len, u8 *p_buf);
/**
* @brief Write to sfp
*
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @param port - transceiver port
* @param addr - I2C address
* @param offset - offset in sfp
* @param len - buffer length
* @param p_buf - buffer to write from
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 port, u32 addr, u32 offset,
u32 len, u8 *p_buf);
/**
* @brief Gpio read
*
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @param gpio - gpio number
* @param gpio_val - value read from gpio
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 gpio, u32 *gpio_val);
/**
* @brief Gpio write
*
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @param gpio - gpio number
* @param gpio_val - value to write to gpio
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 gpio, u16 gpio_val);
#endif


@ -0,0 +1,28 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_PROTO_IF_H__
#define __ECORE_PROTO_IF_H__
/*
* PF parameters (according to personality/protocol)
*/
struct ecore_eth_pf_params {
/* The following parameters are used during HW-init
* and these parameters need to be passed as arguments
* to update_pf_params routine invoked before slowpath start
*/
u16 num_cons;
};
struct ecore_pf_params {
struct ecore_eth_pf_params eth_pf_params;
};
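/* Usage sketch (illustrative): a protocol driver sizing its connection pool
* before slowpath start; the value 64 is an arbitrary placeholder.
*
*	struct ecore_pf_params pf_params;
*
*	OSAL_MEMSET(&pf_params, 0, sizeof(pf_params));
*	pf_params.eth_pf_params.num_cons = 64;
*/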
#endif


@ -0,0 +1,446 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __RT_DEFS_H__
#define __RT_DEFS_H__
/* Runtime array offsets */
#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
#define CAU_REG_PI_MEMORY_RT_SIZE 4416
#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
#define SRC_REG_FIRSTFREE_RT_SIZE 2
#define SRC_REG_LASTFREE_RT_OFFSET 6667
#define SRC_REG_LASTFREE_RT_SIZE 2
#define SRC_REG_COUNTFREE_RT_OFFSET 6669
#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642
#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643
#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644
#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645
#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646
#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647
#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648
#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649
#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650
#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651
#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652
#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653
#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654
#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655
#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656
#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657
#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658
#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659
#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660
#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661
#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662
#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663
#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664
#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665
#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666
#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667
#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668
#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669
#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670
#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671
#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672
#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673
#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674
#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675
#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676
#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677
#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678
#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679
#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680
#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681
#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682
#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683
#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684
#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685
#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686
#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687
#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688
#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689
#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690
#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691
#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692
#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693
#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694
#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695
#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696
#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697
#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698
#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699
#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700
#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701
#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702
#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703
#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704
#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705
#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706
#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707
#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708
#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709
#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
#define QM_REG_VOQCRDLINE_RT_OFFSET 29837
#define QM_REG_VOQCRDLINE_RT_SIZE 20
#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857
#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877
#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878
#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879
#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880
#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881
#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882
#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883
#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884
#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885
#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886
#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887
#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888
#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889
#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890
#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891
#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892
#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893
#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894
#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895
#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896
#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897
#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898
#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899
#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900
#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901
#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902
#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903
#define QM_REG_PQTX2PF_0_RT_OFFSET 29904
#define QM_REG_PQTX2PF_1_RT_OFFSET 29905
#define QM_REG_PQTX2PF_2_RT_OFFSET 29906
#define QM_REG_PQTX2PF_3_RT_OFFSET 29907
#define QM_REG_PQTX2PF_4_RT_OFFSET 29908
#define QM_REG_PQTX2PF_5_RT_OFFSET 29909
#define QM_REG_PQTX2PF_6_RT_OFFSET 29910
#define QM_REG_PQTX2PF_7_RT_OFFSET 29911
#define QM_REG_PQTX2PF_8_RT_OFFSET 29912
#define QM_REG_PQTX2PF_9_RT_OFFSET 29913
#define QM_REG_PQTX2PF_10_RT_OFFSET 29914
#define QM_REG_PQTX2PF_11_RT_OFFSET 29915
#define QM_REG_PQTX2PF_12_RT_OFFSET 29916
#define QM_REG_PQTX2PF_13_RT_OFFSET 29917
#define QM_REG_PQTX2PF_14_RT_OFFSET 29918
#define QM_REG_PQTX2PF_15_RT_OFFSET 29919
#define QM_REG_PQTX2PF_16_RT_OFFSET 29920
#define QM_REG_PQTX2PF_17_RT_OFFSET 29921
#define QM_REG_PQTX2PF_18_RT_OFFSET 29922
#define QM_REG_PQTX2PF_19_RT_OFFSET 29923
#define QM_REG_PQTX2PF_20_RT_OFFSET 29924
#define QM_REG_PQTX2PF_21_RT_OFFSET 29925
#define QM_REG_PQTX2PF_22_RT_OFFSET 29926
#define QM_REG_PQTX2PF_23_RT_OFFSET 29927
#define QM_REG_PQTX2PF_24_RT_OFFSET 29928
#define QM_REG_PQTX2PF_25_RT_OFFSET 29929
#define QM_REG_PQTX2PF_26_RT_OFFSET 29930
#define QM_REG_PQTX2PF_27_RT_OFFSET 29931
#define QM_REG_PQTX2PF_28_RT_OFFSET 29932
#define QM_REG_PQTX2PF_29_RT_OFFSET 29933
#define QM_REG_PQTX2PF_30_RT_OFFSET 29934
#define QM_REG_PQTX2PF_31_RT_OFFSET 29935
#define QM_REG_PQTX2PF_32_RT_OFFSET 29936
#define QM_REG_PQTX2PF_33_RT_OFFSET 29937
#define QM_REG_PQTX2PF_34_RT_OFFSET 29938
#define QM_REG_PQTX2PF_35_RT_OFFSET 29939
#define QM_REG_PQTX2PF_36_RT_OFFSET 29940
#define QM_REG_PQTX2PF_37_RT_OFFSET 29941
#define QM_REG_PQTX2PF_38_RT_OFFSET 29942
#define QM_REG_PQTX2PF_39_RT_OFFSET 29943
#define QM_REG_PQTX2PF_40_RT_OFFSET 29944
#define QM_REG_PQTX2PF_41_RT_OFFSET 29945
#define QM_REG_PQTX2PF_42_RT_OFFSET 29946
#define QM_REG_PQTX2PF_43_RT_OFFSET 29947
#define QM_REG_PQTX2PF_44_RT_OFFSET 29948
#define QM_REG_PQTX2PF_45_RT_OFFSET 29949
#define QM_REG_PQTX2PF_46_RT_OFFSET 29950
#define QM_REG_PQTX2PF_47_RT_OFFSET 29951
#define QM_REG_PQTX2PF_48_RT_OFFSET 29952
#define QM_REG_PQTX2PF_49_RT_OFFSET 29953
#define QM_REG_PQTX2PF_50_RT_OFFSET 29954
#define QM_REG_PQTX2PF_51_RT_OFFSET 29955
#define QM_REG_PQTX2PF_52_RT_OFFSET 29956
#define QM_REG_PQTX2PF_53_RT_OFFSET 29957
#define QM_REG_PQTX2PF_54_RT_OFFSET 29958
#define QM_REG_PQTX2PF_55_RT_OFFSET 29959
#define QM_REG_PQTX2PF_56_RT_OFFSET 29960
#define QM_REG_PQTX2PF_57_RT_OFFSET 29961
#define QM_REG_PQTX2PF_58_RT_OFFSET 29962
#define QM_REG_PQTX2PF_59_RT_OFFSET 29963
#define QM_REG_PQTX2PF_60_RT_OFFSET 29964
#define QM_REG_PQTX2PF_61_RT_OFFSET 29965
#define QM_REG_PQTX2PF_62_RT_OFFSET 29966
#define QM_REG_PQTX2PF_63_RT_OFFSET 29967
#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968
#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969
#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970
#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971
#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972
#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973
#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974
#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975
#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976
#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977
#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978
#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979
#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980
#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981
#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982
#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983
#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984
#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985
#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986
#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987
#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988
#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989
#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990
#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991
#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992
#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993
#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994
#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995
#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
#define QM_REG_RLGLBLCRD_RT_OFFSET 30508
#define QM_REG_RLGLBLCRD_RT_SIZE 256
#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764
#define QM_REG_RLPFPERIOD_RT_OFFSET 30765
#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766
#define QM_REG_RLPFINCVAL_RT_OFFSET 30767
#define QM_REG_RLPFINCVAL_RT_SIZE 16
#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
#define QM_REG_RLPFCRD_RT_OFFSET 30799
#define QM_REG_RLPFCRD_RT_SIZE 16
#define QM_REG_RLPFENABLE_RT_OFFSET 30815
#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816
#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
#define QM_REG_WFQPFCRD_RT_OFFSET 30849
#define QM_REG_WFQPFCRD_RT_SIZE 160
#define QM_REG_WFQPFENABLE_RT_OFFSET 31009
#define QM_REG_WFQVPENABLE_RT_OFFSET 31010
#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
#define QM_REG_TXPQMAP_RT_OFFSET 31523
#define QM_REG_TXPQMAP_RT_SIZE 512
#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
#define QM_REG_WFQVPCRD_RT_OFFSET 32547
#define QM_REG_WFQVPCRD_RT_SIZE 512
#define QM_REG_WFQVPMAP_RT_OFFSET 33059
#define QM_REG_WFQVPMAP_RT_SIZE 512
#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571
#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731
#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732
#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733
#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734
#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735
#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736
#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737
#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847
#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33848
#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33849
#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33850
#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33851
#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33852
#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33853
#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33854
#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33855
#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33856
#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33857
#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33858
#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33859
#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33860
#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33861
#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33862
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33863
#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33864
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33865
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33866
#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33867
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33868
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33869
#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33870
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33871
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33872
#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33873
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33874
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33875
#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33876
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33877
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33878
#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33879
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33880
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33881
#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33882
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33883
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33884
#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33885
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33886
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33887
#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33888
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33889
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33890
#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33891
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33892
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33893
#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33894
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33895
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33896
#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33897
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33898
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33899
#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33900
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33901
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33902
#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33903
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33904
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33905
#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33906
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33907
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33908
#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33909
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33910
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33911
#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33912
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33913
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33914
#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33915
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33916
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33917
#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33918
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33919
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33920
#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33921
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33922
#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33923
#define RUNTIME_ARRAY_SIZE 33924
#endif /* __RT_DEFS_H__ */


@ -0,0 +1,42 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_SP_API_H__
#define __ECORE_SP_API_H__
#include "ecore_status.h"
enum spq_mode {
ECORE_SPQ_MODE_BLOCK, /* Client will poll a designated mem. address */
ECORE_SPQ_MODE_CB, /* Client supplies a callback */
ECORE_SPQ_MODE_EBLOCK, /* ECORE should block until completion */
};
struct ecore_hwfn;
union event_ring_data;
struct eth_slow_path_rx_cqe;
struct ecore_spq_comp_cb {
void (*function)(struct ecore_hwfn *,
void *, union event_ring_data *, u8 fw_return_code);
void *cookie;
};
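/* Usage sketch (illustrative): supplying a completion callback for the
* ECORE_SPQ_MODE_CB mode. The callback body and cookie are hypothetical.
*
*	static void example_comp_cb(struct ecore_hwfn *p_hwfn, void *cookie,
*				    union event_ring_data *data,
*				    u8 fw_return_code)
*	{
*		...consume the completion...
*	}
*
*	struct ecore_spq_comp_cb comp_cb = {
*		.function = example_comp_cb,
*		.cookie = my_ctx,
*	};
*/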
/**
* @brief ecore_eth_cqe_completion - handles the completion of a
* ramrod on the cqe ring
*
* @param p_hwfn
* @param cqe
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
struct eth_slow_path_rx_cqe *cqe);
#endif


@ -0,0 +1,521 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry **pp_ent,
u8 cmd,
u8 protocol,
struct ecore_sp_init_data *p_data)
{
u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
struct ecore_spq_entry *p_ent = OSAL_NULL;
enum _ecore_status_t rc = ECORE_NOTIMPL;
/* Get an SPQ entry */
rc = ecore_spq_get_entry(p_hwfn, pp_ent);
if (rc != ECORE_SUCCESS)
return rc;
/* Fill the SPQ entry */
p_ent = *pp_ent;
p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
p_ent->elem.hdr.cmd_id = cmd;
p_ent->elem.hdr.protocol_id = protocol;
p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
p_ent->comp_mode = p_data->comp_mode;
p_ent->comp_done.done = 0;
switch (p_ent->comp_mode) {
case ECORE_SPQ_MODE_EBLOCK:
p_ent->comp_cb.cookie = &p_ent->comp_done;
break;
case ECORE_SPQ_MODE_BLOCK:
if (!p_data->p_comp_data)
return ECORE_INVAL;
p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
break;
case ECORE_SPQ_MODE_CB:
if (!p_data->p_comp_data)
p_ent->comp_cb.function = OSAL_NULL;
else
p_ent->comp_cb = *p_data->p_comp_data;
break;
default:
DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
p_ent->comp_mode);
return ECORE_INVAL;
}
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
opaque_cid, cmd, protocol,
(unsigned long)&p_ent->ramrod,
D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
"MODE_CB"));
OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
return ECORE_SUCCESS;
}
static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
{
switch (type) {
case ECORE_TUNN_CLSS_MAC_VLAN:
return TUNNEL_CLSS_MAC_VLAN;
case ECORE_TUNN_CLSS_MAC_VNI:
return TUNNEL_CLSS_MAC_VNI;
case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
return TUNNEL_CLSS_INNER_MAC_VLAN;
case ECORE_TUNN_CLSS_INNER_MAC_VNI:
return TUNNEL_CLSS_INNER_MAC_VNI;
default:
return TUNNEL_CLSS_MAC_VLAN;
}
}
static void
ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn,
struct ecore_tunn_update_params *p_src,
struct pf_update_tunnel_config *p_tunn_cfg)
{
unsigned long cached_tunn_mode = p_hwfn->p_dev->tunn_mode;
unsigned long update_mask = p_src->tunn_mode_update_mask;
unsigned long tunn_mode = p_src->tunn_mode;
unsigned long new_tunn_mode = 0;
if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) {
if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
} else {
if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode))
OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
}
if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) {
if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
} else {
if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode))
OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
}
if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) {
if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
} else {
if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode))
OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
}
if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
if (p_src->update_geneve_udp_port)
DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
p_src->update_geneve_udp_port = 0;
p_src->tunn_mode = new_tunn_mode;
return;
}
if (p_src->update_geneve_udp_port) {
p_tunn_cfg->set_geneve_udp_port_flg = 1;
p_tunn_cfg->geneve_udp_port =
OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
}
if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) {
if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
} else {
if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
}
if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) {
if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
} else {
if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
}
p_src->tunn_mode = new_tunn_mode;
}
static void
ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
struct ecore_tunn_update_params *p_src,
struct pf_update_tunnel_config *p_tunn_cfg)
{
unsigned long tunn_mode = p_src->tunn_mode;
enum tunnel_clss type;
ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
p_tunn_cfg->tunnel_clss_vxlan = type;
type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
p_tunn_cfg->tunnel_clss_l2gre = type;
type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
p_tunn_cfg->tunnel_clss_ipgre = type;
if (p_src->update_vxlan_udp_port) {
p_tunn_cfg->set_vxlan_udp_port_flg = 1;
p_tunn_cfg->vxlan_udp_port =
OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
}
if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_l2gre = 1;
if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_ipgre = 1;
if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_vxlan = 1;
if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
if (p_src->update_geneve_udp_port)
DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
p_src->update_geneve_udp_port = 0;
return;
}
if (p_src->update_geneve_udp_port) {
p_tunn_cfg->set_geneve_udp_port_flg = 1;
p_tunn_cfg->geneve_udp_port =
OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
}
if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_l2geneve = 1;
if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_ipgeneve = 1;
type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
p_tunn_cfg->tunnel_clss_l2geneve = type;
type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
p_tunn_cfg->tunnel_clss_ipgeneve = type;
}
static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
unsigned long tunn_mode)
{
u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
u8 l2geneve_enable = 0, ipgeneve_enable = 0;
if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
l2gre_enable = 1;
if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
ipgre_enable = 1;
if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
vxlan_enable = 1;
ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
if (ECORE_IS_BB_A0(p_hwfn->p_dev))
return;
if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
l2geneve_enable = 1;
if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
ipgeneve_enable = 1;
ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
ipgeneve_enable);
}
static void
ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
struct ecore_tunn_start_params *p_src,
struct pf_start_tunnel_config *p_tunn_cfg)
{
unsigned long tunn_mode;
enum tunnel_clss type;
if (!p_src)
return;
tunn_mode = p_src->tunn_mode;
type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
p_tunn_cfg->tunnel_clss_vxlan = type;
type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
p_tunn_cfg->tunnel_clss_l2gre = type;
type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
p_tunn_cfg->tunnel_clss_ipgre = type;
if (p_src->update_vxlan_udp_port) {
p_tunn_cfg->set_vxlan_udp_port_flg = 1;
p_tunn_cfg->vxlan_udp_port =
OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
}
if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_l2gre = 1;
if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_ipgre = 1;
if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_vxlan = 1;
if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
if (p_src->update_geneve_udp_port)
DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
p_src->update_geneve_udp_port = 0;
return;
}
if (p_src->update_geneve_udp_port) {
p_tunn_cfg->set_geneve_udp_port_flg = 1;
p_tunn_cfg->geneve_udp_port =
OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
}
if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_l2geneve = 1;
if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_ipgeneve = 1;
type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
p_tunn_cfg->tunnel_clss_l2geneve = type;
type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
p_tunn_cfg->tunnel_clss_ipgeneve = type;
}
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
struct ecore_tunn_start_params *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch)
{
struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
u8 sb_index = p_hwfn->p_eq->eq_sb_index;
enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_sp_init_data init_data;
u8 page_cnt;
/* update initial eq producer */
ecore_eq_prod_update(p_hwfn,
ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));
/* Initialize the SPQ entry for the ramrod */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_START,
PROTOCOLID_COMMON, &init_data);
if (rc != ECORE_SUCCESS)
return rc;
/* Fill the ramrod data */
p_ramrod = &p_ent->ramrod.pf_start;
p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
p_ramrod->event_ring_sb_index = sb_index;
p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);
p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
/* For easier debugging */
p_ramrod->dont_log_ramrods = 0;
p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf);
switch (mode) {
case ECORE_MF_DEFAULT:
case ECORE_MF_NPAR:
p_ramrod->mf_mode = MF_NPAR;
break;
case ECORE_MF_OVLAN:
p_ramrod->mf_mode = MF_OVLAN;
break;
default:
DP_NOTICE(p_hwfn, true,
"Unsupported MF mode, init as DEFAULT\n");
p_ramrod->mf_mode = MF_NPAR;
}
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
p_hwfn->p_eq->chain.pbl.p_phys_table);
page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
p_ramrod->event_ring_num_pages = page_cnt;
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
p_hwfn->p_consq->chain.pbl.p_phys_table);
ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
&p_ramrod->tunnel_config);
if (IS_MF_SI(p_hwfn))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
switch (p_hwfn->hw_info.personality) {
case ECORE_PCI_ETH:
p_ramrod->personality = PERSONALITY_ETH;
break;
default:
DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
p_hwfn->hw_info.personality);
p_ramrod->personality = PERSONALITY_ETH;
}
p_ramrod->base_vf_id = (u8)p_hwfn->hw_info.first_vf_in_pf;
p_ramrod->num_vfs = (u8)p_hwfn->p_dev->sriov_info.total_vfs;
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
sb, sb_index, p_ramrod->outer_tag);
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
if (p_tunn) {
ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
p_tunn->tunn_mode);
p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
}
return rc;
}
enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_sp_init_data init_data;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_CB;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
&init_data);
if (rc != ECORE_SUCCESS)
return rc;
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
struct ecore_tunn_update_params *p_tunn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_sp_init_data init_data;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = comp_mode;
init_data.p_comp_data = p_comp_data;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
&init_data);
if (rc != ECORE_SUCCESS)
return rc;
ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
&p_ent->ramrod.pf_update.tunnel_config);
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
if ((rc == ECORE_SUCCESS) && p_tunn) {
if (p_tunn->update_vxlan_udp_port)
ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
p_tunn->vxlan_udp_port);
if (p_tunn->update_geneve_udp_port)
ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
p_tunn->geneve_udp_port);
ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
p_tunn->tunn_mode);
p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
}
return rc;
}
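/* A minimal usage sketch for the tunnel-update ramrod above: only the VXLAN
 * UDP destination port is changed. The wrapper below and its name are
 * illustrative (a hypothetical caller), relying only on the
 * ecore_tunn_update_params fields referenced in this function.
 */
static enum _ecore_status_t
example_update_vxlan_port(struct ecore_hwfn *p_hwfn, u16 udp_port)
{
	struct ecore_tunn_update_params tunn;

	OSAL_MEMSET(&tunn, 0, sizeof(tunn));
	tunn.update_vxlan_udp_port = 1;
	tunn.vxlan_udp_port = udp_port;
	/* keep the currently configured tunnel mode */
	tunn.tunn_mode = p_hwfn->p_dev->tunn_mode;

	/* Block until the PF-update completion arrives on the EQ */
	return ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
					   ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
}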
enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
{
enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
&init_data);
if (rc != ECORE_SUCCESS)
return rc;
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_sp_init_data init_data;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
&init_data);
if (rc != ECORE_SUCCESS)
return rc;
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}


@ -0,0 +1,137 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_SP_COMMANDS_H__
#define __ECORE_SP_COMMANDS_H__
#include "ecore.h"
#include "ecore_spq.h"
#include "ecore_sp_api.h"
#define ECORE_SP_EQ_COMPLETION 0x01
#define ECORE_SP_CQE_COMPLETION 0x02
struct ecore_sp_init_data {
/* The CID and FID aren't necessarily derived from hwfn,
* e.g., in IOV scenarios. CID might differ between SPQ and
* other elements.
*/
u32 cid;
u16 opaque_fid;
/* Information regarding operation upon sending & completion */
enum spq_mode comp_mode;
struct ecore_spq_comp_cb *p_comp_data;
};
/**
* @brief Acquire and initialize an SPQ entry for a given ramrod.
*
* @param p_hwfn
* @param pp_ent - will be filled with a pointer to an entry upon success
* @param cmd - dependent upon protocol
* @param protocol
* @param p_data - various configuration required for ramrod
*
* @return ECORE_SUCCESS upon success, otherwise failure.
*/
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry **pp_ent,
u8 cmd,
u8 protocol,
struct ecore_sp_init_data *p_data);
/**
* @brief ecore_sp_pf_start - PF Function Start Ramrod
*
* This ramrod is sent to initialize a physical function (PF). It will
* configure the function related parameters and write its completion to the
* event ring specified in the parameters.
*
* Ramrods complete on the common event ring for the PF. This ring is
* allocated by the driver on host memory and its parameters are written
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
* @param p_hwfn
* @param p_tunn - pf start tunneling configuration
* @param mode
* @param allow_npar_tx_switch - npar tx switching to be used
* for vports configured for tx-switching.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
struct ecore_tunn_start_params *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch);
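/* A minimal call sketch (illustrative; names and values below are not a
 * prescribed sequence): the slowpath queues (SPQ/EQ/ConsQ) are assumed to be
 * allocated and set up already, and no tunnel offload is requested.
 *
 *	struct ecore_tunn_start_params tunn;
 *
 *	OSAL_MEM_ZERO(&tunn, sizeof(tunn));
 *	rc = ecore_sp_pf_start(p_hwfn, &tunn, ECORE_MF_DEFAULT, false);
 */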
/**
* @brief ecore_sp_pf_update_tunn_cfg - PF Function Tunnel configuration
* update Ramrod
*
* This ramrod is sent to update a tunneling configuration
* for a physical function (PF).
*
* @param p_hwfn
* @param p_tunn - pf update tunneling parameters
* @param comp_mode - completion mode
* @param p_comp_data - callback function
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
struct ecore_tunn_update_params *p_tunn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data);
/**
* @brief ecore_sp_pf_update - PF Function Update Ramrod
*
* This ramrod updates function-related parameters. Every parameter can be
* updated independently, according to configuration flags.
*
* @note Final phase API.
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_sp_pf_stop - PF Function Stop Ramrod
*
* This ramrod is sent to close a Physical Function (PF). It is the last ramrod
* sent and the last completion written to the PF's Event Ring. This ramrod also
* deletes the context for the slow-path hwfn connection on this PF.
*
* @note Not required for first packet.
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_sp_heartbeat_ramrod - Send empty Ramrod
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn);
#endif /*__ECORE_SP_COMMANDS_H__*/


@ -0,0 +1,937 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#include "ecore_hw.h"
/***************************************************************************
* Structures & Definitions
***************************************************************************/
#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
#define SPQ_BLOCK_SLEEP_LENGTH (1000)
/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
void *cookie,
union event_ring_data *data,
u8 fw_return_code)
{
struct ecore_spq_comp_done *comp_done;
comp_done = (struct ecore_spq_comp_done *)cookie;
comp_done->done = 0x1;
comp_done->fw_return_code = fw_return_code;
/* make update visible to waiting thread */
OSAL_SMP_WMB(p_hwfn->p_dev);
}
static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry *p_ent,
u8 *p_fw_ret)
{
int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
struct ecore_spq_comp_done *comp_done;
enum _ecore_status_t rc;
comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
while (sleep_count) {
OSAL_POLL_MODE_DPC(p_hwfn);
/* validate we receive completion update */
OSAL_SMP_RMB(p_hwfn->p_dev);
if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return ECORE_SUCCESS;
}
OSAL_MSLEEP(5);
sleep_count--;
}
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
/* Retry after drain */
sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
while (sleep_count) {
/* validate we receive completion update */
OSAL_SMP_RMB(p_hwfn->p_dev);
if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return ECORE_SUCCESS;
}
OSAL_MSLEEP(5);
sleep_count--;
}
if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return ECORE_SUCCESS;
}
DP_NOTICE(p_hwfn, true,
"Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
return ECORE_BUSY;
}
/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static enum _ecore_status_t
ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
{
p_ent->flags = 0;
switch (p_ent->comp_mode) {
case ECORE_SPQ_MODE_EBLOCK:
case ECORE_SPQ_MODE_BLOCK:
p_ent->comp_cb.function = ecore_spq_blocking_cb;
break;
case ECORE_SPQ_MODE_CB:
break;
default:
DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
p_ent->comp_mode);
return ECORE_INVAL;
}
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
" Data pointer: [%08x:%08x] Completion Mode: %s\n",
p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
p_ent->elem.hdr.protocol_id,
p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
"MODE_CB"));
return ECORE_SUCCESS;
}
/***************************************************************************
* HSI access
***************************************************************************/
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
struct ecore_spq *p_spq)
{
u16 pq;
struct ecore_cxt_info cxt_info;
struct core_conn_context *p_cxt;
union ecore_qm_pq_params pq_params;
enum _ecore_status_t rc;
cxt_info.iid = p_spq->cid;
rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
if (rc < 0) {
DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d",
p_spq->cid);
return;
}
p_cxt = cxt_info.p_cxt;
SET_FIELD(p_cxt->xstorm_ag_context.flags10,
XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
SET_FIELD(p_cxt->xstorm_ag_context.flags1,
XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
/* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
* XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
*/
SET_FIELD(p_cxt->xstorm_ag_context.flags9,
XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
/* CDU validation - FIXME currently disabled */
/* QM physical queue */
OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
pq_params.core.tc = LB_TC;
pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(pq);
p_cxt->xstorm_st_context.spq_base_lo =
DMA_LO_LE(p_spq->chain.p_phys_addr);
p_cxt->xstorm_st_context.spq_base_hi =
DMA_HI_LE(p_spq->chain.p_phys_addr);
p_cxt->xstorm_st_context.consolid_base_addr.lo =
DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
p_cxt->xstorm_st_context.consolid_base_addr.hi =
DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
}
static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
struct ecore_spq *p_spq,
struct ecore_spq_entry *p_ent)
{
struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
u16 echo = ecore_chain_get_prod_idx(p_chain);
struct slow_path_element *elem;
struct core_db_data db;
p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
elem = ecore_chain_produce(p_chain);
if (!elem) {
DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
return ECORE_INVAL;
}
*elem = p_ent->elem; /* struct assignment */
/* send a doorbell on the slow hwfn session */
OSAL_MEMSET(&db, 0, sizeof(db));
SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
DQ_XCM_CORE_SPQ_PROD_CMD);
db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
/* validate producer is up to date */
OSAL_RMB(p_hwfn->p_dev);
db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
/* do not reorder */
OSAL_BARRIER(p_hwfn->p_dev);
DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
/* make sure doorbell is rung */
OSAL_MMIOWB(p_hwfn->p_dev);
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
" agg_params: %02x, prod: %04x\n",
DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
db.agg_flags, ecore_chain_get_prod_idx(p_chain));
return ECORE_SUCCESS;
}
/***************************************************************************
* Asynchronous events
***************************************************************************/
static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
switch (p_eqe->protocol_id) {
case PROTOCOLID_COMMON:
return ECORE_SUCCESS;
default:
DP_NOTICE(p_hwfn,
true, "Unknown Async completion for protocol: %d\n",
p_eqe->protocol_id);
return ECORE_INVAL;
}
}
/***************************************************************************
* EQ API
***************************************************************************/
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
{
u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
REG_WR16(p_hwfn, addr, prod);
/* keep prod updates ordered */
OSAL_MMIOWB(p_hwfn->p_dev);
}
enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
void *cookie)
{
struct ecore_eq *p_eq = cookie;
struct ecore_chain *p_chain = &p_eq->chain;
enum _ecore_status_t rc = 0;
/* take a snapshot of the FW consumer */
u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
/* Need to guarantee the fw_cons index we use points to a usable
* element (to comply with our chain), so our macros would comply
*/
if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
ecore_chain_get_usable_per_page(p_chain)) {
fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
}
/* Complete current segment of eq entries */
while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
if (!p_eqe) {
rc = ECORE_INVAL;
break;
}
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"op %x prot %x res0 %x echo %x "
"fwret %x flags %x\n", p_eqe->opcode,
p_eqe->protocol_id, /* Event Protocol ID */
p_eqe->reserved0, /* Reserved */
OSAL_LE16_TO_CPU(p_eqe->echo),
p_eqe->fw_return_code, /* FW return code for SP
* ramrods
*/
p_eqe->flags);
if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
if (ecore_async_event_completion(p_hwfn, p_eqe))
rc = ECORE_INVAL;
} else if (ecore_spq_completion(p_hwfn,
p_eqe->echo,
p_eqe->fw_return_code,
&p_eqe->data)) {
rc = ECORE_INVAL;
}
ecore_chain_recycle_consumed(p_chain);
}
ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));
return rc;
}
struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
struct ecore_eq *p_eq;
/* Allocate EQ struct */
p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_eq));
if (!p_eq) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_eq'\n");
return OSAL_NULL;
}
/* Allocate and initialize EQ chain */
if (ecore_chain_alloc(p_hwfn->p_dev,
ECORE_CHAIN_USE_TO_PRODUCE,
ECORE_CHAIN_MODE_PBL,
ECORE_CHAIN_CNT_TYPE_U16,
num_elem,
sizeof(union event_ring_element), &p_eq->chain)) {
DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain");
goto eq_allocate_fail;
}
/* register EQ completion on the SP SB */
ecore_int_register_cb(p_hwfn,
ecore_eq_completion,
p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
return p_eq;
eq_allocate_fail:
ecore_eq_free(p_hwfn, p_eq);
return OSAL_NULL;
}
void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
{
ecore_chain_reset(&p_eq->chain);
}
void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
{
if (!p_eq)
return;
ecore_chain_free(p_hwfn->p_dev, &p_eq->chain);
OSAL_FREE(p_hwfn->p_dev, p_eq);
}
/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
struct eth_slow_path_rx_cqe
*cqe,
enum protocol_type protocol)
{
/* @@@tmp - it's possible we'll eventually want to handle some
* actual commands that can arrive here, but for now this is only
* used to complete the ramrod using the echo value on the cqe
*/
return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}
enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
struct eth_slow_path_rx_cqe *cqe)
{
enum _ecore_status_t rc;
rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
if (rc) {
DP_NOTICE(p_hwfn, true,
"Failed to handle RXQ CQE [cmd 0x%02x]\n",
cqe->ramrod_cmd_id);
}
return rc;
}
/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq_entry *p_virt = OSAL_NULL;
struct ecore_spq *p_spq = p_hwfn->p_spq;
dma_addr_t p_phys = 0;
u32 i, capacity;
OSAL_LIST_INIT(&p_spq->pending);
OSAL_LIST_INIT(&p_spq->completion_pending);
OSAL_LIST_INIT(&p_spq->free_pool);
OSAL_LIST_INIT(&p_spq->unlimited_pending);
OSAL_SPIN_LOCK_INIT(&p_spq->lock);
/* SPQ empty pool */
p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
p_virt = p_spq->p_virt;
capacity = ecore_chain_get_capacity(&p_spq->chain);
for (i = 0; i < capacity; i++) {
p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);
OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);
p_virt++;
p_phys += sizeof(struct ecore_spq_entry);
}
/* Statistics */
p_spq->normal_count = 0;
p_spq->comp_count = 0;
p_spq->comp_sent_count = 0;
p_spq->unlimited_pending_count = 0;
OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
p_spq->comp_bitmap_idx = 0;
/* SPQ cid, cannot fail */
ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
ecore_spq_hw_initialize(p_hwfn, p_spq);
/* reset the chain itself */
ecore_chain_reset(&p_spq->chain);
}
enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq_entry *p_virt = OSAL_NULL;
struct ecore_spq *p_spq = OSAL_NULL;
dma_addr_t p_phys = 0;
u32 capacity;
/* SPQ struct */
p_spq =
OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
if (!p_spq) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_spq'");
return ECORE_NOMEM;
}
/* SPQ ring */
if (ecore_chain_alloc(p_hwfn->p_dev, ECORE_CHAIN_USE_TO_PRODUCE,
ECORE_CHAIN_MODE_SINGLE, ECORE_CHAIN_CNT_TYPE_U16, 0,
/* N/A when the mode is SINGLE */
sizeof(struct slow_path_element), &p_spq->chain)) {
DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain");
goto spq_allocate_fail;
}
/* allocate and fill the SPQ elements (incl. ramrod data list) */
capacity = ecore_chain_get_capacity(&p_spq->chain);
p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
capacity *
sizeof(struct ecore_spq_entry));
if (!p_virt)
goto spq_allocate_fail;
p_spq->p_virt = p_virt;
p_spq->p_phys = p_phys;
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
p_hwfn->p_spq = p_spq;
return ECORE_SUCCESS;
spq_allocate_fail:
ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
OSAL_FREE(p_hwfn->p_dev, p_spq);
return ECORE_NOMEM;
}
void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq *p_spq = p_hwfn->p_spq;
u32 capacity;
if (!p_spq)
return;
if (p_spq->p_virt) {
capacity = ecore_chain_get_capacity(&p_spq->chain);
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
p_spq->p_virt,
p_spq->p_phys,
capacity *
sizeof(struct ecore_spq_entry));
}
ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
OSAL_FREE(p_hwfn->p_dev, p_spq);
}
enum _ecore_status_t
ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
{
struct ecore_spq *p_spq = p_hwfn->p_spq;
struct ecore_spq_entry *p_ent = OSAL_NULL;
OSAL_SPIN_LOCK(&p_spq->lock);
if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
sizeof(struct ecore_spq_entry));
if (!p_ent) {
OSAL_SPIN_UNLOCK(&p_spq->lock);
DP_NOTICE(p_hwfn, true,
"Failed to allocate an SPQ entry"
" for a pending ramrod\n");
return ECORE_NOMEM;
}
p_ent->queue = &p_spq->unlimited_pending;
} else {
p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
struct ecore_spq_entry, list);
OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
p_ent->queue = &p_spq->pending;
}
*pp_ent = p_ent;
OSAL_SPIN_UNLOCK(&p_spq->lock);
return ECORE_SUCCESS;
}
/* Locked variant; Should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry *p_ent)
{
OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}
void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry *p_ent)
{
OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
__ecore_spq_return_entry(p_hwfn, p_ent);
OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}
/**
* @brief ecore_spq_add_entry - adds a new entry to the pending
* list. Should be used while lock is being held.
*
* Adds an entry to the pending list if there is room (an empty
* element is available in the free_pool), or else places the
* entry in the unlimited_pending pool.
*
* @param p_hwfn
* @param p_ent
* @param priority
*
* @return enum _ecore_status_t
*/
static enum _ecore_status_t
ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry *p_ent, enum spq_priority priority)
{
struct ecore_spq *p_spq = p_hwfn->p_spq;
if (p_ent->queue == &p_spq->unlimited_pending) {
if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
OSAL_LIST_PUSH_TAIL(&p_ent->list,
&p_spq->unlimited_pending);
p_spq->unlimited_pending_count++;
return ECORE_SUCCESS;
}
struct ecore_spq_entry *p_en2;
p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
struct ecore_spq_entry,
list);
OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
/* Copy the ring element physical pointer to the new
* entry, since we are about to overwrite the entire ring
* entry and don't want to lose the pointer.
*/
p_ent->elem.data_ptr = p_en2->elem.data_ptr;
/* Setting the cookie to the comp_done of the
* new element.
*/
if (p_ent->comp_cb.cookie == &p_ent->comp_done)
p_ent->comp_cb.cookie = &p_en2->comp_done;
*p_en2 = *p_ent;
OSAL_FREE(p_hwfn->p_dev, p_ent);
p_ent = p_en2;
}
/* entry is to be placed in 'pending' queue */
switch (priority) {
case ECORE_SPQ_PRIORITY_NORMAL:
OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
p_spq->normal_count++;
break;
case ECORE_SPQ_PRIORITY_HIGH:
OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
p_spq->high_count++;
break;
default:
return ECORE_INVAL;
}
return ECORE_SUCCESS;
}
/***************************************************************************
* Accessor
***************************************************************************/
u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
if (!p_hwfn->p_spq)
return 0xffffffff; /* illegal */
return p_hwfn->p_spq->cid;
}
/***************************************************************************
* Posting new Ramrods
***************************************************************************/
static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
osal_list_t *head,
u32 keep_reserve)
{
struct ecore_spq *p_spq = p_hwfn->p_spq;
enum _ecore_status_t rc;
/* TODO - implementation might be wasteful; will always keep room
* for an additional high priority ramrod (even if one is already
* pending FW)
*/
while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
!OSAL_LIST_IS_EMPTY(head)) {
struct ecore_spq_entry *p_ent =
OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->completion_pending);
p_spq->comp_sent_count++;
rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
if (rc) {
OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
&p_spq->completion_pending);
__ecore_spq_return_entry(p_hwfn, p_ent);
return rc;
}
}
return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_spq *p_spq = p_hwfn->p_spq;
struct ecore_spq_entry *p_ent = OSAL_NULL;
while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
break;
p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
struct ecore_spq_entry, list);
if (!p_ent)
return ECORE_INVAL;
OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);
ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
}
rc = ecore_spq_post_list(p_hwfn,
&p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
if (rc)
return rc;
return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry *p_ent,
u8 *fw_return_code)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
bool b_ret_ent = true;
if (!p_hwfn)
return ECORE_INVAL;
if (!p_ent) {
DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
return ECORE_INVAL;
}
if (p_hwfn->p_dev->recov_in_prog) {
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Recovery is in progress -> skip spq post"
" [cmd %02x protocol %02x]",
p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
/* Return success to let the flows complete successfully
* w/o any error handling.
*/
return ECORE_SUCCESS;
}
OSAL_SPIN_LOCK(&p_spq->lock);
/* Complete the entry */
rc = ecore_spq_fill_entry(p_hwfn, p_ent);
/* Check return value after LOCK is taken for cleaner error flow */
if (rc)
goto spq_post_fail;
/* Add the request to the pending queue */
rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
if (rc)
goto spq_post_fail;
rc = ecore_spq_pend_post(p_hwfn);
if (rc) {
/* Since it's possible that pending failed for a different
* entry [although unlikely], the failed entry was already
* dealt with; No need to return it here.
*/
b_ret_ent = false;
goto spq_post_fail;
}
OSAL_SPIN_UNLOCK(&p_spq->lock);
if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
/* For entries in ECORE BLOCK mode, the completion code cannot
* perform the necessary cleanup - if it did, we couldn't
* access p_ent here to see whether it's successful or not.
* Thus, after gaining the answer perform the cleanup here.
*/
rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code);
if (rc)
goto spq_post_fail2;
/* return to pool */
ecore_spq_return_entry(p_hwfn, p_ent);
}
return rc;
spq_post_fail2:
OSAL_SPIN_LOCK(&p_spq->lock);
OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
ecore_chain_return_produced(&p_spq->chain);
spq_post_fail:
/* return to the free pool */
if (b_ret_ent)
__ecore_spq_return_entry(p_hwfn, p_ent);
OSAL_SPIN_UNLOCK(&p_spq->lock);
return rc;
}
enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
__le16 echo,
u8 fw_return_code,
union event_ring_data *p_data)
{
struct ecore_spq *p_spq;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_spq_entry *tmp;
struct ecore_spq_entry *found = OSAL_NULL;
enum _ecore_status_t rc;
if (!p_hwfn)
return ECORE_INVAL;
p_spq = p_hwfn->p_spq;
if (!p_spq)
return ECORE_INVAL;
OSAL_SPIN_LOCK(&p_spq->lock);
OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
tmp,
&p_spq->completion_pending,
list, struct ecore_spq_entry) {
if (p_ent->elem.hdr.echo == echo) {
OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
&p_spq->completion_pending);
/* Avoid overriding of SPQ entries when getting
* out-of-order completions, by marking the completions
* in a bitmap and increasing the chain consumer only
* for the first successive completed entries.
*/
SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
p_spq->comp_bitmap_idx)) {
SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
p_spq->comp_bitmap_idx);
p_spq->comp_bitmap_idx++;
ecore_chain_return_produced(&p_spq->chain);
}
p_spq->comp_count++;
found = p_ent;
break;
}
/* This is debug and should be relatively uncommon - depends
* on scenarios which have multiple per-PF sent ramrods.
*/
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Got completion for echo %04x - doesn't match"
" echo %04x in completion pending list\n",
OSAL_LE16_TO_CPU(echo),
OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
}
/* Release lock before callback, as callback may post
* an additional ramrod.
*/
OSAL_SPIN_UNLOCK(&p_spq->lock);
if (!found) {
DP_NOTICE(p_hwfn, true,
"Failed to find an entry this"
" EQE [echo %04x] completes\n",
OSAL_LE16_TO_CPU(echo));
return ECORE_EXISTS;
}
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Complete EQE [echo %04x]: func %p cookie %p)\n",
OSAL_LE16_TO_CPU(echo),
p_ent->comp_cb.function, p_ent->comp_cb.cookie);
if (found->comp_cb.function)
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
if (found->comp_mode != ECORE_SPQ_MODE_EBLOCK) {
/* EBLOCK is responsible for freeing its own entry */
ecore_spq_return_entry(p_hwfn, found);
}
/* Attempt to post pending requests */
OSAL_SPIN_LOCK(&p_spq->lock);
rc = ecore_spq_pend_post(p_hwfn);
OSAL_SPIN_UNLOCK(&p_spq->lock);
return rc;
}
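/* A self-contained sketch of the out-of-order completion technique used
 * above, detached from the SPQ structures (function and variable names are
 * illustrative): each completion is marked in a bitmap and the consumer
 * index only advances over a prefix of consecutively completed slots.
 */
static u16 example_oo_completion(unsigned long *bmap, u16 ring_size,
				 u16 cons, u16 completed_idx)
{
	OSAL_SET_BIT(completed_idx % ring_size, bmap);

	/* Advance only while the next expected slot has already completed */
	while (OSAL_TEST_BIT(cons % ring_size, bmap)) {
		OSAL_CLEAR_BIT(cons % ring_size, bmap);
		cons++;
	}

	return cons;
}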
struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_consq *p_consq;
/* Allocate ConsQ struct */
p_consq =
OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_consq));
if (!p_consq) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_consq'\n");
return OSAL_NULL;
}
/* Allocate and initialize EQ chain */
if (ecore_chain_alloc(p_hwfn->p_dev,
ECORE_CHAIN_USE_TO_PRODUCE,
ECORE_CHAIN_MODE_PBL,
ECORE_CHAIN_CNT_TYPE_U16,
ECORE_CHAIN_PAGE_SIZE / 0x80,
0x80, &p_consq->chain)) {
DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
goto consq_allocate_fail;
}
return p_consq;
consq_allocate_fail:
ecore_consq_free(p_hwfn, p_consq);
return OSAL_NULL;
}
void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
{
ecore_chain_reset(&p_consq->chain);
}
void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
{
if (!p_consq)
return;
ecore_chain_free(p_hwfn->p_dev, &p_consq->chain);
OSAL_FREE(p_hwfn->p_dev, p_consq);
}


@ -0,0 +1,284 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_SPQ_H__
#define __ECORE_SPQ_H__
#include "ecore_hsi_common.h"
#include "ecore_status.h"
#include "ecore_hsi_eth.h"
#include "ecore_chain.h"
#include "ecore_sp_api.h"
union ramrod_data {
struct pf_start_ramrod_data pf_start;
struct pf_update_ramrod_data pf_update;
struct rx_queue_start_ramrod_data rx_queue_start;
struct rx_queue_update_ramrod_data rx_queue_update;
struct rx_queue_stop_ramrod_data rx_queue_stop;
struct tx_queue_start_ramrod_data tx_queue_start;
struct tx_queue_stop_ramrod_data tx_queue_stop;
struct vport_start_ramrod_data vport_start;
struct vport_stop_ramrod_data vport_stop;
struct vport_update_ramrod_data vport_update;
struct core_rx_start_ramrod_data core_rx_queue_start;
struct core_rx_stop_ramrod_data core_rx_queue_stop;
struct core_tx_start_ramrod_data core_tx_queue_start;
struct core_tx_stop_ramrod_data core_tx_queue_stop;
struct vport_filter_update_ramrod_data vport_filter_update;
struct vf_start_ramrod_data vf_start;
struct vf_stop_ramrod_data vf_stop;
};
#define EQ_MAX_CREDIT 0xffffffff
enum spq_priority {
ECORE_SPQ_PRIORITY_NORMAL,
ECORE_SPQ_PRIORITY_HIGH,
};
union ecore_spq_req_comp {
struct ecore_spq_comp_cb cb;
u64 *done_addr;
};
/* SPQ_MODE_EBLOCK */
struct ecore_spq_comp_done {
u64 done;
u8 fw_return_code;
};
struct ecore_spq_entry {
osal_list_entry_t list;
u8 flags;
/* HSI slow path element */
struct slow_path_element elem;
union ramrod_data ramrod;
enum spq_priority priority;
/* pending queue for this entry */
osal_list_t *queue;
enum spq_mode comp_mode;
struct ecore_spq_comp_cb comp_cb;
struct ecore_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
};
struct ecore_eq {
struct ecore_chain chain;
u8 eq_sb_index; /* index within the SB */
__le16 *p_fw_cons; /* ptr to index value */
};
struct ecore_consq {
struct ecore_chain chain;
};
struct ecore_spq {
osal_spinlock_t lock;
osal_list_t unlimited_pending;
osal_list_t pending;
osal_list_t completion_pending;
osal_list_t free_pool;
struct ecore_chain chain;
/* allocated dma-able memory for spq entries (+ramrod data) */
dma_addr_t p_phys;
struct ecore_spq_entry *p_virt;
/* Bitmap for handling out-of-order completions */
#define SPQ_RING_SIZE \
(CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
#define SPQ_COMP_BMAP_SIZE \
(SPQ_RING_SIZE / (sizeof(unsigned long) * 8 /* BITS_PER_LONG */))
unsigned long p_comp_bitmap[SPQ_COMP_BMAP_SIZE];
u8 comp_bitmap_idx;
#define SPQ_COMP_BMAP_SET_BIT(p_spq, idx) \
(OSAL_SET_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
#define SPQ_COMP_BMAP_CLEAR_BIT(p_spq, idx) \
(OSAL_CLEAR_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
#define SPQ_COMP_BMAP_TEST_BIT(p_spq, idx) \
(OSAL_TEST_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
/* Statistics */
u32 unlimited_pending_count;
u32 normal_count;
u32 high_count;
u32 comp_sent_count;
u32 comp_count;
u32 cid;
};
struct ecore_port;
struct ecore_hwfn;
/**
* @brief ecore_spq_post - Posts a slow hwfn request to FW, or, lacking room,
* pends it to the pending list.
*
* @param p_hwfn
* @param p_req
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry *p_ent,
u8 *fw_return_code);
/**
* @brief ecore_spq_allocate - Allocates & initializes the SPQ and EQ.
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_spq_setup - Reset the SPQ to its start state.
*
* @param p_hwfn
*/
void ecore_spq_setup(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_spq_deallocate - Deallocates the given SPQ struct.
*
* @param p_hwfn
*/
void ecore_spq_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_spq_get_entry - Obtain an entry from the spq
* free pool list.
*
* @param p_hwfn
* @param pp_ent
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent);
/**
* @brief ecore_spq_return_entry - Return an entry to spq free
* pool list
*
* @param p_hwfn
* @param p_ent
*/
void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry *p_ent);
/**
* @brief ecore_eq_allocate - Allocates & initializes an EQ struct
*
* @param p_hwfn
* @param num_elem number of elements in the eq
*
* @return struct ecore_eq* - a newly allocated structure; NULL upon error.
*/
struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem);
/**
* @brief ecore_eq_setup - Reset the EQ to its start state.
*
* @param p_hwfn
* @param p_eq
*/
void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq);
/**
* @brief ecore_eq_deallocate - deallocates the given EQ struct.
*
* @param p_hwfn
* @param p_eq
*/
void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq);
/**
* @brief ecore_eq_prod_update - update the FW with default EQ producer
*
* @param p_hwfn
* @param prod
*/
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod);
/**
* @brief ecore_eq_completion - Completes currently pending EQ elements
*
* @param p_hwfn
* @param cookie
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
void *cookie);
/**
* @brief ecore_spq_completion - Completes a single event
*
* @param p_hwfn
* @param echo - echo value from cookie (used for determining completion)
* @param p_data - data from cookie (used in callback function if applicable)
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
__le16 echo,
u8 fw_return_code,
union event_ring_data *p_data);
/**
* @brief ecore_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
*
* @param p_hwfn
*
* @return u32 - SPQ CID
*/
u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_consq_alloc - Allocates & initializes a ConsQ
* struct
*
* @param p_hwfn
*
* @return struct ecore_consq* - a newly allocated structure; NULL upon error.
*/
struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_consq_setup - Reset the ConsQ to its start
* state.
*
* @param p_hwfn
* @param p_consq
*/
void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq);
/**
* @brief ecore_consq_free - deallocates the given ConsQ struct.
*
* @param p_hwfn
* @param p_consq
*/
void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq);
#endif /* __ECORE_SPQ_H__ */


@ -0,0 +1,30 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_STATUS_H__
#define __ECORE_STATUS_H__
enum _ecore_status_t {
ECORE_UNKNOWN_ERROR = -12,
ECORE_NORESOURCES = -11,
ECORE_NODEV = -10,
ECORE_ABORTED = -9,
ECORE_AGAIN = -8,
ECORE_NOTIMPL = -7,
ECORE_EXISTS = -6,
ECORE_IO = -5,
ECORE_TIMEOUT = -4,
ECORE_INVAL = -3,
ECORE_BUSY = -2,
ECORE_NOMEM = -1,
ECORE_SUCCESS = 0,
/* PENDING is not an error and should be positive */
ECORE_PENDING = 1,
};
#endif /* __ECORE_STATUS_H__ */


@ -0,0 +1,31 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_UTILS_H__
#define __ECORE_UTILS_H__
/* dma_addr_t manip */
#define DMA_LO(x) ((u32)(((dma_addr_t)(x)) & 0xffffffff))
#define DMA_HI(x) ((u32)(((dma_addr_t)(x)) >> 32))
#define DMA_LO_LE(x) OSAL_CPU_TO_LE32(DMA_LO(x))
#define DMA_HI_LE(x) OSAL_CPU_TO_LE32(DMA_HI(x))
/* It's assumed that whoever includes this has previously included an hsi
* file defining the regpair.
*/
#define DMA_REGPAIR_LE(x, val) (x).hi = DMA_HI_LE((val)); \
(x).lo = DMA_LO_LE((val))
#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t)
#define HILO_64(hi, lo) HILO_GEN(hi, lo, u64)
#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo))
#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
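/* For example (illustrative): DMA_REGPAIR_LE() writes a 64-bit DMA address
 * into an HSI regpair in the little-endian layout the FW expects,
 *
 *	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, pbl_phys_addr);
 *
 * while HILO_64_REGPAIR() combines a regpair that is already in CPU byte
 * order back into a single 64-bit value.
 */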
#endif


@ -0,0 +1,526 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ETH_COMMON__
#define __ETH_COMMON__
/********************/
/* ETH FW CONSTANTS */
/********************/
#define ETH_CACHE_LINE_SIZE 64
#define ETH_RX_CQE_GAP 32
#define ETH_MAX_RAMROD_PER_CON 8
#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
#define ETH_RX_NUM_NEXT_PAGE_BDS 2
#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
#define ETH_TX_MAX_LSO_HDR_NBD 4
#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 12 + 8))
#define ETH_TX_MAX_LSO_HDR_BYTES 510
#define ETH_TX_LSO_WINDOW_BDS_NUM 18
#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFFFF
#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define ETH_RX_MAX_BUFF_PER_PKT 5
/* num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512
#define ETH_NUM_VLAN_FILTERS 512
/* approx. multicast constants */
#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
#define ETH_MULTICAST_MAC_BINS 256
#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
/* ethernet vport update constants */
#define ETH_FILTER_RULES_COUNT 10
#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
#define ETH_RSS_KEY_SIZE_REGS 10
#define ETH_RSS_ENGINE_NUM_K2 207
#define ETH_RSS_ENGINE_NUM_BB 127
/* TPA constants */
#define ETH_TPA_MAX_AGGS_NUM 64
#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
/*
* Interrupt coalescing TimeSet
*/
struct coalescing_timeset {
u8 timeset;
u8 valid /* Only if this flag is set, timeset will take effect */;
};
/*
* Destination port mode
*/
enum dest_port_mode {
DEST_PORT_PHY /* Send to physical port. */,
DEST_PORT_LOOPBACK /* Send to loopback port. */,
DEST_PORT_PHY_LOOPBACK /* Send to physical and loopback port. */,
DEST_PORT_DROP /* Drop the packet in PBF. */,
MAX_DEST_PORT_MODE
};
/*
* Ethernet address type
*/
enum eth_addr_type {
BROADCAST_ADDRESS,
MULTICAST_ADDRESS,
UNICAST_ADDRESS,
UNKNOWN_ADDRESS,
MAX_ETH_ADDR_TYPE
};
struct eth_tx_1st_bd_flags {
u8 bitfields;
#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
};
/*
* The parsing information data for the first tx bd of a given packet.
*/
struct eth_tx_data_1st_bd {
__le16 vlan /* VLAN to insert to packet (if needed). */;
/* Number of BDs in packet. Should be at least 2 in non-LSO
* packet and at least 3 in LSO (or Tunnel with IPv6+ext) packet.
*/
u8 nbds;
struct eth_tx_1st_bd_flags bd_flags;
__le16 bitfields;
#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK 0x1
#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT 0
#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK 0x3FFF
#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT 2
};
/*
* The parsing information data for the second tx bd of a given packet.
*/
struct eth_tx_data_2nd_bd {
__le16 tunn_ip_size;
__le16 bitfields1;
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
__le16 bitfields2;
#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
};
/*
* Firmware data for L2-EDPM packet.
*/
struct eth_edpm_fw_data {
struct eth_tx_data_1st_bd data_1st_bd
/* Parsing information data from the 1st BD. */;
struct eth_tx_data_2nd_bd data_2nd_bd
/* Parsing information data from the 2nd BD. */;
__le32 reserved;
};
/*
* FW debug.
*/
struct eth_fast_path_cqe_fw_debug {
u8 reserved0 /* FW reserved. */;
u8 reserved1 /* FW reserved. */;
__le16 reserved2 /* FW reserved. */;
};
struct tunnel_parsing_flags {
u8 flags;
#define TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0
#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1
#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3
#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3
#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1
#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5
#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1
#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6
#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1
#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
};
/*
* Regular ETH Rx FP CQE.
*/
struct eth_fast_path_rx_reg_cqe {
u8 type /* CQE type */;
u8 bitfields;
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF
#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
__le16 pkt_len /* Total packet length (from the parser) */;
struct parsing_and_err_flags pars_flags
/* Parsing and error flags from the parser */;
__le16 vlan_tag /* 802.1q VLAN tag */;
__le32 rss_hash /* RSS hash result */;
__le16 len_on_first_bd /* Number of bytes placed on first BD */;
u8 placement_offset /* Offset of placement from BD start */;
struct tunnel_parsing_flags tunnel_pars_flags /* Tunnel Parsing Flags */
;
u8 bd_num /* Number of BDs, used for packet */;
u8 reserved[7];
struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
u8 reserved1[3];
u8 flags;
#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1
#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0
#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1
#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT 1
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK 0x3F
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT 2
};
/*
* TPA-continue ETH Rx FP CQE.
*/
struct eth_fast_path_rx_tpa_cont_cqe {
u8 type /* CQE type */;
u8 tpa_agg_index /* TPA aggregation index */;
__le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]
/* List of the segment sizes */;
u8 reserved[5];
u8 reserved1 /* FW reserved. */;
__le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE] /* FW reserved. */;
};
/*
* TPA-end ETH Rx FP CQE .
*/
struct eth_fast_path_rx_tpa_end_cqe {
u8 type /* CQE type */;
u8 tpa_agg_index /* TPA aggregation index */;
__le16 total_packet_len /* Total aggregated packet length */;
u8 num_of_bds /* Total number of BDs comprising the packet */;
u8 end_reason /* Aggregation end reason. Use enum eth_tpa_end_reason */
;
__le16 num_of_coalesced_segs /* Number of coalesced TCP segments */;
__le32 ts_delta /* TCP timestamp delta */;
__le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE]
/* List of the segment sizes */;
u8 reserved1[3];
u8 reserved2 /* FW reserved. */;
__le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE] /* FW reserved. */;
};
/*
* TPA-start ETH Rx FP CQE.
*/
struct eth_fast_path_rx_tpa_start_cqe {
u8 type /* CQE type */;
u8 bitfields;
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
__le16 seg_len /* Segment length (packetLen from the parser) */;
struct parsing_and_err_flags pars_flags
/* Parsing and error flags from the parser */;
__le16 vlan_tag /* 802.1q VLAN tag */;
__le32 rss_hash /* RSS hash result */;
__le16 len_on_first_bd /* Number of bytes placed on first BD */;
u8 placement_offset /* Offset of placement from BD start */;
struct tunnel_parsing_flags tunnel_pars_flags /* Tunnel Parsing Flags */
;
u8 tpa_agg_index /* TPA aggregation index */;
u8 header_len /* Packet L2+L3+L4 header length */;
__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]
/* Additional BDs length list. */;
struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
};
/*
* The L4 pseudo checksum mode for Ethernet
*/
enum eth_l4_pseudo_checksum_mode {
ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH
/* Pseudo Header checksum on packet is calculated
* with the correct packet length field.
*/
,
ETH_L4_PSEUDO_CSUM_ZERO_LENGTH
/* Pseudo Hdr checksum on packet is calc with zero len field. */
,
MAX_ETH_L4_PSEUDO_CHECKSUM_MODE
};
struct eth_rx_bd {
struct regpair addr /* single continuous buffer */;
};
/*
* regular ETH Rx SP CQE
*/
struct eth_slow_path_rx_cqe {
u8 type /* CQE type */;
u8 ramrod_cmd_id;
u8 error_flag;
u8 reserved[25];
__le16 echo;
u8 reserved1;
u8 flags;
#define ETH_SLOW_PATH_RX_CQE_VALID_MASK 0x1
#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT 0
#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK 0x1
#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1
#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK 0x3F
#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT 2
};
/*
* union for all ETH Rx CQE types
*/
union eth_rx_cqe {
struct eth_fast_path_rx_reg_cqe fast_path_regular /* Regular FP CQE */;
struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start
/* TPA-start CQE */;
struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont
/* TPA-continue CQE */;
struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end /* TPA-end CQE */
;
struct eth_slow_path_rx_cqe slow_path /* SP CQE */;
};
/*
* ETH Rx CQE type
*/
enum eth_rx_cqe_type {
ETH_RX_CQE_TYPE_UNUSED,
ETH_RX_CQE_TYPE_REGULAR /* Regular FP ETH Rx CQE */,
ETH_RX_CQE_TYPE_SLOW_PATH /* Slow path ETH Rx CQE */,
ETH_RX_CQE_TYPE_TPA_START /* TPA start ETH Rx CQE */,
ETH_RX_CQE_TYPE_TPA_CONT /* TPA Continue ETH Rx CQE */,
ETH_RX_CQE_TYPE_TPA_END /* TPA end ETH Rx CQE */,
MAX_ETH_RX_CQE_TYPE
};
/*
* Wrapper for PD RX CQE, used in order to cover a full cache line when writing a CQE
*/
struct eth_rx_pmd_cqe {
union eth_rx_cqe cqe /* CQE data itself */;
u8 reserved[ETH_RX_CQE_GAP];
};
/*
* ETH Rx producers data
*/
struct eth_rx_prod_data {
__le16 bd_prod /* BD producer */;
__le16 cqe_prod /* CQE producer */;
__le16 reserved;
__le16 reserved1 /* FW reserved. */;
};
/*
* Aggregation end reason.
*/
enum eth_tpa_end_reason {
ETH_AGG_END_UNUSED,
ETH_AGG_END_SP_UPDATE /* SP configuration update */,
ETH_AGG_END_MAX_LEN
/* Maximum aggregation length or maximum buffer number used. */,
ETH_AGG_END_LAST_SEG
/* TCP PSH flag or TCP payload length below continue threshold. */,
ETH_AGG_END_TIMEOUT /* Timeout expiration. */,
ETH_AGG_END_NOT_CONSISTENT,
ETH_AGG_END_OUT_OF_ORDER,
ETH_AGG_END_NON_TPA_SEG,
MAX_ETH_TPA_END_REASON
};
/*
* Eth Tunnel Type
*/
enum eth_tunn_type {
ETH_TUNN_GENEVE /* GENEVE Tunnel. */,
ETH_TUNN_TTAG /* T-Tag Tunnel. */,
ETH_TUNN_GRE /* GRE Tunnel. */,
ETH_TUNN_VXLAN /* VXLAN Tunnel. */,
MAX_ETH_TUNN_TYPE
};
/*
* The first tx bd of a given packet
*/
struct eth_tx_1st_bd {
struct regpair addr /* Single continuous buffer */;
__le16 nbytes /* Number of bytes in this BD. */;
struct eth_tx_data_1st_bd data /* Parsing information data. */;
};
/*
* The second tx bd of a given packet
*/
struct eth_tx_2nd_bd {
struct regpair addr /* Single continuous buffer */;
__le16 nbytes /* Number of bytes in this BD. */;
struct eth_tx_data_2nd_bd data /* Parsing information data. */;
};
/*
* The parsing information data for the third tx bd of a given packet.
*/
struct eth_tx_data_3rd_bd {
__le16 lso_mss /* For LSO packet - the MSS in bytes. */;
__le16 bitfields;
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
u8 tunn_l4_hdr_start_offset_w;
u8 tunn_hdr_size_w;
};
/*
* The third tx bd of a given packet
*/
struct eth_tx_3rd_bd {
struct regpair addr /* Single continuous buffer */;
__le16 nbytes /* Number of bytes in this BD. */;
struct eth_tx_data_3rd_bd data /* Parsing information data. */;
};
/*
* Complementary information for the regular tx bd of a given packet.
*/
struct eth_tx_data_bd {
__le16 reserved0;
__le16 bitfields;
#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF
#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
#define ETH_TX_DATA_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F
#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
__le16 reserved3;
};
/*
* The common regular TX BD ring element
*/
struct eth_tx_bd {
struct regpair addr /* Single continuous buffer */;
__le16 nbytes /* Number of bytes in this BD. */;
struct eth_tx_data_bd data /* Complementary information. */;
};
union eth_tx_bd_types {
struct eth_tx_1st_bd first_bd /* The first tx bd of a given packet */;
struct eth_tx_2nd_bd second_bd /* The second tx bd of a given packet */
;
struct eth_tx_3rd_bd third_bd /* The third tx bd of a given packet */;
struct eth_tx_bd reg_bd /* The common non-special bd */;
};
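/* A minimal fill sketch for a single-buffer, non-LSO packet using the first
 * BD above. Values are illustrative; DMA_HI_LE()/DMA_LO_LE() are assumed
 * from ecore_utils.h and 'mapping', 'pkt_len', 'num_bds_in_packet' are
 * hypothetical locals of the caller.
 *
 *	bd->addr.hi = DMA_HI_LE(mapping);
 *	bd->addr.lo = DMA_LO_LE(mapping);
 *	bd->nbytes = OSAL_CPU_TO_LE16(pkt_len);
 *	bd->data.nbds = num_bds_in_packet;
 *	bd->data.bd_flags.bitfields =
 *		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
 */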
/*
* Mstorm Queue Zone
*/
struct mstorm_eth_queue_zone {
struct eth_rx_prod_data rx_producers;
__le32 reserved[2];
};
/*
* Ustorm Queue Zone
*/
struct ustorm_eth_queue_zone {
struct coalescing_timeset int_coalescing_timeset
/* Rx interrupt coalescing TimeSet */;
__le16 reserved[3];
};
/*
* Ystorm Queue Zone
*/
struct ystorm_eth_queue_zone {
struct coalescing_timeset int_coalescing_timeset
/* Tx interrupt coalescing TimeSet */;
__le16 reserved[3];
};
/*
* ETH doorbell data
*/
struct eth_db_data {
u8 params;
#define ETH_DB_DATA_DEST_MASK 0x3
#define ETH_DB_DATA_DEST_SHIFT 0
#define ETH_DB_DATA_AGG_CMD_MASK 0x3
#define ETH_DB_DATA_AGG_CMD_SHIFT 2
#define ETH_DB_DATA_BYPASS_EN_MASK 0x1
#define ETH_DB_DATA_BYPASS_EN_SHIFT 4
#define ETH_DB_DATA_RESERVED_MASK 0x1
#define ETH_DB_DATA_RESERVED_SHIFT 5
#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
u8 agg_flags;
__le16 bd_prod;
};
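/* Doorbell fill sketch (illustrative): mirrors the slow-path doorbell built
 * in ecore_spq_hw_post(), here for an L2 TX queue; 'bd_producer' is a
 * hypothetical local and DB_DEST_XCM/DB_AGG_CMD_SET come from the common HSI.
 *
 *	struct eth_db_data db;
 *
 *	OSAL_MEMSET(&db, 0, sizeof(db));
 *	SET_FIELD(db.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
 *	SET_FIELD(db.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
 *	db.bd_prod = OSAL_CPU_TO_LE16(bd_producer);
 */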
#endif /* __ETH_COMMON__ */

File diff suppressed because it is too large


@ -0,0 +1,913 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
/****************************************************************************
*
* Name: nvm_cfg.h
*
* Description: NVM config file - Generated file from nvm cfg excel.
* DO NOT MODIFY !!!
*
* Created: 1/14/2016
*
****************************************************************************/
#ifndef NVM_CFG_H
#define NVM_CFG_H
struct nvm_cfg_mac_address {
u32 mac_addr_hi;
#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
u32 mac_addr_lo;
};
/******************************************
* nvm_cfg1 structs
******************************************/
struct nvm_cfg1_glob {
u32 generic_cont0; /* 0x0 */
#define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F
#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0
#define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0
#define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1
#define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2
#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3
#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000
#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12
#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0
#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1
#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000
#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13
#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000
#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21
#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000
#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29
#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0
#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1
#define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000
#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30
#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0
#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1
#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000
#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31
#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0
#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1
u32 engineering_change[3]; /* 0x4 */
u32 manufacturing_id; /* 0x10 */
u32 serial_number[4]; /* 0x14 */
u32 pcie_cfg; /* 0x24 */
#define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003
#define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0
#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0
#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1
#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2
#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004
#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2
#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0
#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1
#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018
#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3
#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0
#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2
#define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_MASK 0x00000020
#define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_OFFSET 5
#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0
#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6
#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00
#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10
#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0
#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1
#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2
#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3
#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000
#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13
#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000
#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21
#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000
#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29
/* Set the duration, in seconds, for which the fan failure signal should
 * be sampled
 */
#define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_MASK 0x80000000
#define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_OFFSET 31
u32 mgmt_traffic; /* 0x28 */
#define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001
#define NVM_CFG1_GLOB_RESERVED60_OFFSET 0
#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE
#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1
#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00
#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9
#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000
#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17
#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000
#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25
#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0
#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1
#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2
#define NVM_CFG1_GLOB_AUX_MODE_MASK 0x78000000
#define NVM_CFG1_GLOB_AUX_MODE_OFFSET 27
#define NVM_CFG1_GLOB_AUX_MODE_DEFAULT 0x0
#define NVM_CFG1_GLOB_AUX_MODE_SMBUS_ONLY 0x1
/* Indicates whether an external thermal sensor is available */
#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_MASK 0x80000000
#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_OFFSET 31
#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_DISABLED 0x0
#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ENABLED 0x1
u32 core_cfg; /* 0x2C */
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G 0x0
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G 0x1
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G 0x2
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F 0x3
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E 0x4
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G 0x5
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G 0xB
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G 0xC
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G 0xD
#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK 0x00000100
#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET 8
#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED 0x0
#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED 0x1
#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK 0x00000200
#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET 9
#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED 0x0
#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED 0x1
#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK 0x0003FC00
#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET 10
#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK 0x03FC0000
#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET 18
#define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000
#define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26
#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0
#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_CFG 0x1
#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_OTP 0x2
#define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3
#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000
#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29
#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0
#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1
u32 e_lane_cfg1; /* 0x30 */
#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
u32 e_lane_cfg2; /* 0x34 */
#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
#define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00
#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8
#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0
#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1
#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2
#define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000
#define NVM_CFG1_GLOB_NCSI_OFFSET 12
#define NVM_CFG1_GLOB_NCSI_DISABLED 0x0
#define NVM_CFG1_GLOB_NCSI_ENABLED 0x1
/* Maximum advertised PCIe link width */
#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_MASK 0x000F0000
#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_OFFSET 16
#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_16_LANES 0x0
#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_1_LANE 0x1
#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_2_LANES 0x2
#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_4_LANES 0x3
#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_8_LANES 0x4
/* ASPM L1 mode */
#define NVM_CFG1_GLOB_ASPM_L1_MODE_MASK 0x00300000
#define NVM_CFG1_GLOB_ASPM_L1_MODE_OFFSET 20
#define NVM_CFG1_GLOB_ASPM_L1_MODE_FORCED 0x0
#define NVM_CFG1_GLOB_ASPM_L1_MODE_DYNAMIC_LOW_LATENCY 0x1
#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_MASK 0x01C00000
#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_OFFSET 22
#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_DISABLED 0x0
#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_I2C 0x1
#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_ONLY 0x2
#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_SMBUS 0x3
#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_MASK 0x06000000
#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_OFFSET 25
#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_DISABLE 0x0
#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_INTERNAL 0x1
#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_EXTERNAL 0x2
#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_BOTH 0x3
/* Set the PLDM sensor modes */
#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_MASK 0x38000000
#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_OFFSET 27
#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_INTERNAL 0x0
#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_EXTERNAL 0x1
#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_BOTH 0x2
u32 f_lane_cfg1; /* 0x38 */
#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
u32 f_lane_cfg2; /* 0x3C */
#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
/* Control the period between two successive checks */
#define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_MASK 0x0000FF00
#define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_OFFSET 8
/* Set shutdown temperature */
#define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_MASK 0x00FF0000
#define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_OFFSET 16
/* Set the maximum count for exceeding the operational temperature */
#define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_MASK 0xFF000000
#define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_OFFSET 24
u32 eagle_preemphasis; /* 0x40 */
#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
u32 eagle_driver_current; /* 0x44 */
#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
u32 falcon_preemphasis; /* 0x48 */
#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
u32 falcon_driver_current; /* 0x4C */
#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
u32 pci_id; /* 0x50 */
#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
/* Set caution temperature */
#define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_MASK 0x00FF0000
#define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_OFFSET 16
/* Set external thermal sensor I2C address */
#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_MASK 0xFF000000
#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_OFFSET 24
u32 pci_subsys_id; /* 0x54 */
#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF
#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0
#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000
#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16
u32 bar; /* 0x58 */
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE
#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE
#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF
#define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00
#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8
#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0
#define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1
#define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2
#define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3
#define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4
#define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5
#define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6
#define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7
#define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8
#define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9
#define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA
#define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB
#define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC
#define NVM_CFG1_GLOB_BAR2_SIZE_256M 0xD
#define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE
#define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF
/* Set the duration, in seconds, for which the fan failure signal should
 * be sampled
 */
#define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_MASK 0x0000F000
#define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_OFFSET 12
u32 eagle_txfir_main; /* 0x5C */
#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
u32 eagle_txfir_post; /* 0x60 */
#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
u32 falcon_txfir_main; /* 0x64 */
#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
u32 falcon_txfir_post; /* 0x68 */
#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
u32 manufacture_ver; /* 0x6C */
#define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F
#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0
#define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0
#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6
#define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000
#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12
#define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000
#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18
#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000
#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24
u32 manufacture_time; /* 0x70 */
#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F
#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0
#define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0
#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
u32 led_global_settings; /* 0x74 */
#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
#define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0
#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4
#define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00
#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8
#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000
#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12
u32 generic_cont1; /* 0x78 */
#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF
#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0
u32 mbi_version; /* 0x7C */
#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
u32 mbi_date; /* 0x80 */
u32 misc_sig; /* 0x84 */
/* Define the GPIO mapping to switch i2c mux */
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
u32 device_capabilities; /* 0x88 */
#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
u32 power_dissipated; /* 0x8C */
#define NVM_CFG1_GLOB_POWER_DIS_D0_MASK 0x000000FF
#define NVM_CFG1_GLOB_POWER_DIS_D0_OFFSET 0
#define NVM_CFG1_GLOB_POWER_DIS_D1_MASK 0x0000FF00
#define NVM_CFG1_GLOB_POWER_DIS_D1_OFFSET 8
#define NVM_CFG1_GLOB_POWER_DIS_D2_MASK 0x00FF0000
#define NVM_CFG1_GLOB_POWER_DIS_D2_OFFSET 16
#define NVM_CFG1_GLOB_POWER_DIS_D3_MASK 0xFF000000
#define NVM_CFG1_GLOB_POWER_DIS_D3_OFFSET 24
u32 power_consumed; /* 0x90 */
#define NVM_CFG1_GLOB_POWER_CONS_D0_MASK 0x000000FF
#define NVM_CFG1_GLOB_POWER_CONS_D0_OFFSET 0
#define NVM_CFG1_GLOB_POWER_CONS_D1_MASK 0x0000FF00
#define NVM_CFG1_GLOB_POWER_CONS_D1_OFFSET 8
#define NVM_CFG1_GLOB_POWER_CONS_D2_MASK 0x00FF0000
#define NVM_CFG1_GLOB_POWER_CONS_D2_OFFSET 16
#define NVM_CFG1_GLOB_POWER_CONS_D3_MASK 0xFF000000
#define NVM_CFG1_GLOB_POWER_CONS_D3_OFFSET 24
u32 efi_version; /* 0x94 */
u32 reserved[42]; /* 0x98 */
};
struct nvm_cfg1_path {
u32 reserved[30]; /* 0x0 */
};
struct nvm_cfg1_port {
u32 reserved__m_relocated_to_option_123; /* 0x0 */
u32 reserved__m_relocated_to_option_124; /* 0x4 */
u32 generic_cont0; /* 0x8 */
#define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF
#define NVM_CFG1_PORT_LED_MODE_OFFSET 0
#define NVM_CFG1_PORT_LED_MODE_MAC1 0x0
#define NVM_CFG1_PORT_LED_MODE_PHY1 0x1
#define NVM_CFG1_PORT_LED_MODE_PHY2 0x2
#define NVM_CFG1_PORT_LED_MODE_PHY3 0x3
#define NVM_CFG1_PORT_LED_MODE_MAC2 0x4
#define NVM_CFG1_PORT_LED_MODE_PHY4 0x5
#define NVM_CFG1_PORT_LED_MODE_PHY5 0x6
#define NVM_CFG1_PORT_LED_MODE_PHY6 0x7
#define NVM_CFG1_PORT_LED_MODE_MAC3 0x8
#define NVM_CFG1_PORT_LED_MODE_PHY7 0x9
#define NVM_CFG1_PORT_LED_MODE_PHY8 0xA
#define NVM_CFG1_PORT_LED_MODE_PHY9 0xB
#define NVM_CFG1_PORT_LED_MODE_MAC4 0xC
#define NVM_CFG1_PORT_LED_MODE_PHY10 0xD
#define NVM_CFG1_PORT_LED_MODE_PHY11 0xE
#define NVM_CFG1_PORT_LED_MODE_PHY12 0xF
#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000
#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
u32 pcie_cfg; /* 0xC */
#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
#define NVM_CFG1_PORT_RESERVED15_OFFSET 0
u32 features; /* 0x10 */
#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001
#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0
#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0
#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1
#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002
#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1
#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0
#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1
u32 speed_cap_mask; /* 0x14 */
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G 0x40
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G 0x40
u32 link_settings; /* 0x18 */
#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F
#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G 0x7
#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780
#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7
#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2
#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G 0x7
#define NVM_CFG1_PORT_MFW_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800
#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11
#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1
#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2
#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4
#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK 0x00004000
#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14
#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED 0x0
#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED 0x1
#define NVM_CFG1_PORT_AN_25G_50G_OUI_MASK 0x00018000
#define NVM_CFG1_PORT_AN_25G_50G_OUI_OFFSET 15
#define NVM_CFG1_PORT_AN_25G_50G_OUI_CONSORTIUM 0x0
#define NVM_CFG1_PORT_AN_25G_50G_OUI_BAM 0x1
#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000E0000
#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17
#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_NONE 0x0
#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_FIRECODE 0x1
#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_RS 0x2
u32 phy_cfg; /* 0x1C */
#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1
#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2
#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4
#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8
#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31
#define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000
#define NVM_CFG1_PORT_AN_MODE_OFFSET 24
#define NVM_CFG1_PORT_AN_MODE_NONE 0x0
#define NVM_CFG1_PORT_AN_MODE_CL73 0x1
#define NVM_CFG1_PORT_AN_MODE_CL37 0x2
#define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3
#define NVM_CFG1_PORT_AN_MODE_CL37_BAM 0x4
#define NVM_CFG1_PORT_AN_MODE_HPAM 0x5
#define NVM_CFG1_PORT_AN_MODE_SGMII 0x6
u32 mgmt_traffic; /* 0x20 */
#define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F
#define NVM_CFG1_PORT_RESERVED61_OFFSET 0
u32 ext_phy; /* 0x24 */
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1
#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
u32 mba_cfg1; /* 0x28 */
#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001
#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0
#define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0
#define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1
#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006
#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1
#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078
#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3
#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080
#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7
#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0
#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1
#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100
#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8
#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0
#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1
#define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00
#define NVM_CFG1_PORT_RESERVED5_OFFSET 9
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_100G 0x7
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000
#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21
u32 mba_cfg2; /* 0x2C */
#define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF
#define NVM_CFG1_PORT_RESERVED65_OFFSET 0
#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000
#define NVM_CFG1_PORT_RESERVED66_OFFSET 16
u32 vf_cfg; /* 0x30 */
#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
#define NVM_CFG1_PORT_RESERVED8_OFFSET 0
#define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000
#define NVM_CFG1_PORT_RESERVED6_OFFSET 16
struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */
u32 led_port_settings; /* 0x3C */
#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF
#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0
#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00
#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8
#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000
#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16
#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1
#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2
#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8
#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10
#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20
#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G 0x40
u32 transceiver_00; /* 0x40 */
/* Defines the GPIO mapping of the transceiver module-absent signal */
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F
#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20
/* Define the GPIO mux settings to switch i2c mux to this port */
#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00
#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8
#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000
#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12
u32 device_ids; /* 0x44 */
#define NVM_CFG1_PORT_ETH_DID_SUFFIX_MASK 0x000000FF
#define NVM_CFG1_PORT_ETH_DID_SUFFIX_OFFSET 0
#define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_MASK 0xFF000000
#define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_OFFSET 24
u32 board_cfg; /* 0x48 */
/* This field defines the board technology
 * (backplane, transceiver, external PHY)
 */
#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000FF
#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0
#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0
#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1
#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2
#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3
#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4
/* This field defines the GPIO mapped to the tx_disable signal in the SFP */
#define NVM_CFG1_PORT_TX_DISABLE_MASK 0x0000FF00
#define NVM_CFG1_PORT_TX_DISABLE_OFFSET 8
#define NVM_CFG1_PORT_TX_DISABLE_NA 0x0
#define NVM_CFG1_PORT_TX_DISABLE_GPIO0 0x1
#define NVM_CFG1_PORT_TX_DISABLE_GPIO1 0x2
#define NVM_CFG1_PORT_TX_DISABLE_GPIO2 0x3
#define NVM_CFG1_PORT_TX_DISABLE_GPIO3 0x4
#define NVM_CFG1_PORT_TX_DISABLE_GPIO4 0x5
#define NVM_CFG1_PORT_TX_DISABLE_GPIO5 0x6
#define NVM_CFG1_PORT_TX_DISABLE_GPIO6 0x7
#define NVM_CFG1_PORT_TX_DISABLE_GPIO7 0x8
#define NVM_CFG1_PORT_TX_DISABLE_GPIO8 0x9
#define NVM_CFG1_PORT_TX_DISABLE_GPIO9 0xA
#define NVM_CFG1_PORT_TX_DISABLE_GPIO10 0xB
#define NVM_CFG1_PORT_TX_DISABLE_GPIO11 0xC
#define NVM_CFG1_PORT_TX_DISABLE_GPIO12 0xD
#define NVM_CFG1_PORT_TX_DISABLE_GPIO13 0xE
#define NVM_CFG1_PORT_TX_DISABLE_GPIO14 0xF
#define NVM_CFG1_PORT_TX_DISABLE_GPIO15 0x10
#define NVM_CFG1_PORT_TX_DISABLE_GPIO16 0x11
#define NVM_CFG1_PORT_TX_DISABLE_GPIO17 0x12
#define NVM_CFG1_PORT_TX_DISABLE_GPIO18 0x13
#define NVM_CFG1_PORT_TX_DISABLE_GPIO19 0x14
#define NVM_CFG1_PORT_TX_DISABLE_GPIO20 0x15
#define NVM_CFG1_PORT_TX_DISABLE_GPIO21 0x16
#define NVM_CFG1_PORT_TX_DISABLE_GPIO22 0x17
#define NVM_CFG1_PORT_TX_DISABLE_GPIO23 0x18
#define NVM_CFG1_PORT_TX_DISABLE_GPIO24 0x19
#define NVM_CFG1_PORT_TX_DISABLE_GPIO25 0x1A
#define NVM_CFG1_PORT_TX_DISABLE_GPIO26 0x1B
#define NVM_CFG1_PORT_TX_DISABLE_GPIO27 0x1C
#define NVM_CFG1_PORT_TX_DISABLE_GPIO28 0x1D
#define NVM_CFG1_PORT_TX_DISABLE_GPIO29 0x1E
#define NVM_CFG1_PORT_TX_DISABLE_GPIO30 0x1F
#define NVM_CFG1_PORT_TX_DISABLE_GPIO31 0x20
u32 reserved[131]; /* 0x4C */
};
struct nvm_cfg1_func {
struct nvm_cfg_mac_address mac_address; /* 0x0 */
u32 rsrv1; /* 0x8 */
#define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF
#define NVM_CFG1_FUNC_RESERVED1_OFFSET 0
#define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000
#define NVM_CFG1_FUNC_RESERVED2_OFFSET 16
u32 rsrv2; /* 0xC */
#define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF
#define NVM_CFG1_FUNC_RESERVED3_OFFSET 0
#define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000
#define NVM_CFG1_FUNC_RESERVED4_OFFSET 16
u32 device_id; /* 0x10 */
#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF
#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0
#define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000
#define NVM_CFG1_FUNC_RESERVED77_OFFSET 16
u32 cmn_cfg; /* 0x14 */
#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007
#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0
#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0
#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7
#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8
#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3
#define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000
#define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19
#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0
#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000
#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23
#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000
#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31
#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0
#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1
u32 pci_cfg; /* 0x18 */
#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F
#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0
#define NVM_CFG1_FUNC_RESERVESD12_MASK 0x00003F80
#define NVM_CFG1_FUNC_RESERVESD12_OFFSET 7
#define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000
#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14
#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0
#define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1
#define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2
#define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3
#define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4
#define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5
#define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6
#define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7
#define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8
#define NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9
#define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA
#define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB
#define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC
#define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD
#define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE
#define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF
#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000
#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18
u32 preboot_generic_cfg; /* 0x2C */
#define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_MASK 0x0000FFFF
#define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_OFFSET 0
#define NVM_CFG1_FUNC_PREBOOT_VLAN_MASK 0x00010000
#define NVM_CFG1_FUNC_PREBOOT_VLAN_OFFSET 16
u32 reserved[8]; /* 0x30 */
};
struct nvm_cfg1 {
struct nvm_cfg1_glob glob; /* 0x0 */
struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */
struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */
struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */
};
/******************************************
* nvm_cfg structs
******************************************/
enum nvm_cfg_sections {
NVM_CFG_SECTION_NVM_CFG1,
NVM_CFG_SECTION_MAX
};
struct nvm_cfg {
u32 num_sections;
u32 sections_offset[NVM_CFG_SECTION_MAX];
struct nvm_cfg1 cfg1;
};
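/* Illustrative sketch only (kept compiled out): reading a bit-field out of
 * the glob section with the generated MASK/OFFSET pairs, here the
 * multi-function mode from generic_cont0. The helper name is hypothetical.
 */
#if 0
static inline u32 nvm_cfg_get_mf_mode_example(const struct nvm_cfg *cfg)
{
	u32 generic_cont0 = cfg->cfg1.glob.generic_cont0;

	return (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
	    NVM_CFG1_GLOB_MF_MODE_OFFSET;
}
#endif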
#endif /* NVM_CFG_H */

File diff suppressed because it is too large